id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses 7 values)
---|---|---|
1784225 | <filename>asyncgTTS/_decos.py<gh_stars>1-10
from __future__ import annotations
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, cast
from .errors import NoInitialisedSession
if TYPE_CHECKING:
from typing import TypeVar
from typing_extensions import ParamSpec
_R = TypeVar("_R")
_P = ParamSpec("_P")
def require_session(func: Callable[_P, _R]) -> Callable[_P, _R]:
@wraps(func)
def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:
self = cast(Any, args[0])
if self.session:
return func(*args, **kwargs)
raise NoInitialisedSession("Session is not initialized, use async context manager or pass aiohttp.ClientSession on init")
return wrapper
| StarcoderdataPython |
1616096 | # coding: utf-8
from django.contrib import admin
from .models import TarotJoueur, TarotPartie, TarotParticipant, TarotJeu
from django.utils.safestring import mark_safe
from . import forms
class TarotJoueurAdmin(admin.ModelAdmin):
list_display = ('owner', 'pseudo', 'email')
list_filter = ('owner',)
# search_fields = ('pseudo', 'email')
pass
class TarotPartieAdmin(admin.ModelAdmin):
list_display = ('owner', 'name', 'jeu', 'modified')
list_filter = ('owner',)
# fields = ('name',)
# search_fields = ('name',)
# list_editable = ['jeu']
pass
class TarotParticipantAdmin(admin.ModelAdmin):
list_display = ('partie', 'joueur', 'score', 'order', 'donneur')
# search_fields = ('partie', 'joueur')
# ordering = ('partie', '-score')
list_filter = ('partie',)
# readonly_fields = ('score',)
pass
admin.site.register(TarotJoueur, TarotJoueurAdmin)
admin.site.register(TarotPartie, TarotPartieAdmin)
# admin.site.register(TarotJeu, TarotJeuAdmin)
admin.site.register(TarotParticipant, TarotParticipantAdmin)
| StarcoderdataPython |
3218275 | <filename>reviewboard/webapi/tests/test_server_info.py
from __future__ import unicode_literals
from django.utils import six
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import server_info_mimetype
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import get_server_info_url
@six.add_metaclass(BasicTestsMetaclass)
class ResourceTests(BaseWebAPITestCase):
"""Testing the ServerInfoResource APIs."""
fixtures = ['test_users']
sample_api_url = 'info/'
resource = resources.server_info
def setup_http_not_allowed_list_test(self, user):
return get_server_info_url()
def setup_http_not_allowed_item_test(self, user):
return get_server_info_url()
def compare_item(self, item_rsp, obj):
self.assertIn('product', item_rsp)
self.assertIn('site', item_rsp)
self.assertIn('capabilities', item_rsp)
caps = item_rsp['capabilities']
self.assertIn('diffs', caps)
diffs_caps = caps['diffs']
self.assertTrue(diffs_caps['moved_files'])
self.assertTrue(diffs_caps['base_commit_ids'])
diff_validation_caps = diffs_caps['validation']
self.assertTrue(diff_validation_caps['base_commit_ids'])
review_request_caps = caps['review_requests']
self.assertTrue(review_request_caps['commit_ids'])
text_caps = caps['text']
self.assertTrue(text_caps['markdown'])
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
return (get_server_info_url(local_site_name),
server_info_mimetype,
None)
| StarcoderdataPython |
4835174 | # number = None
# while (not number) or not (number > 0):
# try_number = input("Please enter a number > 0: ")
# try:
# number = float(try_number)
# print("Got it!")
# except ValueError as err:
# print("Error: ", err)
# try:
# file_handle = open("my_file")
# except IOError as err:
# print("Could not open file! Error: ", err)
# else:
# content = file_handle.read()
# result = analyse(content)
# finally:
# file_handle.close()
# key_list = ["key", "my_key", "bike_key", "transponder"]
# key_to_lock = {
# "my_key": "Finally I can go home again!",
# "bike_key": "This unlocks my Bike!",
# "transponder": "Back to work it is."
# }
# try:
# idx = int(input(f"A number between 0 and {len(key_list)-1} please: "))
# key = key_list[idx]
# print(key_to_lock[key])
# except (IndexError, KeyError, ValueError) as err:
# print("Well this didn't work:", err)
# key_list = ["key", "my_key", "bike_key", "transponder"]
# key_to_lock = {
# "my_key": "Finally I can go home again!",
# "bike_key": "This unlocks my Bike!",
# "transponder": "Back to work it is."
# }
# try:
# idx = int(input(f"A number between 0 and {len(key_list)-1} please: "))
# key = key_list[idx]
# print(key_to_lock[key])
# except IndexError as err:
# print("No, no. This index doesn't work.")
# except KeyError as err:
# print("Seems like that key has no lock. How strange.")
# except ValueError as err:
# print("That's not a number...")
# key_list = ["key", "my_key", "bike_key", "transponder"]
# key_to_lock = {
# "my_key": "Finally I can go home again!",
# "bike_key": "This unlocks my Bike!",
# "transponder": "Back to work it is."
# }
# try:
# idx = int(input(f"A number between 0 and {len(key_list)-1} please: "))
# key = key_list[idx]
# print(key_to_lock[key])
# except (IndexError, ValueError) as err:
# print("That was not a valid index:", err)
# except KeyError:
# print("Oh no! That key has no lock!")
# def sub(a, b):
# return a + b
# assert sub(5, 4) == 1, '5 - 4 != 1'
# assert sub(7, 3) == 4, '7 - 3 != 4'
# def diff(a, b):
# """Returns the absolute difference of a and b"""
# sub = a - b
# return sub if sub >= 0 else -sub
# ##########
# help(diff)
# import turtle
# help(turtle.up)
# def get_number(message):
# number = None
# while number is None:
# try:
# value = input(message)
# number = float(value)
# except ValueError:
# print("That was no number.")
# return number
# def get_idx():
# number = get_number("A positive integer please: ")
# if number < 0 or not (int(number) == number):
# raise ValueError(f"{number} is no positive integer")
# return number
# def add(a, b):
# """Returns the sum of a and b
# Args:
# a : the left operand
# b : the right operand
# Returns:
# The sum of a and b
# """
# return a + b
# def difficult_function(argument, other_arg=None):
# """Concise description.
# Longer description (if concise is not enough)
# which might need multiple lines.
# Or even some paragraphs.
# Args:
# argument: A description of this argument.
# other_arg: Another description.
# Returns:
# A short summary of what is returned, especially its format.
# Raises:
# ValueError: When does this occur?
# """
# pass
| StarcoderdataPython |
1744336 | <gh_stars>0
# -*- coding: utf-8 -*-
import urllib
import urllib2
import json
import os
import collections
import xml.etree.ElementTree as ET
url = u'http://thetvdb.com/api'
api_key = u'<KEY>'
# GetSeries.php?seriesname=<seriesname>
# GetSeriesByRemoteID.php?imdbid=<imdbid>
def search(term):
data = dict(seriesname=term)
req = urllib.urlencode(data)
fullurl = url + u'/GetSeries.php?' + req
xml = urllib2.urlopen(fullurl).read()
return ET.fromstring(xml)
def remote_id(imdbid):
data = dict(imdbid=imdbid)
req = urllib.urlencode(data)
fullurl = url + u'/GetSeriesByRemoteID.php?' + req
# print fullurl
xml = urllib2.urlopen(fullurl).read()
# print xml
return ET.fromstring(xml)
| StarcoderdataPython |
3355003 | # coding=utf-8
"""Annotations parsing feature tests."""
from typing import Dict
import pytest
from pytest_bdd import given, scenario, then, when
from yummy_cereal import AnnotationsParser
from ..models.menus.course import Course
from ..models.menus.dish import Dish
from ..models.menus.menu import Menu
@pytest.fixture()
def bdd_context() -> Dict:
return {}
@scenario(
"annotations_parsing.feature", "Parsing a menu from a yaml file"
)
def test_parsing_a_menu_from_a_yaml_file():
"""Parsing a menu from a yaml file."""
@given("I have a serialized menu")
def i_have_a_serialized_menu():
"""I have a serialized menu."""
@given("I have annotated menu classes")
def i_have_annotated_menu_classes():
"""I have annotated menu classes."""
@when("I create a menu parser")
def i_create_a_menu_parser(bdd_context: Dict):
"""I create a menu parser."""
dish_parser = AnnotationsParser(Dish)
course_parser = AnnotationsParser(
Course, specified_parsers={Dish: dish_parser}
)
bdd_context["menu_parser"] = AnnotationsParser(
Menu,
specified_parsers={Course: course_parser, Dish: dish_parser},
)
@when("I parse the serialized menu")
def i_parse_the_serialized_menu(
bdd_context: Dict, serialized_menu: Menu
):
"""I parse the serialized menu."""
menu_parser = bdd_context["menu_parser"]
bdd_context["parsed_menu"] = menu_parser(serialized_menu)
@then("I recieve a menu object")
def i_recieve_a_menu_object(bdd_context: Dict, parsed_menu: Menu):
"""I recieve a menu object."""
assert bdd_context["parsed_menu"] == parsed_menu
| StarcoderdataPython |
3391672 | <gh_stars>0
from __future__ import unicode_literals
import django
from django.conf import settings
from django.core.management import call_command
def main():
# Dynamically configure the Django settings with the minimum necessary to
# get Django running tests
settings.configure(
MIDDLEWARE=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
],
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'oembed',
),
TEST_RUNNER='django.test.runner.DiscoverRunner',
COVERAGE_EXCLUDES_FOLDERS=['migrations'],
# Django replaces this, but it still wants it. *shrugs*
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'HOST': '',
'PORT': '',
'USER': '',
'PASSWORD': '',
}
},
)
django.setup()
# Fire off the tests
call_command('migrate')
call_command('test')
if __name__ == '__main__':
main()
| StarcoderdataPython |
1724503 | <reponame>choonho/identity
import logging
from google.protobuf.json_format import MessageToDict
from spaceone.core import pygrpc
from spaceone.core.connector import BaseConnector
from spaceone.core.utils import parse_endpoint
from spaceone.identity.error.error_authentication import *
_LOGGER = logging.getLogger(__name__)
class AuthPluginConnector(BaseConnector):
def __init__(self, transaction, config):
super().__init__(transaction, config)
self.client = None
def initialize(self, endpoint):
_LOGGER.info(f'[initialize] endpoint: {endpoint}')
endpoint = endpoint.replace('"', '')
e = parse_endpoint(endpoint)
protocol = e['scheme']
if protocol == 'grpc':
self.client = pygrpc.client(endpoint="%s:%s" % (e['hostname'], e['port']), version='plugin')
elif protocol == 'http':
# TODO:
pass
if self.client is None:
raise ERROR_GRPC_CONFIGURATION
def call_login(self, endpoint, credentials):
self.initialize(endpoint)
# TODO: secret_data
params = {
'secret_data': {},
'user_credentials': credentials,
'options': {}
}
try:
meta = self.transaction.get_meta('transaction_id')
user_info = self.client.Auth.login(
params
# metadata=meta
)
except ERROR_BASE as e:
_LOGGER.error(f'[call_login] Auth.login failed. (reason={e.message})')
raise ERROR_INVALID_CREDENTIALS()
except Exception as e:
_LOGGER.error(f'[call_login] Auth.login failed. (reason={str(e)})')
raise ERROR_INVALID_CREDENTIALS()
return user_info
def init(self, options):
params = {
'options': options
}
try:
plugin_info = self.client.Auth.init(params)
return MessageToDict(plugin_info)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
def verify(self, options, secret_data):
params = {
'options': options,
'secret_data': secret_data
}
try:
# TODO: meta (plugin has no meta)
auth_verify_info = self.client.Auth.verify(params)
return MessageToDict(auth_verify_info)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
def call_find(self, keyword, user_id, domain):
params = {
'options': domain.plugin_info.options,
'secret_data': {},
'keyword': keyword,
'user_id': user_id
}
_LOGGER.info(f'[call_find] params: {params}')
try:
user_info = self.client.Auth.find(
params
)
_LOGGER.debug(f'[call_find] MessageToDict(user_info): '
f'{MessageToDict(user_info, preserving_proto_field_name=True)}')
return MessageToDict(user_info, preserving_proto_field_name=True)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
| StarcoderdataPython |
1788482 | <gh_stars>0
import math
import numpy as nx
from pylab import linspace
import PQmath
from numpy.random import normal
if __name__=='__main__':
if 0:
P = nx.array( [[1, 2, 3, 4, 5, 6],
[1, 2, 3, 4, 5, 6],
[1, 2.1, 3.01, 4, 5, 6]] )
P.transpose()
else:
t = linspace(0, 10*math.pi, 400 )
P = nx.sin(t)[:,nx.newaxis]
P = P + normal( 0, .1, P.shape )
Pstar = PQmath.smooth_position( P, 0.01, 0.5, 1e-9, 1e12)
from pylab import *
plot( t, P, 'b-' )
plot( t, Pstar, 'r-' )
show()
| StarcoderdataPython |
3339435 | <gh_stars>100-1000
from unittest_reinvent.running_modes.lib_invent_tests.logger_tests import *
from unittest_reinvent.running_modes.lib_invent_tests.reinforcement_learning_tests import *
from unittest_reinvent.running_modes.lib_invent_tests.scoring_strategy_tests import *
from unittest_reinvent.running_modes.lib_invent_tests.learning_strategy_tests import *
| StarcoderdataPython |
1643635 | <filename>AdventOfCode2020/solutions/day01/puzzle1.py
#!/usr/bin/env python3
from itertools import combinations
tab = []
with open("input", 'r', encoding="utf8") as input:
for number in input:
number = number[:-1]
tab.append(int(number))
# part 1
comb_list = (list(combinations(tab, r=2)))
for i in comb_list:
a, b = i
if a + b == 2020:
print("part1 :", a * b)
# part 2
comb_list = (list(combinations(tab, r=3)))
for i in comb_list:
a, b, c = i
if a + b + c == 2020:
print("part2: ", a * b * c)
| StarcoderdataPython |
1603088 | <filename>pe0001.py
def pe0001(upto):
total = 0
for i in range(upto):
if i % 3 == 0 or i % 5 == 0:
total += i
return total
print(pe0001(1000))
| StarcoderdataPython |
1605057 | import abc
import nakama.client
import nakama.config
import nakama.types
class BaseApi(abc.ABC):
def __init__(
self, http_client: nakama.client.HttpClient, nakama_config: nakama.config.NakamaConfig
):
self.http_client = http_client
self.nakama_config = nakama_config
class AccountApi(BaseApi):
def get(self, user_id: str = None) -> nakama.types.ApiResponse:
url = f"account/{user_id}" if user_id else "account"
return self.http_client.get(
self.nakama_config.build_url(f"console/{url}"), self.nakama_config.auth_header
)
class ConfigApi(BaseApi):
def get(self) -> nakama.types.ApiResponse:
return self.http_client.get(
self.nakama_config.build_url("console/config"), self.nakama_config.auth_header
)
class EndpointsApi(BaseApi):
def get(self) -> nakama.types.ApiResponse:
return self.http_client.get(
self.nakama_config.build_url("console/api/endpoints"), self.nakama_config.auth_header
)
class RuntimeApi(BaseApi):
def get(self) -> nakama.types.ApiResponse:
return self.http_client.get(
self.nakama_config.build_url("console/runtime"), self.nakama_config.auth_header
)
class StatusApi(BaseApi):
def get(self) -> nakama.types.ApiResponse:
return self.http_client.get(
self.nakama_config.build_url("console/status"), self.nakama_config.auth_header
)
class UserApi(BaseApi):
def create(
self, username: str, password: str, email: str, role: str
) -> nakama.types.ApiResponse:
payload = dict(username=username, password=password, email=email, role=role)
return self.http_client.post(
self.nakama_config.build_url("console/user"),
self.nakama_config.auth_header,
payload,
)
def get(
self, username: str = None, email: str = None, role: str = None
) -> nakama.types.ApiResponse:
return self.http_client.get(
self.nakama_config.build_url("console/user"),
self.nakama_config.auth_header,
)
def remove(self, username: str) -> nakama.types.ApiResponse:
return self.http_client.delete(
self.nakama_config.build_url(f"console/user?username={username}"),
self.nakama_config.auth_header,
)
class NakamaConsoleApi:
def __init__(self, **kwargs) -> None:
self.nakama_config = nakama.config.NakamaConfig(**kwargs)
self.http_client = nakama.client.HttpClient()
@property
def accounts(self):
return AccountApi(self.http_client, self.nakama_config)
@property
def config(self):
return ConfigApi(self.http_client, self.nakama_config)
@property
def endpoints(self):
return EndpointsApi(self.http_client, self.nakama_config)
@property
def runtime(self):
return RuntimeApi(self.http_client, self.nakama_config)
@property
def status(self):
return StatusApi(self.http_client, self.nakama_config)
@property
def users(self):
return UserApi(self.http_client, self.nakama_config)
def authenticate(self, username: str, password: str) -> str:
data = {"username": username, "password": password}
response = self.http_client.post(
self.nakama_config.build_url("console/authenticate"),
data=data,
)
self.nakama_config.token = response.payload["token"]
return response
| StarcoderdataPython |
44681 | <gh_stars>0
from django.contrib import admin
# Register your models here.
from goodsManage.models import *
class GoodInventoryInline(admin.TabularInline):
model = GoodInventory
extra = 1
@admin.register(GoodKind)
class GoodKindAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodKind._meta.fields if f.name != 'id']
@admin.register(Good)
class GoodAdmin(admin.ModelAdmin):
list_display = [f.name for f in Good._meta.fields if f.name != 'id']
list_filter = ('kind',)
search_fields = ('partNumber', 'partNumber_once', 'partNumber_old', 'type' )
inlines = (GoodInventoryInline,)
@admin.register(Department)
class DepartmentAdmin(admin.ModelAdmin):
list_display = [f.name for f in Department._meta.fields if f.name != 'id']
@admin.register(Person)
class PersonAdmin(admin.ModelAdmin):
list_display = [f.name for f in Person._meta.fields if f.name != 'id']
list_filter = ('department',)
search_fields = ('name',)
@admin.register(GoodInventory)
class GoodInventoryAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodInventory._meta.fields if f.name != 'id']
list_filter = ('department', 'good__kind',)
search_fields = ('good__type',)
@admin.register(GoodRequisition)
class GoodRequisitionAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodRequisition._meta.fields if f.name != 'id']
list_filter = ('datetime', 'person__department', 'good__kind',)
search_fields = ('good__type',)
@admin.register(GoodBack)
class GoodBackAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodBack._meta.fields if f.name != 'id']
list_filter = ('datetime', 'person__department', 'good__kind',)
#search_fields = ('person',)
@admin.register(GoodBuy)
class GoodBuyAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodBuy._meta.fields if f.name != 'id']
list_filter = ('date', 'person__department', 'good__kind',)
#search_fields = ('pr','po')
@admin.register(GoodAllocate)
class GoodAllocateAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodAllocate._meta.fields if f.name != 'id']
list_filter = ('datetime', 'person__department', 'toDepartment', 'good__kind',)
#search_fields = ('person',)
@admin.register(WastageStatus)
class WastageStatusAdmin(admin.ModelAdmin):
list_display = [f.name for f in WastageStatus._meta.fields if f.name != 'id']
@admin.register(GoodWastage)
class GoodWastageAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodWastage._meta.fields if f.name != 'id']
list_filter = ('datetime', 'person__department', 'good__kind',)
search_fields = ('good__type',)
@admin.register(GoodRepair)
class GoodRepairAdmin(admin.ModelAdmin):
list_display = [f.name for f in GoodRepair._meta.fields if f.name != 'id']
list_filter = ('date', 'person__department')
| StarcoderdataPython |
3392982 | """A module contains as set API for the puzzle grids."""
import string
import random
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Any, List, Optional, Sequence, Type, Union
from loguru import logger as _logger
from puzzle.properties import Coordinate, GridSize, LetterCoordinates
class Content(ABC):
"""The class represents an abstract content."""
__slots__: Sequence[str] = ()
@abstractmethod
def to_coordinates(self) -> LetterCoordinates:
"""Return the abstract coordinates of a content.
Returns:
dict: a collection of abstract letters coordinates.
"""
pass
@abstractmethod
def __str__(self) -> str:
"""Return an abstract content as a string.
Returns:
str: an abstract string content.
"""
pass
class Grid(ABC):
"""The class represents an abstract interface of a grid.
Any implementation of this interface allows to close connection:
- using context manager (**with** statement)
- automatically when an object will be deleted by garbage collector
- manually with **__del__**
"""
__slots__: Sequence[str] = ()
@property
@abstractmethod
def content(self) -> Content:
"""Create a new abstract grid content.
Returns:
Content: an abstract grid content.
"""
pass
@property
@abstractmethod
def height(self) -> int:
"""Specify an abstract grid height.
Returns:
int: an abstract grid height e.g `10`.
"""
pass
@property
@abstractmethod
def width(self) -> int:
"""Specify an abstract grid width.
Returns:
int: an abstract grid width e.g `10`.
"""
pass
@abstractmethod
def build(self) -> None:
"""Build an abstract grid."""
pass
@abstractmethod
def refresh(self) -> None:
"""Clear an abstract grid."""
pass
@abstractmethod
def __enter__(self) -> 'Grid':
"""Return runtime connection itself."""
pass
@abstractmethod
def __exit__(
self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Close connection itself.
Raise any exception triggered within the runtime context.
"""
pass
@classmethod
def __subclasshook__(cls, other: Any) -> Union[bool, NotImplementedError]:
"""Customize ``issubclass`` builtin function on the ABC level.
Args:
other: (any) other class type
Returns:
bool: True, False or NotImplemented of subclassing procedure.
"""
if cls is Grid:
method_order: Sequence[Any] = other.__mro__
for method in '__enter__', '__exit__':
for other_type in method_order:
if method in other_type.__dict__:
if other_type.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
return NotImplemented
class GridContent(Content):
"""The class represents a grid content."""
__slots__: Sequence[str] = ('_rows',)
def __init__(self, rows: List[str]) -> None:
self._rows = rows
def to_coordinates(self) -> LetterCoordinates:
"""Return the abstract coordinates of a content.
Every letter in a grid is able to contain multiple coordinates.
Example:
>>> content = GridContent(['a', 'b'])
>>> content.to_coordinates()
{'a': [Coordinate(0, 0)], 'b': [Coordinate(1, 0)]}
Returns:
dict: a collection of coordinates for letters.
"""
board: LetterCoordinates = {}
for row_index, row_value in enumerate(
str(self).split()
): # type: int, str
for column_index, column_value in enumerate(
row_value
): # type: int, str
if column_value not in board:
board[column_value] = []
board[column_value].append(Coordinate(row_index, column_index))
return board
def __str__(self) -> str:
"""Return grid content.
Returns:
str: an grid content as string.
Raises:
ValueError: if grid rows are empty.
"""
if len(self._rows) == 0:
raise ValueError('Cannot build a grid as it contains empty rows')
content: str = '\n'.join(self._rows)
_logger.info(f'The following grid of letters is generated\n{content}')
return content
class RandomWordsGrid(Grid):
"""The class represents randomly created grid of letters.
Any implementation of this interface allows to close connection:
- using context manager (**with** statement)
- automatically when an object will be deleted by garbage collector
- manually with **__del__**
Example:
>>> with RandomWordsGrid(GridSize(10, 10)) as grid:
>>> content = grid.content
...
"""
__slots__: Sequence[str] = ('_size', '_rows')
def __init__(self, grid_size: GridSize) -> None:
self._size = grid_size
self._rows: List[str] = []
@property
def content(self) -> Content:
"""Create a new grid content.
Returns:
Content: a grid content.
"""
return GridContent(self._rows)
@property
def height(self) -> int:
"""Specify a grid height.
Returns:
int: a grid height e.g `10`.
"""
return self._size.height
@property
def width(self) -> int:
"""Specify a grid width.
Returns:
int: an abstract grid width e.g `10`.
"""
return self._size.width
def build(self) -> None:
"""Create a grid of randomly created letters (a-z only).
Raises:
ValueError: if the size of a grid is invalid.
"""
_logger.info(f'{self._size} is used')
if (self.height < 0 or self.width < 0) or (
not self.height or not self.width
):
raise ValueError(
'Cannot generate a grid of letters due to '
f'invalid "{self.height}x{self.width}" grid size. '
'It should not contain negative or zero values!'
)
_logger.info('Generating a grid of random letters ...')
rows_counter: int = 0
while rows_counter < self.height:
next_row: str = ''.join(
random.choices(
population=string.ascii_lowercase,
k=self.width,
)
)
self._rows.append(next_row)
rows_counter += 1
def refresh(self) -> None:
"""Clear a grid of letters."""
self._rows = []
def __enter__(self) -> Grid:
"""Build grid rows of randomly created words.
Returns:
Grid: a grid connection.
"""
self.build()
return self
def __exit__(
self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Close a grid connection.
Raise any exception triggered within the runtime context.
"""
self.refresh()
| StarcoderdataPython |
3303861 | import os
import unittest
from datetime import datetime as dt
from satstac.sentinel.cli import parse_args
testpath = os.path.dirname(__file__)
class Test(unittest.TestCase):
def test_parse_no_args(self):
with self.assertRaises(SystemExit):
parse_args([''])
with self.assertRaises(SystemExit):
parse_args(['-h'])
def test_parse_args(self):
args = parse_args('ingest catalog.json'.split(' '))
assert(args['catalog'] == 'catalog.json')
| StarcoderdataPython |
98786 | <reponame>tabulon-ext/moban
import csv
from lml.plugin import PluginInfo
from moban import constants
@PluginInfo(constants.DATA_LOADER_EXTENSION, tags=["custom"])
def open_custom(file_name):
with open(file_name, "r") as data_csv:
csvreader = csv.reader(data_csv)
rows = []
for row in csvreader:
rows.append(row)
data = dict(zip(rows[0], rows[1]))
return data
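# Illustrative sketch of the input this loader expects (example data, not from the repo):
# a CSV whose first row holds keys and whose second row holds values, e.g.
#
#   name,version
#   moban,0.1
#
# open_custom("example.csv") would then return {'name': 'moban', 'version': '0.1'}.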
| StarcoderdataPython |
1622335 | <gh_stars>1-10
"""
app.routes.report
=================
"""
import json
from flask import Blueprint, Response, current_app, make_response, request
from app.extensions import csrf_protect
blueprint = Blueprint("report", __name__, url_prefix="/report")
@blueprint.route("/csp_violations", methods=["POST"])
@csrf_protect.exempt
def csp_report() -> Response:
"""Post Content Security Report to ``report-uri``.
Log CSP violations JSON payload.
:return: Response object with HTTP Status 204 (No Content) status.
"""
current_app.logger.info(
json.dumps(request.get_json(force=True), indent=4, sort_keys=True)
)
response = make_response()
response.status_code = 204
return response
| StarcoderdataPython |
1702137 | <reponame>dasyak/winagent
import asyncio
import json
import subprocess
from time import perf_counter
import requests
from agent import WindowsAgent
class TaskRunner(WindowsAgent):
def __init__(self, task_pk):
super().__init__()
self.task_pk = task_pk
self.task_url = f"{self.astor.server}/api/v1/{self.task_pk}/taskrunner/"
def run(self):
# called manually and not from within a check
ret = self.get_task()
if not ret:
return False
asyncio.run(self.run_task(ret))
async def run_while_in_event_loop(self):
# called from inside a check
ret = self.get_task()
if not ret:
return False
await asyncio.gather(self.run_task(ret))
def get_task(self):
try:
resp = requests.get(self.task_url, headers=self.headers, timeout=15)
except:
return False
else:
return resp.json()
async def run_task(self, data):
try:
script = data["script"]
timeout = data["timeout"]
except:
return False
try:
if script["shell"] == "python":
cmd = [
self.salt_call,
"win_agent.run_python_script",
script["filename"],
f"timeout={timeout}",
]
else:
cmd = [
self.salt_call,
"cmd.script",
script["filepath"],
f"shell={script['shell']}",
f"timeout={timeout}",
]
start = perf_counter()
proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
proc_timeout = int(timeout) + 2
try:
proc_stdout, proc_stderr = await asyncio.wait_for(
proc.communicate(), proc_timeout
)
except asyncio.TimeoutError:
try:
proc.terminate()
except:
pass
self.logger.error(f"Task timed out after {timeout} seconds")
proc_stdout, proc_stderr = False, False
stdout = ""
stderr = f"Task timed out after {timeout} seconds"
retcode = 98
stop = perf_counter()
if proc_stdout:
resp = json.loads(proc_stdout.decode("utf-8", errors="ignore"))
retcode = resp["local"]["retcode"]
stdout = resp["local"]["stdout"]
stderr = resp["local"]["stderr"]
elif proc_stderr:
retcode = 99
stdout = ""
stderr = proc_stderr.decode("utf-8", errors="ignore")
payload = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
"execution_time": "{:.4f}".format(round(stop - start)),
}
resp = requests.patch(
self.task_url, json.dumps(payload), headers=self.headers, timeout=15,
)
except:
pass
return "ok"
| StarcoderdataPython |
1784610 | <reponame>alex-dow/sourcelyzer<filename>sourcelyzer/rest/utils/auth.py
from base64 import b64encode, b64decode
import hashlib
import os
class InvalidAuthToken(Exception):
pass
def gen_auth_token(username, password, userid, session_id, encoding='utf-8'):
salt = os.urandom(128)
if isinstance(session_id, str):
session_id = session_id.encode(encoding)
elif isinstance(session_id, int):
session_id = bytes([session_id])
else:
session_id = str(session_id).encode(encoding)
token = hashlib.sha256()
token.update(username.encode(encoding))
token.update(password.encode(encoding))
token.update(bytes([userid]))
token.update(session_id)
token.update(salt)
b64salt = b64encode(salt).decode('utf-8')
b64token = b64encode(token.digest()).decode('utf-8')
return b64salt + '/' + b64token
def verify_auth_token(token, username, password, userid, session_id, encoding='utf-8'):
# 128 random salt bytes base64-encode to 172 characters, so the salt is the token prefix
salt = token[0:172]
# normalise session_id exactly as gen_auth_token does, so both sides hash the same bytes
if isinstance(session_id, str):
session_id = session_id.encode(encoding)
elif isinstance(session_id, int):
session_id = bytes([session_id])
else:
session_id = str(session_id).encode(encoding)
expected_token = hashlib.sha256()
expected_token.update(username.encode(encoding))
expected_token.update(password.encode(encoding))
expected_token.update(bytes([userid]))
expected_token.update(session_id)
expected_token.update(b64decode(salt))
# decode the digest to str before concatenating, mirroring gen_auth_token
b64token = b64encode(expected_token.digest()).decode('utf-8')
new_token = salt + '/' + b64token
if token != new_token:
raise InvalidAuthToken()
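# Round-trip sketch (hypothetical credentials, kept as comments only):
# token = gen_auth_token('alice', 'hunter2', 7, 'sess-1')
# verify_auth_token(token, 'alice', 'hunter2', 7, 'sess-1')   # passes silently
# verify_auth_token(token, 'alice', 'wrong', 7, 'sess-1')     # raises InvalidAuthToken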
| StarcoderdataPython |
3299310 | from sklearn.feature_extraction.text import CountVectorizer
class FeatureExtractor():
def buildVectorizer(self, data, kwargs):
"""
Constructs a CountVectorizer based on the given data.
Args:
data: Data to train the CountVectorizer
"""
# Instantiate CountVectorizer
self.vectorizer = CountVectorizer(**kwargs)#TODO Solve Error related to new arguments causing an array too big to fit normally in memory
# Train CountVectorizer
self.vectorizer.fit(data)
def process(self, feature, data):
"""
Processes a set of data in a way determined by the feature.
Args:
feature: Method to extract features from the data
data: the data to extract features from
Returns:
data: The feature extracted data
"""
# Transform the data
data = self.vectorizer.transform(data)
return data
| StarcoderdataPython |
3280191 | """
Very basic Q-learning model to play frozen lake from the gym library.
No hidden layers, simple q-table update method and exponential explore/exploit
rate of decay.
"""
import numpy as np
import gym
import random
import time
from IPython.display import clear_output
env = gym.make("FrozenLake-v0")
action_space = env.action_space.n
state_space = env.observation_space.n
q_table = np.zeros((state_space, action_space))
episodes = 10000
max_steps = 100
learning_rate = 0.1
discount_rate = 0.99 # gamma
# exploration vs exploitation: epsilon greedy strategy
exploration_rate = 1
max_exploration_rate = 1
min_exploration_rate = 0.01
exploration_decay_rate = 0.01
episode_rewards = []
# Qlearning algorithm
# everything for a single episode
for episode in range(episodes):
state = env.reset()
done = False
current_reward = 0
for step in range(max_steps):
# everything for a single time step
# explore or exploit this step?
exploration_threshold = random.uniform(0,1)
if exploration_threshold > exploration_rate:
# exploit
action = np.argmax(q_table[state,:])
else:
# explore
action = env.action_space.sample()
new_state, reward, done, info = env.step(action)
# print(reward)
# Update Qtable
q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[new_state, :]))
state = new_state
current_reward += reward
if done == True:
break
# exponential exploration rate decay
exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate * episode)
episode_rewards.append(current_reward)
print(episode_rewards)
# calculate and print the average reward per thousand episodes
rewards_per_thousand_eps = np.split(np.array(episode_rewards), episodes/1000)
count = 1000
# print("====== Avg. reward per thousand episodes ======\n")
# for r in rewards_per_thousand_eps:
# print(f"{count}: {sum(r/1000)}")
# count += 1000
#
# print(f"====== Updated Qtable ======\n{q_table}")
# visualising 3 episodes to see how the agent behaves.
for episode in range(3):
state = env.reset()
done = False
print(f"====== Episode {episode+1} ======\n\n")
time.sleep(2)
for step in range(max_steps):
clear_output(wait=True)
env.render()
time.sleep(0.3)
action = np.argmax(q_table[state,:])
new_state, reward, done, info = env.step(action)
if done:
clear_output(wait=True)
env.render()
if reward == 1:
print("====== Agent reached the goal! ======\n")
time.sleep(3)
clear_output(wait=True)
break
state = new_state
env.close()
"""
Final notes:
To further optimize, adding a reward decay for each step might help
the agent move more directly to the goal. This would have to be tweaked to find
the optimal decay value.
"""
| StarcoderdataPython |
169339 | <filename>test/integration/test_command.py
import os.path
import re
from six import assertRegex
from . import *
class TestCommand(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(
self, os.path.join(examples_dir, '07_commands'), *args, **kwargs
)
def test_hello(self):
assertRegex(self, self.build('hello'),
re.compile(r'^\s*hello$', re.MULTILINE))
def test_world(self):
assertRegex(self, self.build('world'),
re.compile(r'^\s*world$', re.MULTILINE))
def test_script(self):
assertRegex(self, self.build('script'),
re.compile(r'^\s*hello, world!$', re.MULTILINE))
self.assertExists(output_file('file'))
def test_alias(self):
output = self.build('hello-world')
assertRegex(self, output, re.compile(r'^\s*hello$', re.MULTILINE))
assertRegex(self, output, re.compile(r'^\s*world$', re.MULTILINE))
@skip_if_backend('msbuild')
class TestRunExecutable(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(self, 'run_executable', *args, **kwargs)
def test_env_run(self):
self.assertExists(output_file('file.txt'))
def test_cxx(self):
assertRegex(self, self.build('cxx'),
re.compile(r'^\s*hello from c\+\+!$', re.MULTILINE))
def test_java(self):
assertRegex(self, self.build('java'),
re.compile(r'^\s*hello from java!$', re.MULTILINE))
def test_java_classlist(self):
assertRegex(self, self.build('java-classlist'),
re.compile(r'^\s*hello from java!$', re.MULTILINE))
def test_python(self):
assertRegex(self, self.build('python'),
re.compile(r'^\s*hello from python!$', re.MULTILINE))
| StarcoderdataPython |
3308689 | from setuptools import setup
with open ( "README.md" , "r" ) as fh :
long_description = fh . read ()
setup(
name="organizador",
version="0.1.1",
description="Organiza archivos en carpetas teniendo como referencia las similitudes en sus nombres.",
long_description=long_description,
long_description_content_type="text/markdown",
author="<NAME>",
url="https://github.com/Armando-J/organizador",
packages=['organizador',],
)
| StarcoderdataPython |
3307059 | <gh_stars>0
import enum
class StrEnumMeta(enum.EnumMeta):
auto = enum.auto
def from_str(self, member: str):
try:
return self[member]
except KeyError:
# TODO: use `add_suggestion` from torchvision.prototype.utils._internal to improve the error message as
# soon as it is migrated.
raise ValueError(f"Unknown value '{member}' for {self.__name__}.") from None
class StrEnum(enum.Enum, metaclass=StrEnumMeta):
pass
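# A small usage sketch (the Color enum is made up for illustration):
#
# class Color(StrEnum):
#     RED = StrEnum.auto()
#     GREEN = StrEnum.auto()
#
# Color.from_str("RED")    # -> Color.RED
# Color.from_str("BLUE")   # -> ValueError: Unknown value 'BLUE' for Color.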
| StarcoderdataPython |
1604223 | # functionaltests/tests.py
# -*- coding: utf-8 -*-
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class HomePageTest(LiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(1)
def tearDown(self):
self.browser.quit()
# Project
def test_home_page(self):
self.browser.get(self.live_server_url)
page_text = self.browser.page_source
self.assertIn('Projekte', page_text)
def test_project_list(self):
self.browser.get('%s%s' % (self.live_server_url, '/project/'))
page_text = self.browser.page_source
self.assertIn('Projekte', page_text)
def test_new_project_test_page(self):
self.browser.get('%s%s' % (self.live_server_url, '/project/new/'))
page_text = self.browser.page_source
self.assertIn('Neues Projekt', page_text)
# Member
def test_member_list(self):
self.browser.get('%s%s' % (self.live_server_url, '/member/'))
page_text = self.browser.page_source
self.assertIn('Mitglieder', page_text)
def test_new_member_test_page(self):
self.browser.get('%s%s' % (self.live_server_url, '/member/new/'))
page_text = self.browser.page_source
self.assertIn('Neues Mitglied', page_text)
def test_add_member(self):
self.assertIn('a', 'a')
| StarcoderdataPython |
150305 | <reponame>tgragnato/geneva
import logging
import pytest
import actions.tree
import actions.drop
import actions.tamper
import actions.duplicate
import actions.sleep
import actions.utils
import actions.strategy
import evaluator
import evolve
import layers.layer
from scapy.all import IP, TCP, Raw
def test_mate(logger):
"""
Tests string representation.
"""
strat1 = actions.utils.parse("\/", logger)
strat2 = actions.utils.parse("\/", logger)
assert not actions.strategy.mate(strat1, strat2, 1)
strat1 = actions.utils.parse("[TCP:flags:R]-duplicate-| \/", logger)
strat2 = actions.utils.parse("[TCP:flags:S]-drop-| \/", logger)
# Mate with 100% probability
actions.strategy.mate(strat1, strat2, 1)
assert str(strat1).strip() == "[TCP:flags:R]-drop-| \/"
assert str(strat2).strip() == "[TCP:flags:S]-duplicate-| \/"
strat1 = actions.utils.parse("[TCP:flags:R]-duplicate(drop,drop)-| \/", logger)
strat2 = actions.utils.parse("[TCP:flags:S]-drop-| \/", logger)
assert str(strat1).strip() == "[TCP:flags:R]-duplicate(drop,drop)-| \/"
assert str(strat2).strip() == "[TCP:flags:S]-drop-| \/"
# Mate with 100% probability
actions.strategy.mate(strat1, strat2, 1)
assert str(strat1).strip() in ["[TCP:flags:R]-duplicate(drop,drop)-| \/",
"[TCP:flags:R]-drop-| \/"]
assert str(strat2).strip() in ["[TCP:flags:S]-duplicate(drop,drop)-| \/",
"[TCP:flags:S]-drop-| \/"]
# Cannot have a strategy with a space in it - malformed
with pytest.raises(AssertionError):
actions.utils.parse("[TCP:flags:R]-duplicate(drop, drop)-| \/", logger)
def test_init(logger):
"""
Tests various strategy initialization.
"""
# 1 inbound tree with 1 action, zero outbound trees
strat = actions.strategy.Strategy([], []).initialize(logger, 1, 0, 1, 0, None)
s = "[TCP:flags:R]-drop-| \/"
# initialize with a seed
assert str(actions.strategy.Strategy([], []).initialize(logger, 1, 1, 1, 1, s)).strip() == s
def test_run(logger):
"""
Tests strategy execution.
"""
strat1 = actions.utils.parse("[TCP:flags:R]-duplicate-| \/", logger)
strat2 = actions.utils.parse("[TCP:flags:S]-drop-| \/", logger)
strat3 = actions.utils.parse("[TCP:flags:A]-duplicate(tamper{TCP:dataofs:replace:0},)-| \/", logger)
strat4 = actions.utils.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:15239},),duplicate(tamper{TCP:flags:replace:S}(tamper{TCP:chksum:replace:14539}(tamper{TCP:seq:corrupt},),),))-| \/", logger)
p1 = layers.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags="S"))
packets = strat1.act_on_packet(p1, logger, direction="out")
assert packets, "Strategy dropped SYN packets"
assert len(packets) == 1
assert packets[0]["TCP"].flags == "S"
p1 = layers.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags="S"))
packets = strat2.act_on_packet(p1, logger, direction="out")
assert not packets, "Strategy failed to drop SYN packets"
p1 = layers.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags="A", dataofs=5))
packets = strat3.act_on_packet(p1, logger, direction="out")
assert packets, "Strategy dropped packets"
assert len(packets) == 2, "Incorrect number of packets emerged from forest"
assert packets[0]["TCP"].dataofs == 0, "Packet tamper failed"
assert packets[1]["TCP"].dataofs == 5, "Duplicate packet was tampered"
p1 = layers.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags="A", dataofs=5, chksum=100))
packets = strat4.act_on_packet(p1, logger, direction="out")
assert packets, "Strategy dropped packets"
assert len(packets) == 3, "Incorrect number of packets emerged from forest"
assert packets[0]["TCP"].flags == "R", "Packet tamper failed"
assert packets[0]["TCP"].chksum != p1["TCP"].chksum, "Packet tamper failed"
assert packets[1]["TCP"].flags == "S", "Packet tamper failed"
assert packets[1]["TCP"].chksum != p1["TCP"].chksum, "Packet tamper failed"
assert packets[1]["TCP"].seq != p1["TCP"].seq, "Packet tamper failed"
assert packets[2]["TCP"].flags == "A", "Duplicate failed"
strat4 = actions.utils.parse("[TCP:load:]-tamper{TCP:load:replace:mhe76jm0bd}(fragment{ip:-1:True}(tamper{IP:load:corrupt},drop),)-| \/ ", logger)
p1 = layers.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags="S"))
packets = strat4.act_on_packet(p1, logger)
# Will fail with scapy 2.4.2 if packet is reparsed
strat5 = actions.utils.parse("[TCP:options-eol:]-tamper{TCP:load:replace:o}(tamper{TCP:dataofs:replace:11},)-| \/", logger)
p1 = layers.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags="S"))
packets = strat5.act_on_packet(p1, logger)
def test_mutate():
"""
Mutates some stratiges
"""
logger = logging.getLogger("test")
logger.setLevel(logging.ERROR)
strat1 = actions.utils.parse("\/", logger)
strat1.environment_id = 1000
strat1.mutate(logger)
assert len(strat1.out_actions) == 1
assert len(strat1.in_actions) == 1
assert strat1.out_actions[0].environment_id == 1000
strat1.out_actions[0].mutate()
assert strat1.out_actions[0].environment_id == 1000
def test_pretty_print(logger):
"""
Tests if the string representation of this strategy is correct
"""
strat = actions.utils.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:corrupt},),)-| \/ ", logger)
correct = "TCP:flags:A\nduplicate\n├── tamper{TCP:flags:replace:R}\n│ └── tamper{TCP:chksum:corrupt}\n│ └── ===> \n└── ===> \n \n \/ \n "
assert strat.pretty_print() == correct
def test_collection(logger):
"""
Tests collection phase.
"""
# Create an evaluator
cmd = [
"--test-type", "echo",
"--censor", "censor2",
"--log", actions.utils.CONSOLE_LOG_LEVEL,
"--no-skip-empty",
"--bad-word", "facebook",
"--output-directory", actions.utils.RUN_DIRECTORY
]
tester = evaluator.Evaluator(cmd, logger)
canary = evolve.generate_strategy(logger, 0, 0, 0, 0, None)
environment_id = tester.canary_phase(canary)
packets = actions.utils.read_packets(environment_id)
assert packets
test_pop = []
for _ in range(0, 5):
test_pop.append(evolve.generate_strategy(logger, 0, 0, 0, 0, None))
environment_id = evolve.run_collection_phase(logger, tester)
packets = actions.utils.read_packets(environment_id)
assert packets
assert len(packets) > 1
def test_sleep_parse_handling(logger):
"""
Tests that the sleep action handles bad parsing.
"""
print("Testing incorrect parsing:")
assert not actions.sleep.SleepAction().parse("THISHSOULDFAIL", logger)
assert actions.sleep.SleepAction().parse("10.5", logger)
def test_get_from_fuzzed_or_real(logger):
"""
Tests utils.get_from_fuzzed_or_real_packet(environment_id, real_packet_probability):
"""
# Create an evaluator
cmd = [
"--test-type", "echo",
"--censor", "censor2",
"--log", actions.utils.CONSOLE_LOG_LEVEL,
"--no-skip-empty",
"--bad-word", "facebook",
"--output-directory", actions.utils.RUN_DIRECTORY
]
tester = evaluator.Evaluator(cmd, logger)
canary = evolve.generate_strategy(logger, 0, 0, 0, 0, None)
environment_id = tester.canary_phase(canary)
for i in range(0, 100):
proto, field, value = actions.utils.get_from_fuzzed_or_real_packet(environment_id, 1)
assert proto
assert field
assert value is not None
proto, field, value = actions.utils.get_from_fuzzed_or_real_packet(environment_id, 0)
assert proto
assert field
assert value is not None
def test_fail_cases(logger):
"""
Odd strategies that have caused failures in nightly testing.
"""
s = "[IP:proto:6]-tamper{IP:proto:replace:125}(fragment{tcp:48:True:26}(tamper{TCP:options-md5header:replace:37f0e737da65224ea03d46c713ed6fd2},),)-| \/ "
s = actions.utils.parse(s, logger)
p = layers.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags="S")/Raw("aaaaaaaaaa"))
s.act_on_packet(p, logger)
| StarcoderdataPython |
1763270 | <reponame>WillDaSilva/daily-questions
from datetime import date, timedelta
from dateutil.parser import parse
def working_days(year, weekend=(5, 6), holidays=tuple()):
year_start, year_end = date(year, 1, 1), date(year, 12, 31)
year_range = range((year_end - year_start).days + 1)
year_dates = (year_start + timedelta(x) for x in year_range)
yield from (x for x in year_dates if not (
x.weekday() in weekend or x in [parse(h).date() for h in holidays]))
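# Example call (holiday strings are assumed inputs; dateutil.parser handles the format):
#
# list(working_days(2021, holidays=("2021-01-01",)))
# -> every weekday date object in 2021 except New Year's Day (Friday 2021-01-01);
#    Saturdays and Sundays are already dropped by the default weekend=(5, 6).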
| StarcoderdataPython |
1759157 | #! python
# coding:utf-8
import svgwrite
import maya.cmds as cmds
import nnutil as nu
def draw_edge(filepath, imagesize=4096, stroke_width=1, integer_mode=False, normalize=True):
"""
Export the selected edges to an SVG file at the given path.
integer_mode: if True, truncate UV coordinates to whole pixels after conversion;
useful when horizontal/vertical lines are meant to be used directly as a texture.
normalize: if True, wrap UV coordinates into the [0, 1) range.
"""
print(filepath)
dwg = svgwrite.Drawing(filepath, size=(imagesize, imagesize) )
selections = cmds.ls(selection=True, flatten=True)
for edge in selections:
vf_list = nu.to_vtxface(edge)
vf_pairs = []
if len(vf_list) > 2 :
vf_pairs = [vf_list[0:0+2] , vf_list[2:2+2]]
else:
vf_pairs = [vf_list[0:0+2]]
# draw the line
for vf_pair in vf_pairs:
if vf_pair[0]:
uv_comp1, uv_comp2 = nu.to_uv(vf_pair)
uv_coord1 = nu.get_uv_coord(uv_comp1)
uv_coord2 = nu.get_uv_coord(uv_comp2)
if normalize:
uv_coord1 = [x % 1.0 for x in uv_coord1]
uv_coord2 = [x % 1.0 for x in uv_coord2]
uv_px_coord1 = nu.mul(uv_coord1, imagesize)
uv_px_coord2 = nu.mul(uv_coord2, imagesize)
# flip vertically
uv_px_coord1[1] = imagesize - uv_px_coord1[1]
uv_px_coord2[1] = imagesize - uv_px_coord2[1]
# integer coordinate mode
if integer_mode:
uv_px_coord1 = [int(x) for x in uv_px_coord1]
uv_px_coord2 = [int(x) for x in uv_px_coord2]
dwg.add(dwg.line(uv_px_coord1, uv_px_coord2, stroke=svgwrite.rgb(0, 0, 0, '%'), stroke_width=stroke_width))
# save as a file
dwg.save()
| StarcoderdataPython |
3220612 | <reponame>Yunicorn228/web-tools
import logging
from server.auth import user_mediacloud_client, user_admin_mediacloud_client
from flask import request
logger = logging.getLogger(__name__)
MAX_SOURCES = 60
def media_search_with_page(search_str, tags_id=None, **kwargs):
link_id = request.args.get('linkId', 0)
user_mc = user_admin_mediacloud_client()
media_page = user_mc.mediaList(name_like=search_str, tags_id=tags_id, last_media_id=link_id, rows=100,
sort="num_stories", **kwargs)
if len(media_page) == 0:
last_media_id = -1
else:
last_media_id = media_page[len(media_page)-1]['media_id']
return media_page, last_media_id
def media_search(search_str, tags_id=None, **kwargs):
user_mc = user_mediacloud_client()
return user_mc.mediaList(name_like=search_str, tags_id=tags_id, rows=MAX_SOURCES, sort="num_stories", **kwargs)
def collection_search_with_page(search_str, public_only, tag_sets_id_list):
link_id = request.args.get('linkId') if 'linkId' in request.args else 0
user_mc = user_mediacloud_client()
collection_page = user_mc.tagList(tag_sets_id_list, public_only=public_only, name_like=search_str, rows=100,
last_tags_id=link_id,)
if len(collection_page) == 0:
last_tags_id = -1
else:
last_tags_id = collection_page[len(collection_page)-1]['tags_id']
return collection_page, last_tags_id
def collection_search(search_str, public_only, tag_sets_id_list):
user_mc = user_mediacloud_client()
return user_mc.tagList(tag_sets_id_list, public_only=public_only, name_like=search_str)
| StarcoderdataPython |
150717 | <reponame>zeemzoet/nuke
import nukescripts
axis = 1
nuke.thisNode()['code'].execute()
_input = checkInput()
if _input['cam'] and _input['geo']:
### checks how many vertices are selected
i = 0
for vertex in nukescripts.snap3d.selectedPoints():
i += 1
if i:
gen = nukescripts.snap3d.selectedPoints()
points = [point for point in gen]
avgLen = int(len(points)/2)
x = 0; y = 0; z = 0
for i in range(avgLen):
x += points[i][0]
y += points[i][1]
z += points[i][2]
x /= avgLen; y /= avgLen; z /= avgLen
nuke.toNode('xPt%d' %axis)['translate'].setValue([x,y,z])
else:
nuke.message('Select some vertices first')
elif not _input['geo']:
nuke.message('Geometry is not connected or recognized')
else:
nuke.message('Camera is not connected or recognized')
| StarcoderdataPython |
4811615 | <reponame>sudeep0901/python<filename>FaceDetection/readimagewithcv2.py
import numpy as np
import cv2
img = cv2.imread('image.jpg')
# print(img)
while True:
cv2.imshow('mandrill', img)
if cv2.waitKey(1) & 0xFF == 27: # getting escape key
break
cv2.imwrite("final_image.png", img)
cv2.destroyAllWindows()
| StarcoderdataPython |
3227195 | # -*- coding: utf-8 -*-
"""Header here."""
import numpy as np
def borehole_model(x, theta):
"""Given x and theta, return matrix of [row x] times [row theta] of values."""
return f
def borehole_true(x):
"""Given x, return matrix of [row x] times 1 of values."""
return y
| StarcoderdataPython |
107016 | from . import tools
import os
from datetime import datetime
import logging
import matplotlib.cm as mplcm
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import Layout
import ipywidgets as widgets
from IPython.display import display
import cv2
DEFAULT_EXTENSIONS = ['jpg', 'png', 'tif', 'iff', 'peg', 'ppm']
class OutputWidgetHandler(logging.Handler):
""" Custom logging handler sending logs to an output widget """
def __init__(self, *args, **kwargs):
super(OutputWidgetHandler, self).__init__(*args, **kwargs)
layout = {
'width': '100%',
'height': '160px',
'border': '1px solid black'
}
self.out = widgets.Output(layout=layout)
def emit(self, record):
""" Overload of logging.Handler method """
formatted_record = self.format(record)
new_output = {
'name': 'stdout',
'output_type': 'stream',
'text': formatted_record + '\n'
}
self.out.outputs = (new_output,) + self.out.outputs
def show_logs(self):
""" Show the logs """
display(self.out)
def clear_logs(self):
""" Clear the current logs """
self.out.clear_output()
def create_logger():
logger = logging.getLogger(__name__)
handler = OutputWidgetHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return handler, logger
# Global variables set in tools module:
# tools.set_binary_thresholds()
# global original_shape
# global target_binary
# global target_overlay
# global target_grey
# tools.adjust_contour_filters()
# global filtered_contours
def set_binary_thresholds(target_fn, cropx=None, cropy=None, thresholds=(100, 255), invert=False, gamma=1.0,
brightness=0, contrast=0, clahe=False, clahe_window=50,
figwidth=32, figheight=16, displayplot=True):
# set global variables to be available to the widgets:
global original_shape
global target_binary
global target_overlay
global target_grey
# print(target_fn, thresholds, figwidth, figheight)
# get initial images:
if cropx and cropy:
x0, x1 = cropx
y0, y1 = cropy
crop = (y0, y1, x0, x1)
else:
crop = None
target_original, target_overlay, target_grey = tools.get_base_images(target_fn, crop=crop)
# invert
if invert:
target_grey = cv2.bitwise_not(target_grey)
# apply contrast limited adaptive histogram equalization (CLAHE)
if clahe:
clahe_model = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(clahe_window, clahe_window))
target_grey = clahe_model.apply(target_grey)
# apply brightness/contrast
target_bc = tools.apply_contrast(target_grey, contrast, brightness)
# apply gamma transformation
target_gamma = tools.apply_gamma(target_bc, gamma)
# convert to binary image
target_binary = tools.get_binary(target_gamma, thresh=thresholds[0], maxval=thresholds[1])
# display output
if displayplot:
tools.display_three_plots(target_original, target_bc, target_binary, figsize=(figwidth, figheight,))
original_shape = target_original.shape
# return target_binary, target_overlay
def adjust_contour_filters(figwidth=32, figheight=16, target_fn=None,
area=(20, 50000), contour_ratio=0.67, minwidth=20, ):
global filtered_contours
target_savedir = tools.get_savedir(target_fn)
minarea = area[0]
maxarea = area[1]
# calculate contours of images
(contours, hierarchy) = cv2.findContours(target_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# annotate contours
filtered_ids, filtered_contours = tools.collect_contours(contours, hierarchy, minarea=minarea, maxarea=maxarea,
skip_first=False,
contour_ratio=contour_ratio, minwidth=minwidth
)
# draw contours
target_contours = tools.draw_contours(target_overlay, filtered_contours, target_savedir=target_savedir,
color=(255, 0, 0), figwidth=figwidth / 2, figheight=figheight,
)
def widget_find_discontinuities(ksize=(3, 13), edge_thresholds=(15, 100), min_length=30, target_fn=None):
min_edge_threshold, max_edge_threshold = edge_thresholds
target_savedir = tools.get_savedir(target_fn)
tools.find_discontinuities(target_grey, ksize=(3, 13), min_edge_threshold=15, max_edge_threshold=100, min_length=30,
target_savedir=target_savedir)
def widget_map_color(cmap, ):
"Displays greyscale image and an LUT-converted image"
if target_grey.shape[0] / target_grey.shape[1] < 1:
tools.display_two_plots_v(target_grey, tools.apply_cmap(target_grey, cmap=cmap), figsize=(16,32))
else:
tools.display_two_plots(target_grey, tools.apply_cmap(target_grey, cmap=cmap), figsize=(32, 16))
def widget_contour_similarity(target_fn=None, figsize=(30, 60), nrows=0, ncols=0, equalize=True,
cmap=mplcm.gist_ncar):
target_savedir = tools.get_savedir(target_fn)
df_matchDist, Z, band_images, sorted_idx = tools.get_similar_bands(filtered_contours,
target_savedir,
target_grey,
)
idx_filtered = tools.plot_colored_bands(sorted_idx, band_images, target_savedir, figsize=figsize, nrows=nrows,
ncols=ncols,
equalize=equalize, cmap=cmap
)
def widget_similarity_listener(b):
widget_contour_similarity(wfilepath.value)
def widget_plot_dendrogram():
return None
def widget_equalize(rows, columns, saveas, savetype, show_images):
if show_images:
splitsave = saveas
else:
splitsave = None
splits = tools.split_image(target_grey, rows, columns, splitsave, savetype, show_images)
equalized_cols = [np.vstack([cv2.equalizeHist(img) for img in col]) for col in splits if len(col) > 0]
res = np.hstack(equalized_cols) # stacking images side-by-side
plt.close()
fig, ax = plt.subplots(figsize=(20, 10))
plt.imshow(res)
plt.tight_layout()
# plt.savefig(f"{saveas}_equalized.{savetype}")
cv2.imwrite(f"{saveas}_equalized.{savetype}", res)
def widget_noise_calculator(filepath, gaussian_k, median_k, bilateral_k, bilateral_r, figwidth, figheight):
# analyse the already-prepared global target_grey; filepath stays in the signature for the widget mapping
tools.calculate_noise(target_grey, gaussian_k, median_k, bilateral_k, bilateral_r, show=True,
figsize=(figwidth, figheight))
def load_binary_widgets(DIRECTORY, ext_list=DEFAULT_EXTENSIONS):
"""
Loads the widgets necessary for image cropping and exposure adjustment.
Parameters
----------
DIRECTORY: str
The location containing the image(s) to crop
ext_list: list
List of file extensions (as strings) to display
Returns
-------
wdirectory, wfilepath, wcropx, wcropy, winvert, wclahe, wclahewin, wbrange, wgamma, wbright, wcontrast, wfigwidth, wfigheight
widget objects
"""
global wfilepath # globalize to make available to observe & update functions
# define styling of widgets:
items_layout = Layout(width='auto')
# define all widgets for binary thresholding and output figsize
wdirectory = widgets.Text(value=DIRECTORY, description="Directory of images:")
wfilepath = widgets.Dropdown(
options=[os.path.join(DIRECTORY, f) for f in os.listdir(DIRECTORY) if
f[-3:].upper() in [ext.upper() for ext in ext_list]],
description='File:', layout=items_layout)
def update_image_options(change):
wfilepath.options = [os.path.join(change.new, f) for f in os.listdir(change.new) if
                             f[-3:].upper() in [ext.upper() for ext in ext_list]]
wdirectory.observe(update_image_options, 'value')
wcropx = widgets.IntRangeSlider(value=[0, 1000], min=0, max=1000, step=10, description='Crop X axis:',
continuous_update=False, layout=items_layout)
wcropy = widgets.IntRangeSlider(value=[0, 1000], min=0, max=1000, step=10, description='Crop Y axis:',
continuous_update=False, layout=items_layout)
winvert = widgets.Checkbox(value=False, description="Invert image", layout=items_layout)
wclahe = widgets.Checkbox(value=False, description="CLAHE equalization:", layout=items_layout)
wclahewin = widgets.IntSlider(value=50, min=1, max=200, step=1, description='CLAHE window:', layout=items_layout)
wbrange = widgets.IntRangeSlider(value=[100, 255], min=0, max=255, step=1, description='Thresholds:',
layout=items_layout)
wgamma = widgets.FloatSlider(value=0.8, min=0, max=2.0, step=0.05, description="Gamma:", layout=items_layout)
wbright = widgets.IntSlider(value=0, min=-100, max=100, step=1, description="Brightness:", layout=items_layout)
wcontrast = widgets.FloatSlider(value=0.8, min=0, max=3.0, step=0.05, description="Contrast:", layout=items_layout)
wfigwidth = widgets.IntSlider(value=32, min=1, max=32, step=1, description='Fig width:', layout=items_layout)
wfigheight = widgets.IntSlider(value=16, min=1, max=48, step=1, description='Fig height:', layout=items_layout)
return wdirectory, wfilepath, wcropx, wcropy, winvert, wclahe, wclahewin, wbrange, wgamma, wbright, wcontrast, wfigwidth, wfigheight
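# Example (a minimal sketch of how these widgets can be wired up outside of
# load_evaluation_widget; "images/" is a placeholder directory and display() assumes a
# Jupyter/IPython context):
#
# (wdirectory, wfilepath, wcropx, wcropy, winvert, wclahe, wclahewin, wbrange, wgamma,
#  wbright, wcontrast, wfigwidth, wfigheight) = load_binary_widgets("images/")
# out = widgets.interactive_output(set_binary_thresholds,
#                                  {'target_fn': wfilepath, 'cropx': wcropx, 'cropy': wcropy,
#                                   'thresholds': wbrange, 'invert': winvert, 'gamma': wgamma,
#                                   'brightness': wbright, 'contrast': wcontrast, 'clahe': wclahe,
#                                   'clahe_window': wclahewin, 'figwidth': wfigwidth,
#                                   'figheight': wfigheight})
# display(widgets.VBox([wfilepath, wcropx, wcropy, wbrange]), out)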
def load_evaluation_widget(DIRECTORY, ext_list=DEFAULT_EXTENSIONS):
"""
Load the main widget for analyzing images from the specified directory
Parameters
----------
DIRECTORY : str
directory path of all images to concatenate
ext_list : list
list of all file extensions to include
Returns
-------
widget_tab
widget object
"""
# define styling of widgets:
items_layout = Layout(width='auto')
# define all widgets for binary thresholding and output figsize
wdirectory, wfilepath, wcropx, wcropy, winvert, wclahe, wclahewin, wbrange, wgamma, wbright, wcontrast, wfigwidth, wfigheight = load_binary_widgets(
DIRECTORY, ext_list)
# set widgets for contour extraction
warange = widgets.IntRangeSlider(value=[20, 10000], min=10, max=10000, step=10, description='Area:',
continuous_update=False, layout=items_layout)
wratio = widgets.FloatSlider(value=0.67, min=0.1, max=2.0, step=0.02, description='ht/wdth ratio:',
continuous_update=False, layout=items_layout)
wminwidth = widgets.IntSlider(value=30, min=1, max=250, step=1, description='Min width:', continuous_update=False,
layout=items_layout)
# ### set widgets for edge discontinuity detection
wksize = widgets.IntRangeSlider(value=[3, 13], min=1, max=21, step=2, description='k size:',
continuous_update=False,
layout=items_layout)
wedgethresholds = widgets.IntRangeSlider(value=[15, 100], min=1, max=100, step=1, description='Edge thresholds:',
continuous_update=False, layout=items_layout)
wminedgelen = widgets.IntSlider(value=30, min=1, max=250, step=1, description='Min edge length:',
continuous_update=False, layout=items_layout)
### set widgets for color mapping
cmap_list = ['Spectral', 'coolwarm', 'gist_rainbow', 'viridis', 'jet', 'inferno', 'hsv', 'nipy_spectral',
'gist_ncar',
'gist_stern', 'RdYlGn', ]
wcmaps = widgets.Dropdown(options=[(x, getattr(mplcm, x)) for x in cmap_list], description='CMAP:',
layout=items_layout)
wsavecmap = widgets.Button(description="FUTURE: Save Me")
### set widgets for band similarity detection
wcalcsimilar = widgets.Button(description="Show similarities")
wdummy = widgets.IntSlider(value=30, min=1, max=250, step=1, description='Dummy slider:', continuous_update=False,
layout=items_layout)
wsavebands = widgets.Button(description="FUTURE: Save Me")
wbandfigsize = widgets.IntRangeSlider(value=[30, 30], min=5, max=120, step=1, description='Figsize (w,h):',
continuous_update=False, layout=items_layout)
wbandnrows = widgets.IntSlider(value=0, min=0, max=40, step=1, description='Num. rows:', continuous_update=False,
layout=items_layout)
wbandncols = widgets.IntSlider(value=0, min=0, max=40, step=1, description='Num. cols:', continuous_update=False,
layout=items_layout)
wequalize = widgets.Checkbox(value=True, description="Equalize bands", layout=items_layout)
wcalcsimilar.on_click(widget_similarity_listener)
### set widgets for noise detection
wgaussian = widgets.IntSlider(value=5, min=1, max=15, step=2, description='Gaussian kernel size:',
continuous_update=False,
layout=items_layout)
wmedian = widgets.IntSlider(value=5, min=1, max=15, step=2, description='Median kernel size:',
continuous_update=False,
layout=items_layout)
wbilateralk = widgets.IntSlider(value=9, min=1, max=15, step=2, description='Bilateral kernel size:',
continuous_update=False,
layout=items_layout)
wbilateralr = widgets.IntSlider(value=25, min=1, max=95, step=2, description='Bilateral radius:',
continuous_update=False,
layout=items_layout)
wnfigwidth = widgets.BoundedIntText(value=20, min=1, max=100, step=1, description="Figure width:",
layout=items_layout)
wnfigheight = widgets.BoundedIntText(value=30, min=1, max=100, step=1, description="Figure height:",
layout=items_layout)
# set reporting of widget values
widgetlist = [wdirectory, wfilepath, wcropx, wcropy, winvert, wclahe, wclahewin, wbrange, wgamma, wbright, wcontrast,
wfigwidth, wfigheight, warange, wratio, wminwidth, wksize, wedgethresholds, wminedgelen, wcmaps,
wsavecmap,
wbandfigsize, wbandnrows, wbandncols, wequalize, wgaussian, wmedian, wbilateralk, wbilateralr,
wnfigwidth, wnfigheight,
]
widgetnames = ["wdirectory", "wfilepath", "wcropx", "wcropy", "winvert", "wclahe", "wclahewin", "wbrange", "wgamma",
"wbright", "wcontrast",
"wfigwidth", "wfigheight", "warange", "wratio", "wminwidth", "wksize", "wedgethresholds",
"wminedgelen", "wcmaps",
"wsavecmap",
"wbandfigsize", "wbandnrows", "wbandncols", "wequalize", "wgaussian", "wmedian", "wbilateralk",
"wbilateralr",
"wnfigwidth", "wnfigheight",
]
def get_widget_value_string():
valuelog = {"TIME": datetime.now()}
for i, w in enumerate(widgetlist):
try:
valuelog[widgetnames[i]] = w.value
except AttributeError:
pass
logstring = "\n".join([f"{w:<15s}: {v}" for w, v in valuelog.items()])
return logstring
def get_log_file():
savedir = os.path.join(wdirectory.value, 'log_files')
if os.path.exists(savedir):
pass
else:
os.mkdir(savedir)
analysis_file = os.path.basename(wfilepath.value)
logfile = os.path.join(savedir, f"{analysis_file}.log")
return logfile
wviewvalues = widgets.Button(description="Show widget values")
wsavelog = widgets.Button(description=f"Save to {get_log_file()}", layout={'width': 'auto'})
outlog = widgets.Output(layout={'border': '1px solid black'})
@outlog.capture(clear_output=True)
def report_widget_values(click):
logstring = get_widget_value_string()
print(logstring)
def save_value_log(click):
logfile = get_log_file()
logstring = get_widget_value_string()
with open(logfile, 'a') as handle:
handle.write(logstring)
def update_save_button(change):
wsavelog.description = f"Save to {get_log_file()}"
wviewvalues.on_click(report_widget_values)
wsavelog.on_click(save_value_log)
wfilepath.observe(update_save_button, 'value')
##########################
# customize binary display
outbin = widgets.interactive_output(set_binary_thresholds, {'target_fn': wfilepath,
'cropx': wcropx,
'cropy': wcropy,
'thresholds': wbrange,
'invert': winvert,
'gamma': wgamma,
'brightness': wbright,
'contrast': wcontrast,
'clahe': wclahe,
'clahe_window':wclahewin,
'figwidth': wfigwidth,
'figheight': wfigheight
})
# customize contour extraction display
outcont = widgets.interactive_output(adjust_contour_filters, {
'figwidth': wfigwidth,
'figheight': wfigheight,
'target_fn': wfilepath,
'area': warange, 'contour_ratio': wratio, 'minwidth': wminwidth,
})
# customize discontinuity finder display
outedge = widgets.interactive_output(widget_find_discontinuities, {'ksize': wksize,
'edge_thresholds': wedgethresholds,
'min_length': wminedgelen,
'target_fn': wfilepath,
})
# LUT color mapping display
outcmap = widgets.interactive_output(widget_map_color, {'cmap': wcmaps})
# customize noise display
outnoise = widgets.interactive_output(widget_noise_calculator,
{'filepath': wfilepath,
'gaussian_k': wgaussian,
'median_k': wmedian,
'bilateral_k': wbilateralk,
'bilateral_r': wbilateralr,
'figwidth': wnfigwidth,
'figheight': wnfigheight,
}, )
# customize band similarity display
outsimilar = widgets.interactive_output(widget_contour_similarity,
{'target_fn': wfilepath, 'figsize': wbandfigsize, 'nrows': wbandnrows,
'ncols': wbandncols,
'equalize': wequalize, 'cmap': wcmaps,
}, )
# update crop sliders with dimensions of original image
def update_xylim(change):
wcropx.max = original_shape[1]
wcropy.max = original_shape[0]
outbin.observe(update_xylim, )
# create tab views
box_layout = Layout(display='flex',
flex_flow='column',
align_items='stretch',
# border='dashed',
width='50%',
margin='10px',
padding='10px',
)
binarytab = widgets.VBox([widgets.VBox([wdirectory, wfilepath, wcropx, wcropy,
widgets.HBox([winvert, wclahe], ),
wclahewin, wbrange, wgamma, wbright, wcontrast, wfigwidth,
wfigheight],
layout=box_layout), outbin],
layout=Layout(border='solid', margin='3'))
contourtab = widgets.VBox([widgets.VBox([warange, wratio, wminwidth],
layout=box_layout), outcont],
layout=Layout(border='solid'))
edgetab = widgets.VBox([widgets.VBox([wksize, wedgethresholds, wminedgelen],
layout=box_layout), outedge],
layout=Layout(border='solid'))
noisetab = widgets.VBox([widgets.VBox([wgaussian, wmedian, wbilateralk, wbilateralr, wnfigwidth, wnfigheight],
layout=box_layout), outnoise, ],
layout=Layout(border='solid'))
cmaptab = widgets.VBox([widgets.VBox([wcmaps, wsavecmap],
layout=box_layout), outcmap, ],
layout=Layout(border='solid'))
bandstab = widgets.VBox([widgets.VBox([wcalcsimilar, wbandfigsize, wbandnrows, wbandncols, wcmaps, wequalize],
layout=box_layout), outsimilar, ],
layout=Layout(border='solid'))
reporttab = widgets.VBox([widgets.VBox([wviewvalues, wsavelog, ],
layout=box_layout), outlog, ],
layout=Layout(border='solid'))
# add layouts to tabs for condensed viewing and handling:
tab = widgets.Tab()
tab.children = [binarytab, contourtab, edgetab, noisetab, cmaptab, bandstab, reporttab, ]
tab.set_title(0, "Create Mask")
tab.set_title(1, "Create Contours")
tab.set_title(2, "Find Discontinuities")
tab.set_title(3, "View Noise")
tab.set_title(4, "View False Color")
tab.set_title(5, "View similarities")
tab.set_title(6, "View widget values")
return tab
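# Example (assumed usage from a Jupyter notebook; "gel_images/" is a placeholder path):
# from IPython.display import display
# tab = load_evaluation_widget("gel_images/")
# display(tab)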
def crop_and_equalize(DIRECTORY, ext_list=DEFAULT_EXTENSIONS):
# This interactive widget is for dividing the image up into columns and performing histogram equalization on each one.
# define styling of widgets:
items_layout = Layout(width='auto')
box_layout = Layout(display='flex',
flex_flow='column',
align_items='stretch',
# border='dashed',
width='75%',
margin='10px',
padding='10px',
)
wdirectory, wfilepath, wcropx, wcropy, winvert, wclahe, wclahewin, wbrange, wgamma, wbright, wcontrast, wfigwidth, wfigheight = load_binary_widgets(
        DIRECTORY, ext_list)
def update_xylim(change):
wcropx.max = original_shape[1]
wcropy.max = original_shape[0]
# customize display
outbin = widgets.interactive_output(set_binary_thresholds, {'target_fn': wfilepath,
'cropx': wcropx,
'cropy': wcropy,
'thresholds': wbrange,
'invert': winvert,
'gamma': wgamma,
'brightness': wbright,
'contrast': wcontrast,
'clahe': wclahe,
'figwidth': wfigwidth,
'figheight': wfigheight
})
outbin.observe(update_xylim, )
# define all widgets for image splitting and output split images
# wdirectory = widgets.Text(value=DIRECTORY, description="Directory of images:")
# wfilepath = widgets.Dropdown(options=[os.path.join(DIRECTORY, f) for f in os.listdir(DIRECTORY) if f[-3:] in ['jpg', 'png', 'peg', 'ppm']],description='File:', layout=items_layout)
# wdirectory.observe(update_image_options, 'value')
wrowfloat = widgets.FloatSlider(value=2, min=1, max=15.0, step=0.05, description="# rows:",
layout=Layout(width='80%'),
continuous_update=False)
wcolfloat = widgets.FloatSlider(value=2, min=1, max=15.0, step=0.05, description="# columns:",
layout=Layout(width='80%'), continuous_update=False)
wrowtext = widgets.FloatText(value=2, description='# rows:', disabled=False, layout=items_layout)
wcoltext = widgets.FloatText(value=2, description='# columns:', disabled=False, layout=items_layout)
wsavesplits = widgets.Text(value=f"{DIRECTORY}split_image", description="Save new images as:",
continuous_update=False)
wfiletype = widgets.Dropdown(options=['jpg', 'png', 'svg', 'tif'], description='File type:', layout=items_layout)
wshowsplit = widgets.Checkbox(value=False, description="Show splits:", layout=items_layout)
# customize display
outsplit = widgets.interactive_output(widget_equalize, {'rows': wrowfloat,
'columns': wcolfloat,
'saveas': wsavesplits,
'savetype': wfiletype,
'show_images': wshowsplit,
})
croppingtab = widgets.VBox([widgets.VBox([wdirectory, wfilepath, wcropx, wcropy, winvert, wbrange, wgamma,
wbright, wcontrast, wsavesplits, wfiletype],
layout=box_layout), outbin],
layout=Layout(border='solid', margin='3'))
splittingtab = widgets.VBox([widgets.VBox([widgets.HBox([wrowfloat, wrowtext]),
widgets.HBox([wcolfloat, wcoltext]),
wsavesplits,
wfiletype,
wshowsplit, ],
layout=box_layout), outsplit],
layout=Layout(border='solid', margin='3'))
# synchronise the slider and text box values
def update_col_val(*args):
wcolfloat.value = wcoltext.value
def update_row_val(*args):
wrowfloat.value = wrowtext.value
wcoltext.observe(update_col_val, 'value')
wrowtext.observe(update_row_val, 'value')
# add layouts to tabs for condensed viewing and handling:
tab = widgets.Tab()
tab.children = [croppingtab, splittingtab, ]
tab.set_title(0, "Crop image")
tab.set_title(1, "Split & Equalize")
return tab
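# Example (assumed usage, mirroring load_evaluation_widget above; the path and extension
# list are placeholders):
# crop_tab = crop_and_equalize("gel_images/", ext_list=['jpg', 'png'])
# display(crop_tab)  # display() assumes a Jupyter/IPython context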
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class Cluster(pulumi.CustomResource):
aks_config: pulumi.Output[dict]
"""
The Azure AKS configuration for `aks` Clusters. Conflicts with `eks_config`, `gke_config` and `rke_config` (list maxitems:1)
* `aadServerAppSecret` (`str`) - The secret of an Azure Active Directory server application (string)
* `aadTenantId` (`str`) - The ID of an Azure Active Directory tenant (string)
* `addClientAppId` (`str`) - The ID of an Azure Active Directory client application of type \"Native\". This application is for user login via kubectl (string)
* `addServerAppId` (`str`) - The ID of an Azure Active Directory server application of type \"Web app/API\". This application represents the managed cluster's apiserver (Server application) (string)
* `adminUsername` (`str`) - The administrator username to use for Linux hosts. Default `azureuser` (string)
* `agentDnsPrefix` (`str`) - DNS prefix to be used to create the FQDN for the agent pool (string)
* `agentOsDiskSize` (`float`) - GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the \"agent vm size\" specified. Default `0` (int)
* `agentPoolName` (`str`) - Name for the agent pool, up to 12 alphanumeric characters. Default `agentpool0` (string)
* `agentStorageProfile` (`str`) - Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default `ManagedDisks` (string)
* `agentVmSize` (`str`) - Size of machine in the agent pool. Default `Standard_D1_v2` (string)
* `authBaseUrl` (`str`) - Different authentication API url to use. Default `https://login.microsoftonline.com/` (string)
* `baseUrl` (`str`) - Different resource management API url to use. Default `https://management.azure.com/` (string)
* `client_id` (`str`) - Azure client ID to use (string)
* `client_secret` (`str`) - Azure client secret associated with the \"client id\" (string)
* `count` (`float`) - Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default `1` (int)
* `dnsServiceIp` (`str`) - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in \"service cidr\". Default `10.0.0.10` (string)
* `dockerBridgeCidr` (`str`) - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in \"service cidr\". Default `172.17.0.1/16` (string)
* `enableHttpApplicationRouting` (`bool`) - Enable the Kubernetes ingress with automatic public DNS name creation. Default `false` (bool)
* `enableMonitoring` (`bool`) - Turn on Azure Log Analytics monitoring. Uses the Log Analytics \"Default\" workspace if it exists, else creates one. if using an existing workspace, specifies \"log analytics workspace resource id\". Default `true` (bool)
* `kubernetesVersion` (`str`) - The Kubernetes master version (string)
* `location` (`str`) - Azure Kubernetes cluster location. Default `eastus` (string)
* `logAnalyticsWorkspace` (`str`) - The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
* `logAnalyticsWorkspaceResourceGroup` (`str`) - The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
* `masterDnsPrefix` (`str`) - DNS prefix to use for the Kubernetes cluster control plane (string)
* `maxPods` (`float`) - Maximum number of pods that can run on a node. Default `110` (int)
* `networkPlugin` (`str`) - Network plugin used for building Kubernetes network. Chooses from `azure` or `kubenet`. Default `azure` (string)
* `networkPolicy` (`str`) - Network policy used for building Kubernetes network. Chooses from `calico` (string)
* `podCidr` (`str`) - A CIDR notation IP range from which to assign Kubernetes Pod IPs when \"network plugin\" is specified in \"kubenet\". Default `192.168.127.12/16` (string)
* `resourceGroup` (`str`) - The name of the Cluster resource group (string)
* `serviceCidr` (`str`) - A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default `10.0.0.0/16` (string)
* `sshPublicKeyContents` (`str`) - Contents of the SSH public key used to authenticate with Linux hosts (string)
* `subnet` (`str`) - The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)
* `subscriptionId` (`str`) - Subscription credentials which uniquely identify Microsoft Azure subscription (string)
* `tag` (`dict`) - Tags for Kubernetes cluster. For example, foo=bar (map)
* `tenant_id` (`str`) - Azure tenant ID to use (string)
* `virtualNetwork` (`str`) - The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
* `virtualNetworkResourceGroup` (`str`) - The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
"""
annotations: pulumi.Output[dict]
"""
Annotations for cluster registration token object (map)
"""
cluster_auth_endpoint: pulumi.Output[dict]
"""
Enabling the [local cluster authorized endpoint](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#local-cluster-auth-endpoint) allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
* `ca_certs` (`str`) - CA certs for the authorized cluster endpoint (string)
* `enabled` (`bool`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `fqdn` (`str`) - FQDN for the authorized cluster endpoint (string)
"""
cluster_monitoring_input: pulumi.Output[dict]
"""
Cluster monitoring config. Any parameter defined in [rancher-monitoring charts](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) could be configured (list maxitems:1)
* `answers` (`dict`) - Key/value answers for monitor input (map)
"""
cluster_registration_token: pulumi.Output[dict]
"""
(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
* `annotations` (`dict`) - Annotations for cluster registration token object (map)
* `cluster_id` (`str`) - Cluster ID (string)
* `command` (`str`) - Command to execute in a imported k8s cluster (string)
* `id` (`str`) - (Computed) The ID of the resource (string)
* `insecureCommand` (`str`) - Insecure command to execute in a imported k8s cluster (string)
* `labels` (`dict`) - Labels for cluster registration token object (map)
* `manifestUrl` (`str`) - K8s manifest url to execute with `kubectl` to import an existing k8s cluster (string)
* `name` (`str`) - Name of cluster registration token (string)
* `nodeCommand` (`str`) - Node command to execute in linux nodes for custom k8s cluster (string)
* `token` (`str`) - Token for cluster registration token object (string)
* `windowsNodeCommand` (`str`) - Node command to execute in windows nodes for custom k8s cluster (string)
"""
cluster_template_answers: pulumi.Output[dict]
"""
Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
* `cluster_id` (`str`) - Cluster ID (string)
* `project_id` (`str`) - Project ID to apply answer (string)
* `values` (`dict`) - Key/values for answer (map)
"""
cluster_template_id: pulumi.Output[str]
"""
Cluster template ID. Just for Rancher v2.3.x and above (string)
"""
cluster_template_questions: pulumi.Output[list]
"""
Cluster template questions. Just for Rancher v2.3.x and above (list)
* `default` (`str`) - Default variable value (string)
* `required` (`bool`) - Required variable. Default `false` (bool)
* `type` (`str`) - Variable type. `boolean`, `int` and `string` are allowed. Default `string` (string)
* `variable` (`str`) - Variable name (string)
"""
cluster_template_revision_id: pulumi.Output[str]
"""
Cluster template revision ID. Just for Rancher v2.3.x and above (string)
"""
default_pod_security_policy_template_id: pulumi.Output[str]
"""
[Default pod security policy template id](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#pod-security-policy-support) (string)
"""
default_project_id: pulumi.Output[str]
"""
(Computed) Default project ID for the cluster (string)
"""
description: pulumi.Output[str]
"""
An optional description of this cluster (string)
"""
desired_agent_image: pulumi.Output[str]
"""
Desired agent image. Just for Rancher v2.3.x and above (string)
"""
desired_auth_image: pulumi.Output[str]
"""
Desired auth image. Just for Rancher v2.3.x and above (string)
"""
docker_root_dir: pulumi.Output[str]
"""
Desired docker root dir (string)
"""
driver: pulumi.Output[str]
"""
(Computed) The driver used for the Cluster. `imported`, `azurekubernetesservice`, `amazonelasticcontainerservice`, `googlekubernetesengine` and `rancherKubernetesEngine` are supported (string)
"""
eks_config: pulumi.Output[dict]
"""
The Amazon EKS configuration for `eks` Clusters. Conflicts with `aks_config`, `gke_config` and `rke_config` (list maxitems:1)
* `access_key` (`str`) - The AWS Client ID to use (string)
* `ami` (`str`) - AMI ID to use for the worker nodes instead of the default (string)
* `associateWorkerNodePublicIp` (`bool`) - Associate public ip EKS worker nodes. Default `true` (bool)
* `desiredNodes` (`float`) - The desired number of worker nodes. Just for Rancher v2.3.x and above. Default `3` (int)
* `instanceType` (`str`) - The type of machine to use for worker nodes. Default `t2.medium` (string)
* `keyPairName` (`str`) - Allow user to specify key name to use. Just for Rancher v2.2.7 and above (string)
* `kubernetesVersion` (`str`) - The Kubernetes master version (string)
* `maximumNodes` (`float`) - The maximum number of worker nodes. Default `3` (int)
* `minimumNodes` (`float`) - The minimum number of worker nodes. Default `1` (int)
* `nodeVolumeSize` (`float`) - The volume size for each node. Default `20` (int)
* `region` (`str`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `secret_key` (`str`) - The AWS Client Secret associated with the Client ID (string)
* `securityGroups` (`list`) - List of security groups to use for the cluster. If it's not specified Rancher will create a new security group (list)
* `serviceRole` (`str`) - The service role to use to perform the cluster operations in AWS. If it's not specified Rancher will create a new service role (string)
* `sessionToken` (`str`) - A session token to use with the client key and secret if applicable (string)
* `subnets` (`list`) - List of subnets in the virtual network to use. If it's not specified Rancher will create 3 news subnets (list)
* `userData` (`str`) - Pass user-data to the nodes to perform automated configuration tasks (string)
* `virtualNetwork` (`str`) - The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
"""
enable_cluster_alerting: pulumi.Output[bool]
"""
Enable built-in cluster alerting. Default `false` (bool)
"""
enable_cluster_istio: pulumi.Output[bool]
"""
Enable built-in cluster istio. Default `false`. Just for Rancher v2.3.x and above (bool)
"""
enable_cluster_monitoring: pulumi.Output[bool]
"""
Enable built-in cluster monitoring. Default `false` (bool)
"""
enable_network_policy: pulumi.Output[bool]
"""
Enable project network isolation. Default `false` (bool)
"""
gke_config: pulumi.Output[dict]
"""
The Google GKE configuration for `gke` Clusters. Conflicts with `aks_config`, `eks_config` and `rke_config` (list maxitems:1)
* `clusterIpv4Cidr` (`str`) - The IP address range of the container pods (string)
* `credential` (`str`) - The contents of the GC credential file (string)
* `description` (`str`) - An optional description of this cluster (string)
* `diskSizeGb` (`float`) - Size of the disk attached to each node. Default `100` (int)
* `diskType` (`str`) - Type of the disk attached to each node (string)
* `enableAlphaFeature` (`bool`) - To enable Kubernetes alpha feature. Default `true` (bool)
* `enableAutoRepair` (`bool`) - Specifies whether the node auto-repair is enabled for the node pool. Default `false` (bool)
* `enableAutoUpgrade` (`bool`) - Specifies whether node auto-upgrade is enabled for the node pool. Default `false` (bool)
* `enableHorizontalPodAutoscaling` (`bool`) - Enable horizontal pod autoscaling for the cluster. Default `true` (bool)
* `enableHttpLoadBalancing` (`bool`) - Enable HTTP load balancing on GKE cluster. Default `true` (bool)
* `enableKubernetesDashboard` (`bool`) - Whether to enable the Kubernetes dashboard. Default `false` (bool)
* `enableLegacyAbac` (`bool`) - Whether to enable legacy abac on the cluster. Default `false` (bool)
* `enableMasterAuthorizedNetwork` (`bool`)
* `enableNetworkPolicyConfig` (`bool`) - Enable stackdriver logging. Default `true` (bool)
* `enableNodepoolAutoscaling` (`bool`) - Enable nodepool autoscaling. Default `false` (bool)
* `enablePrivateEndpoint` (`bool`) - Whether the master's internal IP address is used as the cluster endpoint. Default `false` (bool)
* `enablePrivateNodes` (`bool`) - Whether nodes have internal IP address only. Default `false` (bool)
* `enableStackdriverLogging` (`bool`) - Enable stackdriver monitoring. Default `true` (bool)
* `enableStackdriverMonitoring` (`bool`) - Enable stackdriver monitoring on GKE cluster (bool)
* `imageType` (`str`) - The image to use for the worker nodes (string)
* `ipPolicyClusterIpv4CidrBlock` (`str`) - The IP address range for the cluster pod IPs (string)
* `ipPolicyClusterSecondaryRangeName` (`str`) - The name of the secondary range to be used for the cluster CIDR block (string)
* `ipPolicyCreateSubnetwork` (`bool`) - Whether a new subnetwork will be created automatically for the cluster. Default `false` (bool)
* `ipPolicyNodeIpv4CidrBlock` (`str`) - The IP address range of the instance IPs in this cluster (string)
* `ipPolicyServicesIpv4CidrBlock` (`str`) - The IP address range of the services IPs in this cluster (string)
* `ipPolicyServicesSecondaryRangeName` (`str`) - The name of the secondary range to be used for the services CIDR block (string)
* `ipPolicySubnetworkName` (`str`) - A custom subnetwork name to be used if createSubnetwork is true (string)
* `issueClientCertificate` (`bool`) - Issue a client certificate. Default `false` (bool)
* `kubernetesDashboard` (`bool`) - Enable the Kubernetes dashboard. Default `false` (bool)
* `labels` (`dict`) - Labels for cluster registration token object (map)
* `localSsdCount` (`float`) - The number of local SSD disks to be attached to the node. Default `0` (int)
* `locations` (`list`) - Locations for GKE cluster (list)
* `machineType` (`str`) - Machine type for GKE cluster (string)
* `maintenanceWindow` (`str`) - Maintenance window for GKE cluster (string)
* `masterAuthorizedNetworkCidrBlocks` (`list`) - Define up to 10 external networks that could access Kubernetes master through HTTPS (list)
* `masterIpv4CidrBlock` (`str`) - The IP range in CIDR notation to use for the hosted master network (string)
* `masterVersion` (`str`) - Master version for GKE cluster (string)
* `maxNodeCount` (`float`) - Maximum number of nodes in the NodePool. Must be >= minNodeCount. There has to enough quota to scale up the cluster. Default `0` (int)
* `minNodeCount` (`float`) - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount. Default `0` (int)
* `network` (`str`) - Network for GKE cluster (string)
* `nodeCount` (`float`) - Node count for GKE cluster. Default `3` (int)
* `nodePool` (`str`) - The ID of the cluster node pool (string)
* `nodeVersion` (`str`) - Node version for GKE cluster (string)
* `oauthScopes` (`list`) - The set of Google API scopes to be made available on all of the node VMs under the default service account (list)
* `preemptible` (`bool`) - Whether the nodes are created as preemptible VM instances. Default `false` (bool)
* `project_id` (`str`) - Project ID to apply answer (string)
* `resourceLabels` (`dict`) - The map of Kubernetes labels to be applied to each cluster (map)
* `serviceAccount` (`str`) - The Google Cloud Platform Service Account to be used by the node VMs (string)
* `subNetwork` (`str`) - Subnetwork for GKE cluster (string)
* `taints` (`list`) - List of Kubernetes taints to be applied to each node (list)
* `useIpAliases` (`bool`) - Whether alias IPs will be used for pod IPs in the cluster. Default `false` (bool)
* `zone` (`str`) - Zone GKE cluster (string)
"""
kube_config: pulumi.Output[str]
"""
(Computed) Kube Config generated for the cluster (string)
"""
labels: pulumi.Output[dict]
"""
Labels for cluster registration token object (map)
"""
name: pulumi.Output[str]
"""
Name of cluster registration token (string)
"""
rke_config: pulumi.Output[dict]
"""
The RKE configuration for `rke` Clusters. Conflicts with `aks_config`, `eks_config` and `gke_config` (list maxitems:1)
* `addonJobTimeout` (`float`) - Duration in seconds of addon job (int)
* `addons` (`str`) - Addons description to deploy on RKE cluster.
* `addonsIncludes` (`list`) - Addons yaml manifests to deploy on RKE cluster (list)
* `authentication` (`dict`) - Kubernetes cluster authentication (list maxitems:1)
* `sans` (`list`) - RKE sans for authentication ([]string)
* `strategy` (`str`) - RKE strategy for authentication (string)
* `authorization` (`dict`) - Kubernetes cluster authorization (list maxitems:1)
* `mode` (`str`) - RKE mode for authorization. `rbac` and `none` modes are available. Default `rbac` (string)
* `options` (`dict`) - RKE options for network (map)
* `bastionHost` (`dict`) - RKE bastion host (list maxitems:1)
* `address` (`str`) - Address ip for node (string)
* `port` (`str`) - Port for node. Default `22` (string)
* `sshAgentAuth` (`bool`) - Use ssh agent auth. Default `false` (bool)
* `sshKey` (`str`) - Node SSH private key (string)
* `sshKeyPath` (`str`) - Node SSH private key path (string)
* `user` (`str`) - Registry user (string)
* `cloudProvider` (`dict`) - RKE options for Calico network provider (string)
* `awsCloudProvider` (`dict`) - RKE AWS Cloud Provider config for Cloud Provider [rke-aws-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/aws/) (list maxitems:1)
* `global` (`dict`) - (list maxitems:1)
* `disableSecurityGroupIngress` (`bool`) - Default `false` (bool)
* `disableStrictZoneCheck` (`bool`) - Default `false` (bool)
* `elbSecurityGroup` (`str`) - (string)
* `kubernetesClusterId` (`str`) - (string)
* `kubernetesClusterTag` (`str`) - (string)
* `roleArn` (`str`) - (string)
* `routeTableId` (`str`) - (string)
* `subnetId` (`str`) - (string)
* `vpc` (`str`) - (string)
* `zone` (`str`) - Zone GKE cluster (string)
* `serviceOverrides` (`list`) - (list)
* `region` (`str`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `service` (`str`) - (string)
* `signingMethod` (`str`) - (string)
* `signingName` (`str`) - (string)
* `signingRegion` (`str`) - (string)
* `url` (`str`) - Registry URL (string)
* `azureCloudProvider` (`dict`) - RKE Azure Cloud Provider config for Cloud Provider [rke-azure-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/azure/) (list maxitems:1)
* `aadClientCertPassword` (`str`) - (string)
* `aadClientCertPath` (`str`) - (string)
* `aadClientId` (`str`) - (string)
* `aadClientSecret` (`str`) - (string)
* `cloud` (`str`) - (string)
* `cloudProviderBackoff` (`bool`) - (bool)
* `cloudProviderBackoffDuration` (`float`) - (int)
* `cloudProviderBackoffExponent` (`float`) - (int)
* `cloudProviderBackoffJitter` (`float`) - (int)
* `cloudProviderBackoffRetries` (`float`) - (int)
* `cloudProviderRateLimit` (`bool`) - (bool)
* `cloudProviderRateLimitBucket` (`float`) - (int)
* `cloudProviderRateLimitQps` (`float`) - (int)
* `location` (`str`) - Azure Kubernetes cluster location. Default `eastus` (string)
* `maximumLoadBalancerRuleCount` (`float`) - (int)
* `primaryAvailabilitySetName` (`str`) - (string)
* `primaryScaleSetName` (`str`) - (string)
* `resourceGroup` (`str`) - The name of the Cluster resource group (string)
* `routeTableName` (`str`) - (string)
* `securityGroupName` (`str`) - (string)
* `subnetName` (`str`) - (string)
* `subscriptionId` (`str`) - Subscription credentials which uniquely identify Microsoft Azure subscription (string)
* `tenant_id` (`str`) - Azure tenant ID to use (string)
* `useInstanceMetadata` (`bool`) - (bool)
* `useManagedIdentityExtension` (`bool`) - (bool)
* `vmType` (`str`) - (string)
* `vnetName` (`str`) - (string)
* `vnetResourceGroup` (`str`) - (string)
* `customCloudProvider` (`str`) - RKE Custom Cloud Provider config for Cloud Provider (string) (string)
* `name` (`str`) - Name of cluster registration token (string)
* `openstackCloudProvider` (`dict`) - RKE Openstack Cloud Provider config for Cloud Provider [rke-openstack-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/openstack/) (list maxitems:1)
* `blockStorage` (`dict`) - (list maxitems:1)
* `bsVersion` (`str`) - (string)
* `ignoreVolumeAz` (`bool`) - (string)
* `trustDevicePath` (`bool`) - (string)
* `global` (`dict`) - (list maxitems:1)
* `authUrl` (`str`) - (string)
* `caFile` (`str`) - (string)
* `domainId` (`str`) - Required if `domain_name` not provided. (string)
* `domainName` (`str`) - Required if `domain_id` not provided. (string)
* `password` (`str`) - Registry password (string)
* `region` (`str`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `tenant_id` (`str`) - Azure tenant ID to use (string)
* `tenantName` (`str`) - Required if `tenant_id` not provided. (string)
* `trustId` (`str`) - (string)
* `username` (`str`) - (string)
* `loadBalancer` (`dict`) - (list maxitems:1)
* `createMonitor` (`bool`) - (bool)
* `floatingNetworkId` (`str`) - (string)
* `lbMethod` (`str`) - (string)
* `lbProvider` (`str`) - (string)
* `lbVersion` (`str`) - (string)
* `manageSecurityGroups` (`bool`) - (bool)
* `monitorDelay` (`str`) - Default `60s` (string)
* `monitorMaxRetries` (`float`) - Default 5 (int)
* `monitorTimeout` (`str`) - Default `30s` (string)
* `subnetId` (`str`) - (string)
* `useOctavia` (`bool`) - (bool)
* `metadata` (`dict`) - (list maxitems:1)
* `requestTimeout` (`float`) - (int)
* `searchOrder` (`str`) - (string)
* `route` (`dict`) - (list maxitems:1)
* `routerId` (`str`) - (string)
* `vsphereCloudProvider` (`dict`) - RKE Vsphere Cloud Provider config for Cloud Provider [rke-vsphere-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/) Extra argument `name` is required on `virtual_center` configuration. (list maxitems:1)
* `disk` (`dict`) - (list maxitems:1)
* `scsiControllerType` (`str`) - (string)
* `global` (`dict`) - (list maxitems:1)
* `datacenters` (`str`) - (string)
* `insecureFlag` (`bool`) - (bool)
* `password` (`str`) - Registry password (string)
* `port` (`str`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`float`) - (int)
* `user` (`str`) - Registry user (string)
* `network` (`dict`) - Network for GKE cluster (string)
* `publicNetwork` (`str`) - (string)
* `virtualCenters` (`list`) - (List)
* `datacenters` (`str`) - (string)
* `name` (`str`) - Name of cluster registration token (string)
* `password` (`str`) - Registry password (string)
* `port` (`str`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`float`) - (int)
* `user` (`str`) - Registry user (string)
* `workspace` (`dict`) - (list maxitems:1)
* `datacenter` (`str`) - (string)
* `defaultDatastore` (`str`) - (string)
* `folder` (`str`) - Folder for S3 service. Available from Rancher v2.2.7 (string)
* `resourcepoolPath` (`str`) - (string)
* `server` (`str`) - (string)
* `dns` (`dict`) - RKE dns add-on. Just for Rancher v2.2.x (list maxitems:1)
* `nodeSelector` (`dict`) - Node selector for RKE Ingress (map)
* `provider` (`str`) - Provider for RKE monitoring (string)
* `reverseCidrs` (`list`) - DNS add-on reverse cidr (list)
* `upstreamNameservers` (`list`) - DNS add-on upstream nameservers (list)
* `ignoreDockerVersion` (`bool`) - Ignore docker version. Default `true` (bool)
* `ingress` (`dict`) - Kubernetes ingress configuration (list maxitems:1)
* `dnsPolicy` (`str`) - Ingress controller DNS policy. `ClusterFirstWithHostNet`, `ClusterFirst`, `Default`, and `None` are supported. [K8S dns Policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) (string)
* `extraArgs` (`dict`) - Extra arguments for scheduler service (map)
* `nodeSelector` (`dict`) - Node selector for RKE Ingress (map)
* `options` (`dict`) - RKE options for network (map)
* `provider` (`str`) - Provider for RKE monitoring (string)
* `kubernetesVersion` (`str`) - The Kubernetes master version (string)
* `monitoring` (`dict`) - Kubernetes cluster monitoring (list maxitems:1)
* `options` (`dict`) - RKE options for network (map)
* `provider` (`str`) - Provider for RKE monitoring (string)
* `network` (`dict`) - Network for GKE cluster (string)
* `calicoNetworkProvider` (`dict`) - Calico provider config for RKE network (list maxitems:1)
* `cloudProvider` (`str`) - RKE options for Calico network provider (string)
* `canalNetworkProvider` (`dict`) - Canal provider config for RKE network (list maxitems:1)
* `iface` (`str`) - Iface config Flannel network provider (string)
* `flannelNetworkProvider` (`dict`) - Flannel provider config for RKE network (list maxitems:1)
* `iface` (`str`) - Iface config Flannel network provider (string)
* `options` (`dict`) - RKE options for network (map)
* `plugin` (`str`) - Plugin for RKE network. `canal` (default), `flannel`, `calico` and `weave` are supported. (string)
* `weaveNetworkProvider` (`dict`) - Weave provider config for RKE network (list maxitems:1)
* `password` (`str`) - Registry password (string)
* `nodes` (`list`) - RKE cluster nodes (list)
* `address` (`str`) - Address ip for node (string)
* `dockerSocket` (`str`) - Docker socket for node (string)
* `hostnameOverride` (`str`) - Hostname override for node (string)
* `internalAddress` (`str`) - Internal ip for node (string)
* `labels` (`dict`) - Labels for cluster registration token object (map)
* `nodeId` (`str`) - Id for the node (string)
* `port` (`str`) - Port for node. Default `22` (string)
* `roles` (`list`) - Roles for the node. `controlplane`, `etcd` and `worker` are supported. (list)
* `sshAgentAuth` (`bool`) - Use ssh agent auth. Default `false` (bool)
* `sshKey` (`str`) - Node SSH private key (string)
* `sshKeyPath` (`str`) - Node SSH private key path (string)
* `user` (`str`) - Registry user (string)
* `prefixPath` (`str`) - Prefix to customize Kubernetes path (string)
* `privateRegistries` (`list`) - private registries for docker images (list)
* `isDefault` (`bool`) - Set as default registry. Default `false` (bool)
* `password` (`str`) - Registry password (string)
* `url` (`str`) - Registry URL (string)
* `user` (`str`) - Registry user (string)
* `services` (`dict`) - Kubernetes cluster services (list maxitems:1)
* `etcd` (`dict`) - Etcd options for RKE services (list maxitems:1)
* `backup_config` (`dict`) - Backup options for etcd service. Just for Rancher v2.2.x (list maxitems:1)
* `enabled` (`bool`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `intervalHours` (`float`) - Interval hours for etcd backup. Default `12` (int)
* `retention` (`float`) - Retention for etcd backup. Default `6` (int)
* `s3BackupConfig` (`dict`) - S3 config options for etcd backup (list maxitems:1)
* `access_key` (`str`) - The AWS Client ID to use (string)
* `bucketName` (`str`) - Bucket name for S3 service (string)
* `customCa` (`str`) - Base64 encoded custom CA for S3 service. Use filebase64(<FILE>) for encoding file. Available from Rancher v2.2.5 (string)
* `endpoint` (`str`) - Endpoint for S3 service (string)
* `folder` (`str`) - Folder for S3 service. Available from Rancher v2.2.7 (string)
* `region` (`str`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `secret_key` (`str`) - The AWS Client Secret associated with the Client ID (string)
* `safeTimestamp` (`bool`) - Safe timestamp for etcd backup. Default: `false` (bool)
* `caCert` (`str`) - TLS CA certificate for etcd service (string)
* `cert` (`str`) - TLS certificate for etcd service (string)
* `creation` (`str`) - Creation option for etcd service (string)
* `externalUrls` (`list`) - External urls for etcd service (list)
* `extraArgs` (`dict`) - Extra arguments for scheduler service (map)
* `extraBinds` (`list`) - Extra binds for scheduler service (list)
* `extraEnvs` (`list`) - Extra environment for scheduler service (list)
* `gid` (`float`) - Etcd service GID. Default: `0`. For Rancher v2.3.x or above (int)
* `image` (`str`) - Docker image for scheduler service (string)
* `key` (`str`) - TLS key for etcd service (string)
* `path` (`str`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `retention` (`str`) - Retention for etcd backup. Default `6` (int)
* `snapshot` (`bool`) - Snapshot option for etcd service (bool)
* `uid` (`float`) - Etcd service UID. Default: `0`. For Rancher v2.3.x or above (int)
* `kubeApi` (`dict`) - Kube API options for RKE services (list maxitems:1)
* `admissionConfiguration` (`dict`) - Admission configuration (map)
* `alwaysPullImages` (`bool`) - Enable [AlwaysPullImages](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) Admission controller plugin. [Rancher docs](https://rancher.com/docs/rke/latest/en/config-options/services/#kubernetes-api-server-options) Default: `false` (bool)
* `auditLog` (`dict`) - K8s audit log configuration. (list maxitem: 1)
* `configuration` (`dict`) - Event rate limit configuration. (map)
* `format` (`str`) - Audit log format. Default: 'json' (string)
* `maxAge` (`float`) - Audit log max age. Default: `30` (int)
* `maxBackup` (`float`) - Audit log max backup. Default: `10` (int)
* `maxSize` (`float`) - Audit log max size. Default: `100` (int)
* `path` (`str`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `policy` (`str`) - Audit log policy json formated string. `omitStages` and `rules` json fields are supported. Example: `policy = jsonencode({"rules":[{"level": "Metadata"}]})` (string)
* `enabled` (`bool`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `eventRateLimit` (`dict`) - K8s event rate limit configuration. (list maxitem: 1)
* `configuration` (`dict`) - Event rate limit configuration. (map)
* `enabled` (`bool`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `extraArgs` (`dict`) - Extra arguments for scheduler service (map)
* `extraBinds` (`list`) - Extra binds for scheduler service (list)
* `extraEnvs` (`list`) - Extra environment for scheduler service (list)
* `image` (`str`) - Docker image for scheduler service (string)
* `podSecurityPolicy` (`bool`) - Pod Security Policy option for kube API service. Default `false` (bool)
* `secretsEncryptionConfig` (`dict`) - [Encrypt k8s secret data configuration](https://rancher.com/docs/rke/latest/en/config-options/secrets-encryption/). (list maxitem: 1)
* `customConfig` (`dict`) - Secrets encryption configuration. (map)
* `enabled` (`bool`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `serviceClusterIpRange` (`str`) - Service Cluster ip Range option for kube controller service (string)
* `serviceNodePortRange` (`str`) - Service Node Port Range option for kube API service (string)
* `kubeController` (`dict`) - Kube Controller options for RKE services (list maxitems:1)
* `clusterCidr` (`str`) - Cluster CIDR option for kube controller service (string)
* `extraArgs` (`dict`) - Extra arguments for scheduler service (map)
* `extraBinds` (`list`) - Extra binds for scheduler service (list)
* `extraEnvs` (`list`) - Extra environment for scheduler service (list)
* `image` (`str`) - Docker image for scheduler service (string)
* `serviceClusterIpRange` (`str`) - Service Cluster ip Range option for kube controller service (string)
* `kubelet` (`dict`) - Kubelet options for RKE services (list maxitems:1)
* `clusterDnsServer` (`str`) - Cluster DNS Server option for kubelet service (string)
* `clusterDomain` (`str`) - Cluster Domain option for kubelet service (string)
* `extraArgs` (`dict`) - Extra arguments for scheduler service (map)
* `extraBinds` (`list`) - Extra binds for scheduler service (list)
* `extraEnvs` (`list`) - Extra environment for scheduler service (list)
* `failSwapOn` (`bool`) - Enable or disable failing when swap on is not supported (bool)
* `generateServingCertificate` (`bool`) - [Generate a certificate signed by the kube-ca](https://rancher.com/docs/rke/latest/en/config-options/services/#kubelet-serving-certificate-requirements). Default `false` (bool)
* `image` (`str`) - Docker image for scheduler service (string)
* `infraContainerImage` (`str`) - Infra container image for kubelet service (string)
* `kubeproxy` (`dict`) - Kubeproxy options for RKE services (list maxitems:1)
* `extraArgs` (`dict`) - Extra arguments for scheduler service (map)
* `extraBinds` (`list`) - Extra binds for scheduler service (list)
* `extraEnvs` (`list`) - Extra environment for scheduler service (list)
* `image` (`str`) - Docker image for scheduler service (string)
* `scheduler` (`dict`) - Scheduler options for RKE services (list maxitems:1)
* `extraArgs` (`dict`) - Extra arguments for scheduler service (map)
* `extraBinds` (`list`) - Extra binds for scheduler service (list)
* `extraEnvs` (`list`) - Extra environment for scheduler service (list)
* `image` (`str`) - Docker image for scheduler service (string)
* `sshAgentAuth` (`bool`) - Use ssh agent auth. Default `false` (bool)
* `sshCertPath` (`str`) - Cluster level SSH certificate path (string)
* `sshKeyPath` (`str`) - Node SSH private key path (string)
"""
system_project_id: pulumi.Output[str]
"""
(Computed) System project ID for the cluster (string)
"""
windows_prefered_cluster: pulumi.Output[bool]
"""
Windows preferred cluster. Default: `false` (bool)
"""
def __init__(__self__, resource_name, opts=None, aks_config=None, annotations=None, cluster_auth_endpoint=None, cluster_monitoring_input=None, cluster_template_answers=None, cluster_template_id=None, cluster_template_questions=None, cluster_template_revision_id=None, default_pod_security_policy_template_id=None, description=None, desired_agent_image=None, desired_auth_image=None, docker_root_dir=None, driver=None, eks_config=None, enable_cluster_alerting=None, enable_cluster_istio=None, enable_cluster_monitoring=None, enable_network_policy=None, gke_config=None, labels=None, name=None, rke_config=None, windows_prefered_cluster=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Rancher v2 Cluster resource. This can be used to create Clusters for Rancher v2 environments and retrieve their information.
> This content is derived from https://github.com/terraform-providers/terraform-provider-rancher2/blob/master/website/docs/r/cluster.html.markdown.
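Example Usage (a minimal sketch; the `pulumi_rancher2` module alias and every value shown
here are illustrative assumptions, not taken from this file):

    import pulumi_rancher2 as rancher2

    # Create a custom RKE cluster managed by Rancher (hypothetical name and settings)
    foo_cluster = rancher2.Cluster("foo-custom",
        description="Foo rancher2 custom cluster",
        rke_config={
            "network": {
                "plugin": "canal",
            },
        })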
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] aks_config: The Azure AKS configuration for `aks` Clusters. Conflicts with `eks_config`, `gke_config` and `rke_config` (list maxitems:1)
:param pulumi.Input[dict] annotations: Annotations for cluster registration token object (map)
:param pulumi.Input[dict] cluster_auth_endpoint: Enabling the [local cluster authorized endpoint](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#local-cluster-auth-endpoint) allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
:param pulumi.Input[dict] cluster_monitoring_input: Cluster monitoring config. Any parameter defined in [rancher-monitoring charts](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) could be configured (list maxitems:1)
:param pulumi.Input[dict] cluster_template_answers: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
:param pulumi.Input[str] cluster_template_id: Cluster template ID. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[list] cluster_template_questions: Cluster template questions. Just for Rancher v2.3.x and above (list)
:param pulumi.Input[str] cluster_template_revision_id: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[str] default_pod_security_policy_template_id: [Default pod security policy template id](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#pod-security-policy-support) (string)
:param pulumi.Input[str] description: An optional description of this cluster (string)
:param pulumi.Input[str] desired_agent_image: Desired agent image. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[str] desired_auth_image: Desired auth image. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[str] docker_root_dir: Desired docker root dir (string)
:param pulumi.Input[str] driver: (Computed) The driver used for the Cluster. `imported`, `azurekubernetesservice`, `amazonelasticcontainerservice`, `googlekubernetesengine` and `rancherKubernetesEngine` are supported (string)
:param pulumi.Input[dict] eks_config: The Amazon EKS configuration for `eks` Clusters. Conflicts with `aks_config`, `gke_config` and `rke_config` (list maxitems:1)
:param pulumi.Input[bool] enable_cluster_alerting: Enable built-in cluster alerting. Default `false` (bool)
:param pulumi.Input[bool] enable_cluster_istio: Enable built-in cluster istio. Default `false`. Just for Rancher v2.3.x and above (bool)
:param pulumi.Input[bool] enable_cluster_monitoring: Enable built-in cluster monitoring. Default `false` (bool)
:param pulumi.Input[bool] enable_network_policy: Enable project network isolation. Default `false` (bool)
:param pulumi.Input[dict] gke_config: The Google GKE configuration for `gke` Clusters. Conflicts with `aks_config`, `eks_config` and `rke_config` (list maxitems:1)
:param pulumi.Input[dict] labels: Labels for cluster registration token object (map)
:param pulumi.Input[str] name: Name of cluster registration token (string)
:param pulumi.Input[dict] rke_config: The RKE configuration for `rke` Clusters. Conflicts with `aks_config`, `eks_config` and `gke_config` (list maxitems:1)
:param pulumi.Input[bool] windows_prefered_cluster: Windows preferred cluster. Default: `false` (bool)
The **aks_config** object supports the following:
* `aadServerAppSecret` (`pulumi.Input[str]`) - The secret of an Azure Active Directory server application (string)
* `aadTenantId` (`pulumi.Input[str]`) - The ID of an Azure Active Directory tenant (string)
* `addClientAppId` (`pulumi.Input[str]`) - The ID of an Azure Active Directory client application of type \"Native\". This application is for user login via kubectl (string)
* `addServerAppId` (`pulumi.Input[str]`) - The ID of an Azure Active Directory server application of type \"Web app/API\". This application represents the managed cluster's apiserver (Server application) (string)
* `adminUsername` (`pulumi.Input[str]`) - The administrator username to use for Linux hosts. Default `azureuser` (string)
* `agentDnsPrefix` (`pulumi.Input[str]`) - DNS prefix to be used to create the FQDN for the agent pool (string)
* `agentOsDiskSize` (`pulumi.Input[float]`) - GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the \"agent vm size\" specified. Default `0` (int)
* `agentPoolName` (`pulumi.Input[str]`) - Name for the agent pool, up to 12 alphanumeric characters. Default `agentpool0` (string)
* `agentStorageProfile` (`pulumi.Input[str]`) - Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default `ManagedDisks` (string)
* `agentVmSize` (`pulumi.Input[str]`) - Size of machine in the agent pool. Default `Standard_D1_v2` (string)
* `authBaseUrl` (`pulumi.Input[str]`) - Different authentication API url to use. Default `https://login.microsoftonline.com/` (string)
* `baseUrl` (`pulumi.Input[str]`) - Different resource management API url to use. Default `https://management.azure.com/` (string)
* `client_id` (`pulumi.Input[str]`) - Azure client ID to use (string)
* `client_secret` (`pulumi.Input[str]`) - Azure client secret associated with the \"client id\" (string)
* `count` (`pulumi.Input[float]`) - Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default `1` (int)
* `dnsServiceIp` (`pulumi.Input[str]`) - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in \"service cidr\". Default `10.0.0.10` (string)
* `dockerBridgeCidr` (`pulumi.Input[str]`) - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in \"service cidr\". Default `172.17.0.1/16` (string)
* `enableHttpApplicationRouting` (`pulumi.Input[bool]`) - Enable the Kubernetes ingress with automatic public DNS name creation. Default `false` (bool)
* `enableMonitoring` (`pulumi.Input[bool]`) - Turn on Azure Log Analytics monitoring. Uses the Log Analytics \"Default\" workspace if it exists, else creates one. If using an existing workspace, specify \"log analytics workspace resource id\". Default `true` (bool)
* `kubernetesVersion` (`pulumi.Input[str]`) - The Kubernetes master version (string)
* `location` (`pulumi.Input[str]`) - Azure Kubernetes cluster location. Default `eastus` (string)
* `logAnalyticsWorkspace` (`pulumi.Input[str]`) - The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
* `logAnalyticsWorkspaceResourceGroup` (`pulumi.Input[str]`) - The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
* `masterDnsPrefix` (`pulumi.Input[str]`) - DNS prefix to use for the Kubernetes cluster control plane (string)
* `maxPods` (`pulumi.Input[float]`) - Maximum number of pods that can run on a node. Default `110` (int)
* `networkPlugin` (`pulumi.Input[str]`) - Network plugin used for building Kubernetes network. Chooses from `azure` or `kubenet`. Default `azure` (string)
* `networkPolicy` (`pulumi.Input[str]`) - Network policy used for building Kubernetes network. Chooses from `calico` (string)
* `podCidr` (`pulumi.Input[str]`) - A CIDR notation IP range from which to assign Kubernetes Pod IPs when \"network plugin\" is specified in \"kubenet\". Default `192.168.127.12/16` (string)
* `resourceGroup` (`pulumi.Input[str]`) - The name of the Cluster resource group (string)
* `serviceCidr` (`pulumi.Input[str]`) - A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default `10.0.0.0/16` (string)
* `sshPublicKeyContents` (`pulumi.Input[str]`) - Contents of the SSH public key used to authenticate with Linux hosts (string)
* `subnet` (`pulumi.Input[str]`) - The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)
* `subscriptionId` (`pulumi.Input[str]`) - Subscription credentials which uniquely identify Microsoft Azure subscription (string)
* `tag` (`pulumi.Input[dict]`) - Tags for Kubernetes cluster. For example, foo=bar (map)
* `tenant_id` (`pulumi.Input[str]`) - Azure tenant ID to use (string)
* `virtualNetwork` (`pulumi.Input[str]`) - The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
* `virtualNetworkResourceGroup` (`pulumi.Input[str]`) - The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
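As a rough illustration (a sketch, not part of the generated reference), an `aks_config` block might be passed to the resource as below. The resource name, credential placeholders and version are assumptions for the example, and the set of required fields may vary by Rancher version:
```python
import pulumi_rancher2 as rancher2

# Sketch only: AKS-backed cluster. All credential values are placeholders.
aks_cluster = rancher2.Cluster("foo-aks",
    description="AKS cluster managed through Rancher",
    aks_config={
        "client_id": "<azure-client-id>",
        "client_secret": "<azure-client-secret>",
        "subscriptionId": "<azure-subscription-id>",
        "tenant_id": "<azure-tenant-id>",
        "resourceGroup": "my-resource-group",
        "kubernetesVersion": "1.17.9",
        "sshPublicKeyContents": "ssh-rsa AAAA... user@host",
    })
```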
The **cluster_auth_endpoint** object supports the following:
* `ca_certs` (`pulumi.Input[str]`) - CA certs for the authorized cluster endpoint (string)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `fqdn` (`pulumi.Input[str]`) - FQDN for the authorized cluster endpoint (string)
The **cluster_monitoring_input** object supports the following:
* `answers` (`pulumi.Input[dict]`) - Key/value answers for monitor input (map)
The **cluster_template_answers** object supports the following:
* `cluster_id` (`pulumi.Input[str]`) - Cluster ID (string)
* `project_id` (`pulumi.Input[str]`) - Project ID to apply answer (string)
* `values` (`pulumi.Input[dict]`) - Key/values for answer (map)
The **cluster_template_questions** object supports the following:
* `default` (`pulumi.Input[str]`) - Default variable value (string)
* `required` (`pulumi.Input[bool]`) - Required variable. Default `false` (bool)
* `type` (`pulumi.Input[str]`) - Variable type. `boolean`, `int` and `string` are allowed. Default `string` (string)
* `variable` (`pulumi.Input[str]`) - Variable name (string)
The **eks_config** object supports the following:
* `access_key` (`pulumi.Input[str]`) - The AWS Client ID to use (string)
* `ami` (`pulumi.Input[str]`) - AMI ID to use for the worker nodes instead of the default (string)
* `associateWorkerNodePublicIp` (`pulumi.Input[bool]`) - Associate public ip EKS worker nodes. Default `true` (bool)
* `desiredNodes` (`pulumi.Input[float]`) - The desired number of worker nodes. Just for Rancher v2.3.x and above. Default `3` (int)
* `instanceType` (`pulumi.Input[str]`) - The type of machine to use for worker nodes. Default `t2.medium` (string)
* `keyPairName` (`pulumi.Input[str]`) - Allow user to specify key name to use. Just for Rancher v2.2.7 and above (string)
* `kubernetesVersion` (`pulumi.Input[str]`) - The Kubernetes master version (string)
* `maximumNodes` (`pulumi.Input[float]`) - The maximum number of worker nodes. Default `3` (int)
* `minimumNodes` (`pulumi.Input[float]`) - The minimum number of worker nodes. Default `1` (int)
* `nodeVolumeSize` (`pulumi.Input[float]`) - The volume size for each node. Default `20` (int)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `secret_key` (`pulumi.Input[str]`) - The AWS Client Secret associated with the Client ID (string)
* `securityGroups` (`pulumi.Input[list]`) - List of security groups to use for the cluster. If it's not specified Rancher will create a new security group (list)
* `serviceRole` (`pulumi.Input[str]`) - The service role to use to perform the cluster operations in AWS. If it's not specified Rancher will create a new service role (string)
* `sessionToken` (`pulumi.Input[str]`) - A session token to use with the client key and secret if applicable (string)
* `subnets` (`pulumi.Input[list]`) - List of subnets in the virtual network to use. If it's not specified Rancher will create 3 new subnets (list)
* `userData` (`pulumi.Input[str]`) - Pass user-data to the nodes to perform automated configuration tasks (string)
* `virtualNetwork` (`pulumi.Input[str]`) - The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
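A comparable sketch for `eks_config`; the AWS credentials and sizing values below are placeholders chosen for the example, not provider defaults:
```python
import pulumi_rancher2 as rancher2

# Sketch only: EKS-backed cluster. AWS keys are placeholders.
eks_cluster = rancher2.Cluster("foo-eks",
    description="EKS cluster managed through Rancher",
    eks_config={
        "access_key": "<aws-access-key>",
        "secret_key": "<aws-secret-key>",
        "region": "us-west-2",
        "kubernetesVersion": "1.14",
        "minimumNodes": 1,
        "maximumNodes": 3,
    })
```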
The **gke_config** object supports the following:
* `clusterIpv4Cidr` (`pulumi.Input[str]`) - The IP address range of the container pods (string)
* `credential` (`pulumi.Input[str]`) - The contents of the GC credential file (string)
* `description` (`pulumi.Input[str]`) - An optional description of this cluster (string)
* `diskSizeGb` (`pulumi.Input[float]`) - Size of the disk attached to each node. Default `100` (int)
* `diskType` (`pulumi.Input[str]`) - Type of the disk attached to each node (string)
* `enableAlphaFeature` (`pulumi.Input[bool]`) - To enable Kubernetes alpha feature. Default `true` (bool)
* `enableAutoRepair` (`pulumi.Input[bool]`) - Specifies whether the node auto-repair is enabled for the node pool. Default `false` (bool)
* `enableAutoUpgrade` (`pulumi.Input[bool]`) - Specifies whether node auto-upgrade is enabled for the node pool. Default `false` (bool)
* `enableHorizontalPodAutoscaling` (`pulumi.Input[bool]`) - Enable horizontal pod autoscaling for the cluster. Default `true` (bool)
* `enableHttpLoadBalancing` (`pulumi.Input[bool]`) - Enable HTTP load balancing on GKE cluster. Default `true` (bool)
* `enableKubernetesDashboard` (`pulumi.Input[bool]`) - Whether to enable the Kubernetes dashboard. Default `false` (bool)
* `enableLegacyAbac` (`pulumi.Input[bool]`) - Whether to enable legacy abac on the cluster. Default `false` (bool)
* `enableMasterAuthorizedNetwork` (`pulumi.Input[bool]`)
* `enableNetworkPolicyConfig` (`pulumi.Input[bool]`) - Enable network policy config for the cluster. Default `true` (bool)
* `enableNodepoolAutoscaling` (`pulumi.Input[bool]`) - Enable nodepool autoscaling. Default `false` (bool)
* `enablePrivateEndpoint` (`pulumi.Input[bool]`) - Whether the master's internal IP address is used as the cluster endpoint. Default `false` (bool)
* `enablePrivateNodes` (`pulumi.Input[bool]`) - Whether nodes have internal IP address only. Default `false` (bool)
* `enableStackdriverLogging` (`pulumi.Input[bool]`) - Enable stackdriver logging. Default `true` (bool)
* `enableStackdriverMonitoring` (`pulumi.Input[bool]`) - Enable stackdriver monitoring on GKE cluster (bool)
* `imageType` (`pulumi.Input[str]`) - The image to use for the worker nodes (string)
* `ipPolicyClusterIpv4CidrBlock` (`pulumi.Input[str]`) - The IP address range for the cluster pod IPs (string)
* `ipPolicyClusterSecondaryRangeName` (`pulumi.Input[str]`) - The name of the secondary range to be used for the cluster CIDR block (string)
* `ipPolicyCreateSubnetwork` (`pulumi.Input[bool]`) - Whether a new subnetwork will be created automatically for the cluster. Default `false` (bool)
* `ipPolicyNodeIpv4CidrBlock` (`pulumi.Input[str]`) - The IP address range of the instance IPs in this cluster (string)
* `ipPolicyServicesIpv4CidrBlock` (`pulumi.Input[str]`) - The IP address range of the services IPs in this cluster (string)
* `ipPolicyServicesSecondaryRangeName` (`pulumi.Input[str]`) - The name of the secondary range to be used for the services CIDR block (string)
* `ipPolicySubnetworkName` (`pulumi.Input[str]`) - A custom subnetwork name to be used if createSubnetwork is true (string)
* `issueClientCertificate` (`pulumi.Input[bool]`) - Issue a client certificate. Default `false` (bool)
* `kubernetesDashboard` (`pulumi.Input[bool]`) - Enable the Kubernetes dashboard. Default `false` (bool)
* `labels` (`pulumi.Input[dict]`) - Labels for cluster registration token object (map)
* `localSsdCount` (`pulumi.Input[float]`) - The number of local SSD disks to be attached to the node. Default `0` (int)
* `locations` (`pulumi.Input[list]`) - Locations for GKE cluster (list)
* `machineType` (`pulumi.Input[str]`) - Machine type for GKE cluster (string)
* `maintenanceWindow` (`pulumi.Input[str]`) - Maintenance window for GKE cluster (string)
* `masterAuthorizedNetworkCidrBlocks` (`pulumi.Input[list]`) - Define up to 10 external networks that could access Kubernetes master through HTTPS (list)
* `masterIpv4CidrBlock` (`pulumi.Input[str]`) - The IP range in CIDR notation to use for the hosted master network (string)
* `masterVersion` (`pulumi.Input[str]`) - Master version for GKE cluster (string)
* `maxNodeCount` (`pulumi.Input[float]`) - Maximum number of nodes in the NodePool. Must be >= minNodeCount. There must be enough quota to scale up the cluster. Default `0` (int)
* `minNodeCount` (`pulumi.Input[float]`) - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount. Default `0` (int)
* `network` (`pulumi.Input[str]`) - Network for GKE cluster (string)
* `nodeCount` (`pulumi.Input[float]`) - Node count for GKE cluster. Default `3` (int)
* `nodePool` (`pulumi.Input[str]`) - The ID of the cluster node pool (string)
* `nodeVersion` (`pulumi.Input[str]`) - Node version for GKE cluster (string)
* `oauthScopes` (`pulumi.Input[list]`) - The set of Google API scopes to be made available on all of the node VMs under the default service account (list)
* `preemptible` (`pulumi.Input[bool]`) - Whether the nodes are created as preemptible VM instances. Default `false` (bool)
* `project_id` (`pulumi.Input[str]`) - Project ID to apply answer (string)
* `resourceLabels` (`pulumi.Input[dict]`) - The map of Kubernetes labels to be applied to each cluster (map)
* `serviceAccount` (`pulumi.Input[str]`) - The Google Cloud Platform Service Account to be used by the node VMs (string)
* `subNetwork` (`pulumi.Input[str]`) - Subnetwork for GKE cluster (string)
* `taints` (`pulumi.Input[list]`) - List of Kubernetes taints to be applied to each node (list)
* `useIpAliases` (`pulumi.Input[bool]`) - Whether alias IPs will be used for pod IPs in the cluster. Default `false` (bool)
* `zone` (`pulumi.Input[str]`) - Zone GKE cluster (string)
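A comparable sketch for `gke_config`; the service-account file path, project and zone are assumptions for the example, and additional fields are typically required in practice:
```python
import pulumi_rancher2 as rancher2

# Sketch only: GKE-backed cluster. The credential is read from a local
# service-account JSON file (hypothetical path).
with open("gke-service-account.json") as f:
    gke_credential = f.read()

gke_cluster = rancher2.Cluster("foo-gke",
    description="GKE cluster managed through Rancher",
    gke_config={
        "credential": gke_credential,
        "project_id": "my-gcp-project",
        "zone": "us-central1-a",
        "network": "default",
        "subNetwork": "default",
    })
```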
The **rke_config** object supports the following:
* `addonJobTimeout` (`pulumi.Input[float]`) - Duration in seconds of addon job (int)
* `addons` (`pulumi.Input[str]`) - Addons description to deploy on RKE cluster (string)
* `addonsIncludes` (`pulumi.Input[list]`) - Addons yaml manifests to deploy on RKE cluster (list)
* `authentication` (`pulumi.Input[dict]`) - Kubernetes cluster authentication (list maxitems:1)
* `sans` (`pulumi.Input[list]`) - RKE sans for authentication ([]string)
* `strategy` (`pulumi.Input[str]`) - RKE strategy for authentication (string)
* `authorization` (`pulumi.Input[dict]`) - Kubernetes cluster authorization (list maxitems:1)
* `mode` (`pulumi.Input[str]`) - RKE mode for authorization. `rbac` and `none` modes are available. Default `rbac` (string)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `bastionHost` (`pulumi.Input[dict]`) - RKE bastion host (list maxitems:1)
* `address` (`pulumi.Input[str]`) - Address ip for node (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `sshAgentAuth` (`pulumi.Input[bool]`) - Use ssh agent auth. Default `false` (bool)
* `sshKey` (`pulumi.Input[str]`) - Node SSH private key (string)
* `sshKeyPath` (`pulumi.Input[str]`) - Node SSH private key path (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `cloudProvider` (`pulumi.Input[dict]`) - RKE options for Cloud Provider (list maxitems:1)
* `awsCloudProvider` (`pulumi.Input[dict]`) - RKE AWS Cloud Provider config for Cloud Provider [rke-aws-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/aws/) (list maxitems:1)
* `global` (`pulumi.Input[dict]`) - (list maxitems:1)
* `disableSecurityGroupIngress` (`pulumi.Input[bool]`) - Default `false` (bool)
* `disableStrictZoneCheck` (`pulumi.Input[bool]`) - Default `false` (bool)
* `elbSecurityGroup` (`pulumi.Input[str]`) - (string)
* `kubernetesClusterId` (`pulumi.Input[str]`) - (string)
* `kubernetesClusterTag` (`pulumi.Input[str]`) - (string)
* `roleArn` (`pulumi.Input[str]`) - (string)
* `routeTableId` (`pulumi.Input[str]`) - (string)
* `subnetId` (`pulumi.Input[str]`) - (string)
* `vpc` (`pulumi.Input[str]`) - (string)
* `zone` (`pulumi.Input[str]`) - Zone GKE cluster (string)
* `serviceOverrides` (`pulumi.Input[list]`) - (list)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `service` (`pulumi.Input[str]`) - (string)
* `signingMethod` (`pulumi.Input[str]`) - (string)
* `signingName` (`pulumi.Input[str]`) - (string)
* `signingRegion` (`pulumi.Input[str]`) - (string)
* `url` (`pulumi.Input[str]`) - Registry URL (string)
* `azureCloudProvider` (`pulumi.Input[dict]`) - RKE Azure Cloud Provider config for Cloud Provider [rke-azure-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/azure/) (list maxitems:1)
* `aadClientCertPassword` (`pulumi.Input[str]`) - (string)
* `aadClientCertPath` (`pulumi.Input[str]`) - (string)
* `aadClientId` (`pulumi.Input[str]`) - (string)
* `aadClientSecret` (`pulumi.Input[str]`) - (string)
* `cloud` (`pulumi.Input[str]`) - (string)
* `cloudProviderBackoff` (`pulumi.Input[bool]`) - (bool)
* `cloudProviderBackoffDuration` (`pulumi.Input[float]`) - (int)
* `cloudProviderBackoffExponent` (`pulumi.Input[float]`) - (int)
* `cloudProviderBackoffJitter` (`pulumi.Input[float]`) - (int)
* `cloudProviderBackoffRetries` (`pulumi.Input[float]`) - (int)
* `cloudProviderRateLimit` (`pulumi.Input[bool]`) - (bool)
* `cloudProviderRateLimitBucket` (`pulumi.Input[float]`) - (int)
* `cloudProviderRateLimitQps` (`pulumi.Input[float]`) - (int)
* `location` (`pulumi.Input[str]`) - Azure Kubernetes cluster location. Default `eastus` (string)
* `maximumLoadBalancerRuleCount` (`pulumi.Input[float]`) - (int)
* `primaryAvailabilitySetName` (`pulumi.Input[str]`) - (string)
* `primaryScaleSetName` (`pulumi.Input[str]`) - (string)
* `resourceGroup` (`pulumi.Input[str]`) - The name of the Cluster resource group (string)
* `routeTableName` (`pulumi.Input[str]`) - (string)
* `securityGroupName` (`pulumi.Input[str]`) - (string)
* `subnetName` (`pulumi.Input[str]`) - (string)
* `subscriptionId` (`pulumi.Input[str]`) - Subscription credentials which uniquely identify Microsoft Azure subscription (string)
* `tenant_id` (`pulumi.Input[str]`) - Azure tenant ID to use (string)
* `useInstanceMetadata` (`pulumi.Input[bool]`) - (bool)
* `useManagedIdentityExtension` (`pulumi.Input[bool]`) - (bool)
* `vmType` (`pulumi.Input[str]`) - (string)
* `vnetName` (`pulumi.Input[str]`) - (string)
* `vnetResourceGroup` (`pulumi.Input[str]`) - (string)
* `customCloudProvider` (`pulumi.Input[str]`) - RKE Custom Cloud Provider config for Cloud Provider (string)
* `name` (`pulumi.Input[str]`) - Name of cluster registration token (string)
* `openstackCloudProvider` (`pulumi.Input[dict]`) - RKE Openstack Cloud Provider config for Cloud Provider [rke-openstack-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/openstack/) (list maxitems:1)
* `blockStorage` (`pulumi.Input[dict]`) - (list maxitems:1)
* `bsVersion` (`pulumi.Input[str]`) - (string)
* `ignoreVolumeAz` (`pulumi.Input[bool]`) - (bool)
* `trustDevicePath` (`pulumi.Input[bool]`) - (bool)
* `global` (`pulumi.Input[dict]`) - (list maxitems:1)
* `authUrl` (`pulumi.Input[str]`) - (string)
* `caFile` (`pulumi.Input[str]`) - (string)
* `domainId` (`pulumi.Input[str]`) - Required if `domain_name` not provided. (string)
* `domainName` (`pulumi.Input[str]`) - Required if `domain_id` not provided. (string)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `tenant_id` (`pulumi.Input[str]`) - Azure tenant ID to use (string)
* `tenantName` (`pulumi.Input[str]`) - Required if `tenant_id` not provided. (string)
* `trustId` (`pulumi.Input[str]`) - (string)
* `username` (`pulumi.Input[str]`) - (string)
* `loadBalancer` (`pulumi.Input[dict]`) - (list maxitems:1)
* `createMonitor` (`pulumi.Input[bool]`) - (bool)
* `floatingNetworkId` (`pulumi.Input[str]`) - (string)
* `lbMethod` (`pulumi.Input[str]`) - (string)
* `lbProvider` (`pulumi.Input[str]`) - (string)
* `lbVersion` (`pulumi.Input[str]`) - (string)
* `manageSecurityGroups` (`pulumi.Input[bool]`) - (bool)
* `monitorDelay` (`pulumi.Input[str]`) - Default `60s` (string)
* `monitorMaxRetries` (`pulumi.Input[float]`) - Default 5 (int)
* `monitorTimeout` (`pulumi.Input[str]`) - Default `30s` (string)
* `subnetId` (`pulumi.Input[str]`) - (string)
* `useOctavia` (`pulumi.Input[bool]`) - (bool)
* `metadata` (`pulumi.Input[dict]`) - (list maxitems:1)
* `requestTimeout` (`pulumi.Input[float]`) - (int)
* `searchOrder` (`pulumi.Input[str]`) - (string)
* `route` (`pulumi.Input[dict]`) - (list maxitems:1)
* `routerId` (`pulumi.Input[str]`) - (string)
* `vsphereCloudProvider` (`pulumi.Input[dict]`) - RKE Vsphere Cloud Provider config for Cloud Provider [rke-vsphere-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/) Extra argument `name` is required on `virtual_center` configuration. (list maxitems:1)
* `disk` (`pulumi.Input[dict]`) - (list maxitems:1)
* `scsiControllerType` (`pulumi.Input[str]`) - (string)
* `global` (`pulumi.Input[dict]`) - (list maxitems:1)
* `datacenters` (`pulumi.Input[str]`) - (string)
* `insecureFlag` (`pulumi.Input[bool]`) - (bool)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`pulumi.Input[float]`) - (int)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `network` (`pulumi.Input[dict]`) - vSphere network configuration (list maxitems:1)
* `publicNetwork` (`pulumi.Input[str]`) - (string)
* `virtualCenters` (`pulumi.Input[list]`) - (List)
* `datacenters` (`pulumi.Input[str]`) - (string)
* `name` (`pulumi.Input[str]`) - Name of cluster registration token (string)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`pulumi.Input[float]`) - (int)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `workspace` (`pulumi.Input[dict]`) - (list maxitems:1)
* `datacenter` (`pulumi.Input[str]`) - (string)
* `defaultDatastore` (`pulumi.Input[str]`) - (string)
* `folder` (`pulumi.Input[str]`) - Folder for S3 service. Available from Rancher v2.2.7 (string)
* `resourcepoolPath` (`pulumi.Input[str]`) - (string)
* `server` (`pulumi.Input[str]`) - (string)
* `dns` (`pulumi.Input[dict]`) - RKE dns add-on. Just for Rancher v2.2.x (list maxitems:1)
* `nodeSelector` (`pulumi.Input[dict]`) - Node selector for RKE Ingress (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE monitoring (string)
* `reverseCidrs` (`pulumi.Input[list]`) - DNS add-on reverse cidr (list)
* `upstreamNameservers` (`pulumi.Input[list]`) - DNS add-on upstream nameservers (list)
* `ignoreDockerVersion` (`pulumi.Input[bool]`) - Ignore docker version. Default `true` (bool)
* `ingress` (`pulumi.Input[dict]`) - Kubernetes ingress configuration (list maxitems:1)
* `dnsPolicy` (`pulumi.Input[str]`) - Ingress controller DNS policy. `ClusterFirstWithHostNet`, `ClusterFirst`, `Default`, and `None` are supported. [K8S dns Policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) (string)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `nodeSelector` (`pulumi.Input[dict]`) - Node selector for RKE Ingress (map)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE monitoring (string)
* `kubernetesVersion` (`pulumi.Input[str]`) - The Kubernetes master version (string)
* `monitoring` (`pulumi.Input[dict]`) - Kubernetes cluster monitoring (list maxitems:1)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE monitoring (string)
* `network` (`pulumi.Input[dict]`) - Network for RKE cluster (list maxitems:1)
* `calicoNetworkProvider` (`pulumi.Input[dict]`) - Calico provider config for RKE network (list maxitems:1)
* `cloudProvider` (`pulumi.Input[str]`) - RKE options for Calico network provider (string)
* `canalNetworkProvider` (`pulumi.Input[dict]`) - Canal provider config for RKE network (list maxitems:1)
* `iface` (`pulumi.Input[str]`) - Iface config Flannel network provider (string)
* `flannelNetworkProvider` (`pulumi.Input[dict]`) - Flannel provider config for RKE network (list maxitems:1)
* `iface` (`pulumi.Input[str]`) - Iface config Flannel network provider (string)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `plugin` (`pulumi.Input[str]`) - Plugin for RKE network. `canal` (default), `flannel`, `calico` and `weave` are supported. (string)
* `weaveNetworkProvider` (`pulumi.Input[dict]`) - Weave provider config for RKE network (list maxitems:1)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `nodes` (`pulumi.Input[list]`) - RKE cluster nodes (list)
* `address` (`pulumi.Input[str]`) - Address ip for node (string)
* `dockerSocket` (`pulumi.Input[str]`) - Docker socket for node (string)
* `hostnameOverride` (`pulumi.Input[str]`) - Hostname override for node (string)
* `internalAddress` (`pulumi.Input[str]`) - Internal ip for node (string)
* `labels` (`pulumi.Input[dict]`) - Labels for cluster registration token object (map)
* `nodeId` (`pulumi.Input[str]`) - Id for the node (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `roles` (`pulumi.Input[list]`) - Roles for the node. `controlplane`, `etcd` and `worker` are supported. (list)
* `sshAgentAuth` (`pulumi.Input[bool]`) - Use ssh agent auth. Default `false` (bool)
* `sshKey` (`pulumi.Input[str]`) - Node SSH private key (string)
* `sshKeyPath` (`pulumi.Input[str]`) - Node SSH private key path (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `prefixPath` (`pulumi.Input[str]`) - Prefix to customize Kubernetes path (string)
* `privateRegistries` (`pulumi.Input[list]`) - private registries for docker images (list)
* `isDefault` (`pulumi.Input[bool]`) - Set as default registry. Default `false` (bool)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `url` (`pulumi.Input[str]`) - Registry URL (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `services` (`pulumi.Input[dict]`) - Kubernetes cluster services (list maxitems:1)
* `etcd` (`pulumi.Input[dict]`) - Etcd options for RKE services (list maxitems:1)
* `backup_config` (`pulumi.Input[dict]`) - Backup options for etcd service. Just for Rancher v2.2.x (list maxitems:1)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `intervalHours` (`pulumi.Input[float]`) - Interval hours for etcd backup. Default `12` (int)
* `retention` (`pulumi.Input[float]`) - Retention for etcd backup. Default `6` (int)
* `s3BackupConfig` (`pulumi.Input[dict]`) - S3 config options for etcd backup (list maxitems:1)
* `access_key` (`pulumi.Input[str]`) - The AWS Client ID to use (string)
* `bucketName` (`pulumi.Input[str]`) - Bucket name for S3 service (string)
* `customCa` (`pulumi.Input[str]`) - Base64 encoded custom CA for S3 service. Use filebase64(<FILE>) for encoding file. Available from Rancher v2.2.5 (string)
* `endpoint` (`pulumi.Input[str]`) - Endpoint for S3 service (string)
* `folder` (`pulumi.Input[str]`) - Folder for S3 service. Available from Rancher v2.2.7 (string)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `secret_key` (`pulumi.Input[str]`) - The AWS Client Secret associated with the Client ID (string)
* `safeTimestamp` (`pulumi.Input[bool]`) - Safe timestamp for etcd backup. Default: `false` (bool)
* `caCert` (`pulumi.Input[str]`) - TLS CA certificate for etcd service (string)
* `cert` (`pulumi.Input[str]`) - TLS certificate for etcd service (string)
* `creation` (`pulumi.Input[str]`) - Creation option for etcd service (string)
* `externalUrls` (`pulumi.Input[list]`) - External urls for etcd service (list)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `gid` (`pulumi.Input[float]`) - Etcd service GID. Default: `0`. For Rancher v2.3.x or above (int)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `key` (`pulumi.Input[str]`) - TLS key for etcd service (string)
* `path` (`pulumi.Input[str]`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `retention` (`pulumi.Input[str]`) - Retention for etcd backup. Default `6` (int)
* `snapshot` (`pulumi.Input[bool]`) - Snapshot option for etcd service (bool)
* `uid` (`pulumi.Input[float]`) - Etcd service UID. Default: `0`. For Rancher v2.3.x or above (int)
* `kubeApi` (`pulumi.Input[dict]`) - Kube API options for RKE services (list maxitems:1)
* `admissionConfiguration` (`pulumi.Input[dict]`) - Admission configuration (map)
* `alwaysPullImages` (`pulumi.Input[bool]`) - Enable [AlwaysPullImages](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) Admission controller plugin. [Rancher docs](https://rancher.com/docs/rke/latest/en/config-options/services/#kubernetes-api-server-options) Default: `false` (bool)
* `auditLog` (`pulumi.Input[dict]`) - K8s audit log configuration. (list maxitem: 1)
* `configuration` (`pulumi.Input[dict]`) - Event rate limit configuration. (map)
* `format` (`pulumi.Input[str]`) - Audit log format. Default: 'json' (string)
* `maxAge` (`pulumi.Input[float]`) - Audit log max age. Default: `30` (int)
* `maxBackup` (`pulumi.Input[float]`) - Audit log max backup. Default: `10` (int)
* `maxSize` (`pulumi.Input[float]`) - Audit log max size. Default: `100` (int)
* `path` (`pulumi.Input[str]`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `policy` (`pulumi.Input[str]`) - Audit log policy json formatted string. `omitStages` and `rules` json fields are supported. Example: `policy = jsonencode({"rules":[{"level": "Metadata"}]})` (string)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `eventRateLimit` (`pulumi.Input[dict]`) - K8s event rate limit configuration. (list maxitem: 1)
* `configuration` (`pulumi.Input[dict]`) - Event rate limit configuration. (map)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `podSecurityPolicy` (`pulumi.Input[bool]`) - Pod Security Policy option for kube API service. Default `false` (bool)
* `secretsEncryptionConfig` (`pulumi.Input[dict]`) - [Encrypt k8s secret data configuration](https://rancher.com/docs/rke/latest/en/config-options/secrets-encryption/). (list maxitem: 1)
* `customConfig` (`pulumi.Input[dict]`) - Secrets encryption configuration. (map)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `serviceClusterIpRange` (`pulumi.Input[str]`) - Service Cluster ip Range option for kube controller service (string)
* `serviceNodePortRange` (`pulumi.Input[str]`) - Service Node Port Range option for kube API service (string)
* `kubeController` (`pulumi.Input[dict]`) - Kube Controller options for RKE services (list maxitems:1)
* `clusterCidr` (`pulumi.Input[str]`) - Cluster CIDR option for kube controller service (string)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `serviceClusterIpRange` (`pulumi.Input[str]`) - Service Cluster ip Range option for kube controller service (string)
* `kubelet` (`pulumi.Input[dict]`) - Kubelet options for RKE services (list maxitems:1)
* `clusterDnsServer` (`pulumi.Input[str]`) - Cluster DNS Server option for kubelet service (string)
* `clusterDomain` (`pulumi.Input[str]`) - Cluster Domain option for kubelet service (string)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `failSwapOn` (`pulumi.Input[bool]`) - Enable or disable failing when swap on is not supported (bool)
* `generateServingCertificate` (`pulumi.Input[bool]`) - [Generate a certificate signed by the kube-ca](https://rancher.com/docs/rke/latest/en/config-options/services/#kubelet-serving-certificate-requirements). Default `false` (bool)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `infraContainerImage` (`pulumi.Input[str]`) - Infra container image for kubelet service (string)
* `kubeproxy` (`pulumi.Input[dict]`) - Kubeproxy options for RKE services (list maxitems:1)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `scheduler` (`pulumi.Input[dict]`) - Scheduler options for RKE services (list maxitems:1)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `sshAgentAuth` (`pulumi.Input[bool]`) - Use ssh agent auth. Default `false` (bool)
* `sshCertPath` (`pulumi.Input[str]`) - Cluster level SSH certificate path (string)
* `sshKeyPath` (`pulumi.Input[str]`) - Node SSH private key path (string)
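A comparable sketch for `rke_config` on pre-provisioned nodes; the node address, user and SSH key path below are placeholders for the example:
```python
import pulumi_rancher2 as rancher2

# Sketch only: RKE cluster built from an existing node (placeholder address).
rke_cluster = rancher2.Cluster("foo-rke",
    description="RKE cluster on existing nodes",
    rke_config={
        "network": {"plugin": "canal"},
        "nodes": [{
            "address": "192.0.2.10",
            "user": "ubuntu",
            "roles": ["controlplane", "etcd", "worker"],
            "sshKeyPath": "~/.ssh/id_rsa",
        }],
    })
```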
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['aks_config'] = aks_config
__props__['annotations'] = annotations
__props__['cluster_auth_endpoint'] = cluster_auth_endpoint
__props__['cluster_monitoring_input'] = cluster_monitoring_input
__props__['cluster_template_answers'] = cluster_template_answers
__props__['cluster_template_id'] = cluster_template_id
__props__['cluster_template_questions'] = cluster_template_questions
__props__['cluster_template_revision_id'] = cluster_template_revision_id
__props__['default_pod_security_policy_template_id'] = default_pod_security_policy_template_id
__props__['description'] = description
__props__['desired_agent_image'] = desired_agent_image
__props__['desired_auth_image'] = desired_auth_image
__props__['docker_root_dir'] = docker_root_dir
__props__['driver'] = driver
__props__['eks_config'] = eks_config
__props__['enable_cluster_alerting'] = enable_cluster_alerting
__props__['enable_cluster_istio'] = enable_cluster_istio
__props__['enable_cluster_monitoring'] = enable_cluster_monitoring
__props__['enable_network_policy'] = enable_network_policy
__props__['gke_config'] = gke_config
__props__['labels'] = labels
__props__['name'] = name
__props__['rke_config'] = rke_config
__props__['windows_prefered_cluster'] = windows_prefered_cluster
__props__['cluster_registration_token'] = None
__props__['default_project_id'] = None
__props__['kube_config'] = None
__props__['system_project_id'] = None
super(Cluster, __self__).__init__(
'rancher2:index/cluster:Cluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, aks_config=None, annotations=None, cluster_auth_endpoint=None, cluster_monitoring_input=None, cluster_registration_token=None, cluster_template_answers=None, cluster_template_id=None, cluster_template_questions=None, cluster_template_revision_id=None, default_pod_security_policy_template_id=None, default_project_id=None, description=None, desired_agent_image=None, desired_auth_image=None, docker_root_dir=None, driver=None, eks_config=None, enable_cluster_alerting=None, enable_cluster_istio=None, enable_cluster_monitoring=None, enable_network_policy=None, gke_config=None, kube_config=None, labels=None, name=None, rke_config=None, system_project_id=None, windows_prefered_cluster=None):
"""
Get an existing Cluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] aks_config: The Azure AKS configuration for `aks` Clusters. Conflicts with `eks_config`, `gke_config` and `rke_config` (list maxitems:1)
:param pulumi.Input[dict] annotations: Annotations for cluster registration token object (map)
:param pulumi.Input[dict] cluster_auth_endpoint: Enabling the [local cluster authorized endpoint](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#local-cluster-auth-endpoint) allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
:param pulumi.Input[dict] cluster_monitoring_input: Cluster monitoring config. Any parameter defined in [rancher-monitoring charts](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) could be configured (list maxitems:1)
:param pulumi.Input[dict] cluster_registration_token: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
:param pulumi.Input[dict] cluster_template_answers: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
:param pulumi.Input[str] cluster_template_id: Cluster template ID. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[list] cluster_template_questions: Cluster template questions. Just for Rancher v2.3.x and above (list)
:param pulumi.Input[str] cluster_template_revision_id: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[str] default_pod_security_policy_template_id: [Default pod security policy template id](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#pod-security-policy-support) (string)
:param pulumi.Input[str] default_project_id: (Computed) Default project ID for the cluster (string)
:param pulumi.Input[str] description: An optional description of this cluster (string)
:param pulumi.Input[str] desired_agent_image: Desired agent image. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[str] desired_auth_image: Desired auth image. Just for Rancher v2.3.x and above (string)
:param pulumi.Input[str] docker_root_dir: Docker Root Dir for the cluster nodes (string)
:param pulumi.Input[str] driver: (Computed) The driver used for the Cluster. `imported`, `azurekubernetesservice`, `amazonelasticcontainerservice`, `googlekubernetesengine` and `rancherKubernetesEngine` are supported (string)
:param pulumi.Input[dict] eks_config: The Amazon EKS configuration for `eks` Clusters. Conflicts with `aks_config`, `gke_config` and `rke_config` (list maxitems:1)
:param pulumi.Input[bool] enable_cluster_alerting: Enable built-in cluster alerting. Default `false` (bool)
:param pulumi.Input[bool] enable_cluster_istio: Enable built-in cluster istio. Default `false`. Just for Rancher v2.3.x and above (bool)
:param pulumi.Input[bool] enable_cluster_monitoring: Enable built-in cluster monitoring. Default `false` (bool)
:param pulumi.Input[bool] enable_network_policy: Enable project network isolation. Default `false` (bool)
:param pulumi.Input[dict] gke_config: The Google GKE configuration for `gke` Clusters. Conflicts with `aks_config`, `eks_config` and `rke_config` (list maxitems:1)
:param pulumi.Input[str] kube_config: (Computed) Kube Config generated for the cluster (string)
:param pulumi.Input[dict] labels: Labels for cluster registration token object (map)
:param pulumi.Input[str] name: Name of cluster registration token (string)
:param pulumi.Input[dict] rke_config: The RKE configuration for `rke` Clusters. Conflicts with `aks_config`, `eks_config` and `gke_config` (list maxitems:1)
:param pulumi.Input[str] system_project_id: (Computed) System project ID for the cluster (string)
:param pulumi.Input[bool] windows_prefered_cluster: Windows preferred cluster. Default: `false` (bool)
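As a rough illustration, an existing cluster can be looked up by its provider ID and its computed outputs re-exported; the resource name and the ID value below are placeholders:
```python
import pulumi
import pulumi_rancher2 as rancher2

# Sketch only: adopt an already-provisioned cluster by ID (placeholder "c-xxxxx")
# and surface its generated kubeconfig as a stack output.
existing = rancher2.Cluster.get("imported-cluster", "c-xxxxx")
pulumi.export("kube_config", existing.kube_config)
```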
The **aks_config** object supports the following:
* `aadServerAppSecret` (`pulumi.Input[str]`) - The secret of an Azure Active Directory server application (string)
* `aadTenantId` (`pulumi.Input[str]`) - The ID of an Azure Active Directory tenant (string)
* `addClientAppId` (`pulumi.Input[str]`) - The ID of an Azure Active Directory client application of type \"Native\". This application is for user login via kubectl (string)
* `addServerAppId` (`pulumi.Input[str]`) - The ID of an Azure Active Directory server application of type \"Web app/API\". This application represents the managed cluster's apiserver (Server application) (string)
* `adminUsername` (`pulumi.Input[str]`) - The administrator username to use for Linux hosts. Default `azureuser` (string)
* `agentDnsPrefix` (`pulumi.Input[str]`) - DNS prefix to be used to create the FQDN for the agent pool (string)
* `agentOsDiskSize` (`pulumi.Input[float]`) - GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the \"agent vm size\" specified. Default `0` (int)
* `agentPoolName` (`pulumi.Input[str]`) - Name for the agent pool, up to 12 alphanumeric characters. Default `agentpool0` (string)
* `agentStorageProfile` (`pulumi.Input[str]`) - Storage profile specifies what kind of storage is used on machines in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default `ManagedDisks` (string)
* `agentVmSize` (`pulumi.Input[str]`) - Size of machine in the agent pool. Default `Standard_D1_v2` (string)
* `authBaseUrl` (`pulumi.Input[str]`) - Different authentication API url to use. Default `https://login.microsoftonline.com/` (string)
* `baseUrl` (`pulumi.Input[str]`) - Different resource management API url to use. Default `https://management.azure.com/` (string)
* `client_id` (`pulumi.Input[str]`) - Azure client ID to use (string)
* `client_secret` (`pulumi.Input[str]`) - Azure client secret associated with the \"client id\" (string)
* `count` (`pulumi.Input[float]`) - Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default `1` (int)
* `dnsServiceIp` (`pulumi.Input[str]`) - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in \"service cidr\". Default `10.0.0.10` (string)
* `dockerBridgeCidr` (`pulumi.Input[str]`) - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in \"service cidr\". Default `172.17.0.1/16` (string)
* `enableHttpApplicationRouting` (`pulumi.Input[bool]`) - Enable the Kubernetes ingress with automatic public DNS name creation. Default `false` (bool)
* `enableMonitoring` (`pulumi.Input[bool]`) - Turn on Azure Log Analytics monitoring. Uses the Log Analytics \"Default\" workspace if it exists, else creates one. If using an existing workspace, specify \"log analytics workspace resource id\". Default `true` (bool)
* `kubernetesVersion` (`pulumi.Input[str]`) - The Kubernetes master version (string)
* `location` (`pulumi.Input[str]`) - Azure Kubernetes cluster location. Default `eastus` (string)
* `logAnalyticsWorkspace` (`pulumi.Input[str]`) - The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
* `logAnalyticsWorkspaceResourceGroup` (`pulumi.Input[str]`) - The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
* `masterDnsPrefix` (`pulumi.Input[str]`) - DNS prefix to use for the Kubernetes cluster control plane (string)
* `maxPods` (`pulumi.Input[float]`) - Maximum number of pods that can run on a node. Default `110` (int)
* `networkPlugin` (`pulumi.Input[str]`) - Network plugin used for building Kubernetes network. Chooses from `azure` or `kubenet`. Default `azure` (string)
* `networkPolicy` (`pulumi.Input[str]`) - Network policy used for building Kubernetes network. Chooses from `calico` (string)
* `podCidr` (`pulumi.Input[str]`) - A CIDR notation IP range from which to assign Kubernetes Pod IPs when \"network plugin\" is specified in \"kubenet\". Default `192.168.127.12/16` (string)
* `resourceGroup` (`pulumi.Input[str]`) - The name of the Cluster resource group (string)
* `serviceCidr` (`pulumi.Input[str]`) - A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default `10.0.0.0/16` (string)
* `sshPublicKeyContents` (`pulumi.Input[str]`) - Contents of the SSH public key used to authenticate with Linux hosts (string)
* `subnet` (`pulumi.Input[str]`) - The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)
* `subscriptionId` (`pulumi.Input[str]`) - Subscription credentials which uniquely identify Microsoft Azure subscription (string)
* `tag` (`pulumi.Input[dict]`) - Tags for Kubernetes cluster. For example, foo=bar (map)
* `tenant_id` (`pulumi.Input[str]`) - Azure tenant ID to use (string)
* `virtualNetwork` (`pulumi.Input[str]`) - The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
* `virtualNetworkResourceGroup` (`pulumi.Input[str]`) - The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
The **cluster_auth_endpoint** object supports the following:
* `ca_certs` (`pulumi.Input[str]`) - CA certs for the authorized cluster endpoint (string)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `fqdn` (`pulumi.Input[str]`) - FQDN for the authorized cluster endpoint (string)
The **cluster_monitoring_input** object supports the following:
* `answers` (`pulumi.Input[dict]`) - Key/value answers for monitor input (map)
The **cluster_registration_token** object supports the following:
* `annotations` (`pulumi.Input[dict]`) - Annotations for cluster registration token object (map)
* `cluster_id` (`pulumi.Input[str]`) - Cluster ID (string)
* `command` (`pulumi.Input[str]`) - Command to execute in a imported k8s cluster (string)
* `id` (`pulumi.Input[str]`) - (Computed) The ID of the resource (string)
* `insecureCommand` (`pulumi.Input[str]`) - Insecure command to execute in a imported k8s cluster (string)
* `labels` (`pulumi.Input[dict]`) - Labels for cluster registration token object (map)
* `manifestUrl` (`pulumi.Input[str]`) - K8s manifest url to execute with `kubectl` to import an existing k8s cluster (string)
* `name` (`pulumi.Input[str]`) - Name of cluster registration token (string)
* `nodeCommand` (`pulumi.Input[str]`) - Node command to execute in linux nodes for custom k8s cluster (string)
* `token` (`pulumi.Input[str]`) - Token for cluster registration token object (string)
* `windowsNodeCommand` (`pulumi.Input[str]`) - Node command to execute in windows nodes for custom k8s cluster (string)
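For example (a sketch, with a placeholder cluster ID), the computed registration token of a looked-up cluster can be surfaced as a stack output; individual fields such as the node command can then be read from the exported object:
```python
import pulumi
import pulumi_rancher2 as rancher2

# Sketch only: expose the registration token of an existing custom cluster.
cluster = rancher2.Cluster.get("custom-cluster", "c-xxxxx")
pulumi.export("registration_token", cluster.cluster_registration_token)
```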
The **cluster_template_answers** object supports the following:
* `cluster_id` (`pulumi.Input[str]`) - Cluster ID (string)
* `project_id` (`pulumi.Input[str]`) - Project ID to apply answer (string)
* `values` (`pulumi.Input[dict]`) - Key/values for answer (map)
The **cluster_template_questions** object supports the following:
* `default` (`pulumi.Input[str]`) - Default variable value (string)
* `required` (`pulumi.Input[bool]`) - Required variable. Default `false` (bool)
* `type` (`pulumi.Input[str]`) - Variable type. `boolean`, `int` and `string` are allowed. Default `string` (string)
* `variable` (`pulumi.Input[str]`) - Variable name (string)
The **eks_config** object supports the following:
* `access_key` (`pulumi.Input[str]`) - The AWS Client ID to use (string)
* `ami` (`pulumi.Input[str]`) - AMI ID to use for the worker nodes instead of the default (string)
* `associateWorkerNodePublicIp` (`pulumi.Input[bool]`) - Associate public ip EKS worker nodes. Default `true` (bool)
* `desiredNodes` (`pulumi.Input[float]`) - The desired number of worker nodes. Just for Rancher v2.3.x and above. Default `3` (int)
* `instanceType` (`pulumi.Input[str]`) - The type of machine to use for worker nodes. Default `t2.medium` (string)
* `keyPairName` (`pulumi.Input[str]`) - Allow user to specify key name to use. Just for Rancher v2.2.7 and above (string)
* `kubernetesVersion` (`pulumi.Input[str]`) - The Kubernetes master version (string)
* `maximumNodes` (`pulumi.Input[float]`) - The maximum number of worker nodes. Default `3` (int)
* `minimumNodes` (`pulumi.Input[float]`) - The minimum number of worker nodes. Default `1` (int)
* `nodeVolumeSize` (`pulumi.Input[float]`) - The volume size for each node. Default `20` (int)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `secret_key` (`pulumi.Input[str]`) - The AWS Client Secret associated with the Client ID (string)
* `securityGroups` (`pulumi.Input[list]`) - List of security groups to use for the cluster. If it's not specified Rancher will create a new security group (list)
* `serviceRole` (`pulumi.Input[str]`) - The service role to use to perform the cluster operations in AWS. If it's not specified Rancher will create a new service role (string)
* `sessionToken` (`pulumi.Input[str]`) - A session token to use with the client key and secret if applicable (string)
* `subnets` (`pulumi.Input[list]`) - List of subnets in the virtual network to use. If it's not specified Rancher will create 3 new subnets (list)
* `userData` (`pulumi.Input[str]`) - Pass user-data to the nodes to perform automated configuration tasks (string)
* `virtualNetwork` (`pulumi.Input[str]`) - The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
The **gke_config** object supports the following:
* `clusterIpv4Cidr` (`pulumi.Input[str]`) - The IP address range of the container pods (string)
* `credential` (`pulumi.Input[str]`) - The contents of the GC credential file (string)
* `description` (`pulumi.Input[str]`) - An optional description of this cluster (string)
* `diskSizeGb` (`pulumi.Input[float]`) - Size of the disk attached to each node. Default `100` (int)
* `diskType` (`pulumi.Input[str]`) - Type of the disk attached to each node (string)
* `enableAlphaFeature` (`pulumi.Input[bool]`) - To enable Kubernetes alpha feature. Default `true` (bool)
* `enableAutoRepair` (`pulumi.Input[bool]`) - Specifies whether the node auto-repair is enabled for the node pool. Default `false` (bool)
* `enableAutoUpgrade` (`pulumi.Input[bool]`) - Specifies whether node auto-upgrade is enabled for the node pool. Default `false` (bool)
* `enableHorizontalPodAutoscaling` (`pulumi.Input[bool]`) - Enable horizontal pod autoscaling for the cluster. Default `true` (bool)
* `enableHttpLoadBalancing` (`pulumi.Input[bool]`) - Enable HTTP load balancing on GKE cluster. Default `true` (bool)
* `enableKubernetesDashboard` (`pulumi.Input[bool]`) - Whether to enable the Kubernetes dashboard. Default `false` (bool)
* `enableLegacyAbac` (`pulumi.Input[bool]`) - Whether to enable legacy abac on the cluster. Default `false` (bool)
* `enableMasterAuthorizedNetwork` (`pulumi.Input[bool]`)
* `enableNetworkPolicyConfig` (`pulumi.Input[bool]`) - Enable network policy config for the cluster. Default `true` (bool)
* `enableNodepoolAutoscaling` (`pulumi.Input[bool]`) - Enable nodepool autoscaling. Default `false` (bool)
* `enablePrivateEndpoint` (`pulumi.Input[bool]`) - Whether the master's internal IP address is used as the cluster endpoint. Default `false` (bool)
* `enablePrivateNodes` (`pulumi.Input[bool]`) - Whether nodes have internal IP address only. Default `false` (bool)
* `enableStackdriverLogging` (`pulumi.Input[bool]`) - Enable stackdriver logging. Default `true` (bool)
* `enableStackdriverMonitoring` (`pulumi.Input[bool]`) - Enable stackdriver monitoring on GKE cluster (bool)
* `imageType` (`pulumi.Input[str]`) - The image to use for the worker nodes (string)
* `ipPolicyClusterIpv4CidrBlock` (`pulumi.Input[str]`) - The IP address range for the cluster pod IPs (string)
* `ipPolicyClusterSecondaryRangeName` (`pulumi.Input[str]`) - The name of the secondary range to be used for the cluster CIDR block (string)
* `ipPolicyCreateSubnetwork` (`pulumi.Input[bool]`) - Whether a new subnetwork will be created automatically for the cluster. Default `false` (bool)
* `ipPolicyNodeIpv4CidrBlock` (`pulumi.Input[str]`) - The IP address range of the instance IPs in this cluster (string)
* `ipPolicyServicesIpv4CidrBlock` (`pulumi.Input[str]`) - The IP address range of the services IPs in this cluster (string)
* `ipPolicyServicesSecondaryRangeName` (`pulumi.Input[str]`) - The name of the secondary range to be used for the services CIDR block (string)
* `ipPolicySubnetworkName` (`pulumi.Input[str]`) - A custom subnetwork name to be used if createSubnetwork is true (string)
* `issueClientCertificate` (`pulumi.Input[bool]`) - Issue a client certificate. Default `false` (bool)
* `kubernetesDashboard` (`pulumi.Input[bool]`) - Enable the Kubernetes dashboard. Default `false` (bool)
* `labels` (`pulumi.Input[dict]`) - Labels for cluster registration token object (map)
* `localSsdCount` (`pulumi.Input[float]`) - The number of local SSD disks to be attached to the node. Default `0` (int)
* `locations` (`pulumi.Input[list]`) - Locations for GKE cluster (list)
* `machineType` (`pulumi.Input[str]`) - Machine type for GKE cluster (string)
* `maintenanceWindow` (`pulumi.Input[str]`) - Maintenance window for GKE cluster (string)
* `masterAuthorizedNetworkCidrBlocks` (`pulumi.Input[list]`) - Define up to 10 external networks that could access Kubernetes master through HTTPS (list)
* `masterIpv4CidrBlock` (`pulumi.Input[str]`) - The IP range in CIDR notation to use for the hosted master network (string)
* `masterVersion` (`pulumi.Input[str]`) - Master version for GKE cluster (string)
* `maxNodeCount` (`pulumi.Input[float]`) - Maximum number of nodes in the NodePool. Must be >= minNodeCount. There must be enough quota to scale up the cluster. Default `0` (int)
* `minNodeCount` (`pulumi.Input[float]`) - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount. Default `0` (int)
* `network` (`pulumi.Input[str]`) - Network for GKE cluster (string)
* `nodeCount` (`pulumi.Input[float]`) - Node count for GKE cluster. Default `3` (int)
* `nodePool` (`pulumi.Input[str]`) - The ID of the cluster node pool (string)
* `nodeVersion` (`pulumi.Input[str]`) - Node version for GKE cluster (string)
* `oauthScopes` (`pulumi.Input[list]`) - The set of Google API scopes to be made available on all of the node VMs under the default service account (list)
* `preemptible` (`pulumi.Input[bool]`) - Whether the nodes are created as preemptible VM instances. Default `false` (bool)
* `project_id` (`pulumi.Input[str]`) - Project ID to apply answer (string)
* `resourceLabels` (`pulumi.Input[dict]`) - The map of Kubernetes labels to be applied to each cluster (map)
* `serviceAccount` (`pulumi.Input[str]`) - The Google Cloud Platform Service Account to be used by the node VMs (string)
* `subNetwork` (`pulumi.Input[str]`) - Subnetwork for GKE cluster (string)
* `taints` (`pulumi.Input[list]`) - List of Kubernetes taints to be applied to each node (list)
* `useIpAliases` (`pulumi.Input[bool]`) - Whether alias IPs will be used for pod IPs in the cluster. Default `false` (bool)
* `zone` (`pulumi.Input[str]`) - Zone GKE cluster (string)
The **rke_config** object supports the following:
* `addonJobTimeout` (`pulumi.Input[float]`) - Duration in seconds of addon job (int)
* `addons` (`pulumi.Input[str]`) - Addons description to deploy on RKE cluster (string)
* `addonsIncludes` (`pulumi.Input[list]`) - Addons yaml manifests to deploy on RKE cluster (list)
* `authentication` (`pulumi.Input[dict]`) - Kubernetes cluster authentication (list maxitems:1)
* `sans` (`pulumi.Input[list]`) - RKE sans for authentication ([]string)
* `strategy` (`pulumi.Input[str]`) - RKE strategy for authentication (string)
* `authorization` (`pulumi.Input[dict]`) - Kubernetes cluster authorization (list maxitems:1)
* `mode` (`pulumi.Input[str]`) - RKE mode for authorization. `rbac` and `none` modes are available. Default `rbac` (string)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `bastionHost` (`pulumi.Input[dict]`) - RKE bastion host (list maxitems:1)
* `address` (`pulumi.Input[str]`) - Address ip for node (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `sshAgentAuth` (`pulumi.Input[bool]`) - Use ssh agent auth. Default `false` (bool)
* `sshKey` (`pulumi.Input[str]`) - Node SSH private key (string)
* `sshKeyPath` (`pulumi.Input[str]`) - Node SSH private key path (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `cloudProvider` (`pulumi.Input[dict]`) - RKE options for Cloud Provider (list maxitems:1)
* `awsCloudProvider` (`pulumi.Input[dict]`) - RKE AWS Cloud Provider config for Cloud Provider [rke-aws-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/aws/) (list maxitems:1)
* `global` (`pulumi.Input[dict]`) - (list maxitems:1)
* `disableSecurityGroupIngress` (`pulumi.Input[bool]`) - Default `false` (bool)
* `disableStrictZoneCheck` (`pulumi.Input[bool]`) - Default `false` (bool)
* `elbSecurityGroup` (`pulumi.Input[str]`) - (string)
* `kubernetesClusterId` (`pulumi.Input[str]`) - (string)
* `kubernetesClusterTag` (`pulumi.Input[str]`) - (string)
* `roleArn` (`pulumi.Input[str]`) - (string)
* `routeTableId` (`pulumi.Input[str]`) - (string)
* `subnetId` (`pulumi.Input[str]`) - (string)
* `vpc` (`pulumi.Input[str]`) - (string)
* `zone` (`pulumi.Input[str]`) - Zone GKE cluster (string)
* `serviceOverrides` (`pulumi.Input[list]`) - (list)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `service` (`pulumi.Input[str]`) - (string)
* `signingMethod` (`pulumi.Input[str]`) - (string)
* `signingName` (`pulumi.Input[str]`) - (string)
* `signingRegion` (`pulumi.Input[str]`) - (string)
* `url` (`pulumi.Input[str]`) - Registry URL (string)
* `azureCloudProvider` (`pulumi.Input[dict]`) - RKE Azure Cloud Provider config for Cloud Provider [rke-azure-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/azure/) (list maxitems:1)
* `aadClientCertPassword` (`pulumi.Input[str]`) - (string)
* `aadClientCertPath` (`pulumi.Input[str]`) - (string)
* `aadClientId` (`pulumi.Input[str]`) - (string)
* `aadClientSecret` (`pulumi.Input[str]`) - (string)
* `cloud` (`pulumi.Input[str]`) - (string)
* `cloudProviderBackoff` (`pulumi.Input[bool]`) - (bool)
* `cloudProviderBackoffDuration` (`pulumi.Input[float]`) - (int)
* `cloudProviderBackoffExponent` (`pulumi.Input[float]`) - (int)
* `cloudProviderBackoffJitter` (`pulumi.Input[float]`) - (int)
* `cloudProviderBackoffRetries` (`pulumi.Input[float]`) - (int)
* `cloudProviderRateLimit` (`pulumi.Input[bool]`) - (bool)
* `cloudProviderRateLimitBucket` (`pulumi.Input[float]`) - (int)
* `cloudProviderRateLimitQps` (`pulumi.Input[float]`) - (int)
* `location` (`pulumi.Input[str]`) - Azure Kubernetes cluster location. Default `eastus` (string)
* `maximumLoadBalancerRuleCount` (`pulumi.Input[float]`) - (int)
* `primaryAvailabilitySetName` (`pulumi.Input[str]`) - (string)
* `primaryScaleSetName` (`pulumi.Input[str]`) - (string)
* `resourceGroup` (`pulumi.Input[str]`) - The name of the Cluster resource group (string)
* `routeTableName` (`pulumi.Input[str]`) - (string)
* `securityGroupName` (`pulumi.Input[str]`) - (string)
* `subnetName` (`pulumi.Input[str]`) - (string)
* `subscriptionId` (`pulumi.Input[str]`) - Subscription credentials which uniquely identify Microsoft Azure subscription (string)
* `tenant_id` (`pulumi.Input[str]`) - Azure tenant ID to use (string)
* `useInstanceMetadata` (`pulumi.Input[bool]`) - (bool)
* `useManagedIdentityExtension` (`pulumi.Input[bool]`) - (bool)
* `vmType` (`pulumi.Input[str]`) - (string)
* `vnetName` (`pulumi.Input[str]`) - (string)
* `vnetResourceGroup` (`pulumi.Input[str]`) - (string)
* `customCloudProvider` (`pulumi.Input[str]`) - RKE Custom Cloud Provider config for Cloud Provider (string)
* `name` (`pulumi.Input[str]`) - Name of cluster registration token (string)
* `openstackCloudProvider` (`pulumi.Input[dict]`) - RKE Openstack Cloud Provider config for Cloud Provider [rke-openstack-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/openstack/) (list maxitems:1)
* `blockStorage` (`pulumi.Input[dict]`) - (list maxitems:1)
* `bsVersion` (`pulumi.Input[str]`) - (string)
* `ignoreVolumeAz` (`pulumi.Input[bool]`) - (bool)
* `trustDevicePath` (`pulumi.Input[bool]`) - (bool)
* `global` (`pulumi.Input[dict]`) - (list maxitems:1)
* `authUrl` (`pulumi.Input[str]`) - (string)
* `caFile` (`pulumi.Input[str]`) - (string)
* `domainId` (`pulumi.Input[str]`) - Required if `domain_name` not provided. (string)
* `domainName` (`pulumi.Input[str]`) - Required if `domain_id` not provided. (string)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `tenant_id` (`pulumi.Input[str]`) - Azure tenant ID to use (string)
* `tenantName` (`pulumi.Input[str]`) - Required if `tenant_id` not provided. (string)
* `trustId` (`pulumi.Input[str]`) - (string)
* `username` (`pulumi.Input[str]`) - (string)
* `loadBalancer` (`pulumi.Input[dict]`) - (list maxitems:1)
* `createMonitor` (`pulumi.Input[bool]`) - (bool)
* `floatingNetworkId` (`pulumi.Input[str]`) - (string)
* `lbMethod` (`pulumi.Input[str]`) - (string)
* `lbProvider` (`pulumi.Input[str]`) - (string)
* `lbVersion` (`pulumi.Input[str]`) - (string)
* `manageSecurityGroups` (`pulumi.Input[bool]`) - (bool)
* `monitorDelay` (`pulumi.Input[str]`) - Default `60s` (string)
* `monitorMaxRetries` (`pulumi.Input[float]`) - Default 5 (int)
* `monitorTimeout` (`pulumi.Input[str]`) - Default `30s` (string)
* `subnetId` (`pulumi.Input[str]`) - (string)
* `useOctavia` (`pulumi.Input[bool]`) - (bool)
* `metadata` (`pulumi.Input[dict]`) - (list maxitems:1)
* `requestTimeout` (`pulumi.Input[float]`) - (int)
* `searchOrder` (`pulumi.Input[str]`) - (string)
* `route` (`pulumi.Input[dict]`) - (list maxitems:1)
* `routerId` (`pulumi.Input[str]`) - (string)
* `vsphereCloudProvider` (`pulumi.Input[dict]`) - RKE Vsphere Cloud Provider config for Cloud Provider [rke-vsphere-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/) Extra argument `name` is required on `virtual_center` configuration. (list maxitems:1)
* `disk` (`pulumi.Input[dict]`) - (list maxitems:1)
* `scsiControllerType` (`pulumi.Input[str]`) - (string)
* `global` (`pulumi.Input[dict]`) - (list maxitems:1)
* `datacenters` (`pulumi.Input[str]`) - (string)
* `insecureFlag` (`pulumi.Input[bool]`) - (bool)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`pulumi.Input[float]`) - (int)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `network` (`pulumi.Input[dict]`) - Network configuration for the vSphere cloud provider (list maxitems:1)
* `publicNetwork` (`pulumi.Input[str]`) - (string)
* `virtualCenters` (`pulumi.Input[list]`) - (List)
* `datacenters` (`pulumi.Input[str]`) - (string)
* `name` (`pulumi.Input[str]`) - Name of cluster registration token (string)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`pulumi.Input[float]`) - (int)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `workspace` (`pulumi.Input[dict]`) - (list maxitems:1)
* `datacenter` (`pulumi.Input[str]`) - (string)
* `defaultDatastore` (`pulumi.Input[str]`) - (string)
* `folder` (`pulumi.Input[str]`) - Folder for the vSphere workspace (string)
* `resourcepoolPath` (`pulumi.Input[str]`) - (string)
* `server` (`pulumi.Input[str]`) - (string)
* `dns` (`pulumi.Input[dict]`) - RKE dns add-on. Just for Rancher v2.2.x (list maxitems:1)
* `nodeSelector` (`pulumi.Input[dict]`) - Node selector for RKE Ingress (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE dns add-on (string)
* `reverseCidrs` (`pulumi.Input[list]`) - DNS add-on reverse cidr (list)
* `upstreamNameservers` (`pulumi.Input[list]`) - DNS add-on upstream nameservers (list)
* `ignoreDockerVersion` (`pulumi.Input[bool]`) - Ignore docker version. Default `true` (bool)
* `ingress` (`pulumi.Input[dict]`) - Kubernetes ingress configuration (list maxitems:1)
* `dnsPolicy` (`pulumi.Input[str]`) - Ingress controller DNS policy. `ClusterFirstWithHostNet`, `ClusterFirst`, `Default`, and `None` are supported. [K8S dns Policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) (string)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `nodeSelector` (`pulumi.Input[dict]`) - Node selector for RKE Ingress (map)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE ingress (string)
* `kubernetesVersion` (`pulumi.Input[str]`) - The Kubernetes master version (string)
* `monitoring` (`pulumi.Input[dict]`) - Kubernetes cluster monitoring (list maxitems:1)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE monitoring (string)
* `network` (`pulumi.Input[dict]`) - Kubernetes cluster networking (list maxitems:1)
* `calicoNetworkProvider` (`pulumi.Input[dict]`) - Calico provider config for RKE network (list maxitems:1)
* `cloudProvider` (`pulumi.Input[str]`) - RKE options for Calico network provider (string)
* `canalNetworkProvider` (`pulumi.Input[dict]`) - Canal provider config for RKE network (list maxitems:1)
* `iface` (`pulumi.Input[str]`) - Iface config Flannel network provider (string)
* `flannelNetworkProvider` (`pulumi.Input[dict]`) - Flannel provider config for RKE network (list maxitems:1)
* `iface` (`pulumi.Input[str]`) - Iface config Flannel network provider (string)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `plugin` (`pulumi.Input[str]`) - Plugin for RKE network. `canal` (default), `flannel`, `calico` and `weave` are supported. (string)
* `weaveNetworkProvider` (`pulumi.Input[dict]`) - Weave provider config for RKE network (list maxitems:1)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `nodes` (`pulumi.Input[list]`) - RKE cluster nodes (list)
* `address` (`pulumi.Input[str]`) - Address ip for node (string)
* `dockerSocket` (`pulumi.Input[str]`) - Docker socket for node (string)
* `hostnameOverride` (`pulumi.Input[str]`) - Hostname override for node (string)
* `internalAddress` (`pulumi.Input[str]`) - Internal ip for node (string)
* `labels` (`pulumi.Input[dict]`) - Node labels (map)
* `nodeId` (`pulumi.Input[str]`) - Id for the node (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `roles` (`pulumi.Input[list]`) - Roles for the node. `controlplane`, `etcd` and `worker` are supported. (list)
* `sshAgentAuth` (`pulumi.Input[bool]`) - Use ssh agent auth. Default `false` (bool)
* `sshKey` (`pulumi.Input[str]`) - Node SSH private key (string)
* `sshKeyPath` (`pulumi.Input[str]`) - Node SSH private key path (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `prefixPath` (`pulumi.Input[str]`) - Prefix to customize Kubernetes path (string)
* `privateRegistries` (`pulumi.Input[list]`) - private registries for docker images (list)
* `isDefault` (`pulumi.Input[bool]`) - Set as default registry. Default `false` (bool)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `url` (`pulumi.Input[str]`) - Registry URL (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `services` (`pulumi.Input[dict]`) - Kubernetes cluster services (list maxitems:1)
* `etcd` (`pulumi.Input[dict]`) - Etcd options for RKE services (list maxitems:1)
* `backup_config` (`pulumi.Input[dict]`) - Backup options for etcd service. Just for Rancher v2.2.x (list maxitems:1)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `intervalHours` (`pulumi.Input[float]`) - Interval hours for etcd backup. Default `12` (int)
* `retention` (`pulumi.Input[float]`) - Retention for etcd backup. Default `6` (int)
* `s3BackupConfig` (`pulumi.Input[dict]`) - S3 config options for etcd backup (list maxitems:1)
* `access_key` (`pulumi.Input[str]`) - The AWS Client ID to use (string)
* `bucketName` (`pulumi.Input[str]`) - Bucket name for S3 service (string)
* `customCa` (`pulumi.Input[str]`) - Base64 encoded custom CA for S3 service. Use filebase64(<FILE>) for encoding file. Available from Rancher v2.2.5 (string)
* `endpoint` (`pulumi.Input[str]`) - Endpoint for S3 service (string)
* `folder` (`pulumi.Input[str]`) - Folder for S3 service. Available from Rancher v2.2.7 (string)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `secret_key` (`pulumi.Input[str]`) - The AWS Client Secret associated with the Client ID (string)
* `safeTimestamp` (`pulumi.Input[bool]`) - Safe timestamp for etcd backup. Default: `false` (bool)
* `caCert` (`pulumi.Input[str]`) - TLS CA certificate for etcd service (string)
* `cert` (`pulumi.Input[str]`) - TLS certificate for etcd service (string)
* `creation` (`pulumi.Input[str]`) - Creation option for etcd service (string)
* `externalUrls` (`pulumi.Input[list]`) - External urls for etcd service (list)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `gid` (`pulumi.Input[float]`) - Etcd service GID. Default: `0`. For Rancher v2.3.x or above (int)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `key` (`pulumi.Input[str]`) - TLS key for etcd service (string)
* `path` (`pulumi.Input[str]`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `retention` (`pulumi.Input[str]`) - Retention option for etcd service (string)
* `snapshot` (`pulumi.Input[bool]`) - Snapshot option for etcd service (bool)
* `uid` (`pulumi.Input[float]`) - Etcd service UID. Default: `0`. For Rancher v2.3.x or above (int)
* `kubeApi` (`pulumi.Input[dict]`) - Kube API options for RKE services (list maxitems:1)
* `admissionConfiguration` (`pulumi.Input[dict]`) - Admission configuration (map)
* `alwaysPullImages` (`pulumi.Input[bool]`) - Enable [AlwaysPullImages](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) Admission controller plugin. [Rancher docs](https://rancher.com/docs/rke/latest/en/config-options/services/#kubernetes-api-server-options) Default: `false` (bool)
* `auditLog` (`pulumi.Input[dict]`) - K8s audit log configuration. (list maxitem: 1)
* `configuration` (`pulumi.Input[dict]`) - Event rate limit configuration. (map)
* `format` (`pulumi.Input[str]`) - Audit log format. Default: 'json' (string)
* `maxAge` (`pulumi.Input[float]`) - Audit log max age. Default: `30` (int)
* `maxBackup` (`pulumi.Input[float]`) - Audit log max backup. Default: `10` (int)
* `maxSize` (`pulumi.Input[float]`) - Audit log max size. Default: `100` (int)
* `path` (`pulumi.Input[str]`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `policy` (`pulumi.Input[str]`) - Audit log policy JSON formatted string. `omitStages` and `rules` json fields are supported. Example: `policy = jsonencode({"rules":[{"level": "Metadata"}]})` (string)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `eventRateLimit` (`pulumi.Input[dict]`) - K8s event rate limit configuration. (list maxitem: 1)
* `configuration` (`pulumi.Input[dict]`) - Event rate limit configuration. (map)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `podSecurityPolicy` (`pulumi.Input[bool]`) - Pod Security Policy option for kube API service. Default `false` (bool)
* `secretsEncryptionConfig` (`pulumi.Input[dict]`) - [Encrypt k8s secret data configuration](https://rancher.com/docs/rke/latest/en/config-options/secrets-encryption/). (list maxitem: 1)
* `customConfig` (`pulumi.Input[dict]`) - Secrets encryption configuration. (map)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `serviceClusterIpRange` (`pulumi.Input[str]`) - Service Cluster ip Range option for kube controller service (string)
* `serviceNodePortRange` (`pulumi.Input[str]`) - Service Node Port Range option for kube API service (string)
* `kubeController` (`pulumi.Input[dict]`) - Kube Controller options for RKE services (list maxitems:1)
* `clusterCidr` (`pulumi.Input[str]`) - Cluster CIDR option for kube controller service (string)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `serviceClusterIpRange` (`pulumi.Input[str]`) - Service Cluster ip Range option for kube controller service (string)
* `kubelet` (`pulumi.Input[dict]`) - Kubelet options for RKE services (list maxitems:1)
* `clusterDnsServer` (`pulumi.Input[str]`) - Cluster DNS Server option for kubelet service (string)
* `clusterDomain` (`pulumi.Input[str]`) - Cluster Domain option for kubelet service (string)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `failSwapOn` (`pulumi.Input[bool]`) - Enable or disable failing when swap on is not supported (bool)
* `generateServingCertificate` (`pulumi.Input[bool]`) - [Generate a certificate signed by the kube-ca](https://rancher.com/docs/rke/latest/en/config-options/services/#kubelet-serving-certificate-requirements). Default `false` (bool)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `infraContainerImage` (`pulumi.Input[str]`) - Infra container image for kubelet service (string)
* `kubeproxy` (`pulumi.Input[dict]`) - Kubeproxy options for RKE services (list maxitems:1)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `scheduler` (`pulumi.Input[dict]`) - Scheduler options for RKE services (list maxitems:1)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `sshAgentAuth` (`pulumi.Input[bool]`) - Use ssh agent auth. Default `false` (bool)
* `sshCertPath` (`pulumi.Input[str]`) - Cluster level SSH certificate path (string)
* `sshKeyPath` (`pulumi.Input[str]`) - Node SSH private key path (string)
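
Example (a minimal sketch of looking up an existing Cluster with this method; the
resource name and cluster ID below are placeholders, not values taken from these docs):

    import pulumi
    import pulumi_rancher2 as rancher2

    imported = rancher2.Cluster.get("imported-cluster", id="c-0000000")
    pulumi.export("clusterName", imported.name)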
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["aks_config"] = aks_config
__props__["annotations"] = annotations
__props__["cluster_auth_endpoint"] = cluster_auth_endpoint
__props__["cluster_monitoring_input"] = cluster_monitoring_input
__props__["cluster_registration_token"] = cluster_registration_token
__props__["cluster_template_answers"] = cluster_template_answers
__props__["cluster_template_id"] = cluster_template_id
__props__["cluster_template_questions"] = cluster_template_questions
__props__["cluster_template_revision_id"] = cluster_template_revision_id
__props__["default_pod_security_policy_template_id"] = default_pod_security_policy_template_id
__props__["default_project_id"] = default_project_id
__props__["description"] = description
__props__["desired_agent_image"] = desired_agent_image
__props__["desired_auth_image"] = desired_auth_image
__props__["docker_root_dir"] = docker_root_dir
__props__["driver"] = driver
__props__["eks_config"] = eks_config
__props__["enable_cluster_alerting"] = enable_cluster_alerting
__props__["enable_cluster_istio"] = enable_cluster_istio
__props__["enable_cluster_monitoring"] = enable_cluster_monitoring
__props__["enable_network_policy"] = enable_network_policy
__props__["gke_config"] = gke_config
__props__["kube_config"] = kube_config
__props__["labels"] = labels
__props__["name"] = name
__props__["rke_config"] = rke_config
__props__["system_project_id"] = system_project_id
__props__["windows_prefered_cluster"] = windows_prefered_cluster
return Cluster(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| StarcoderdataPython |
1627811 | <gh_stars>1-10
"""
Baekjoon problem 6764: Sounds fishy!
"""
nums = [int(input()) for _ in range(4)]
if nums[0] == nums[1] == nums[2] == nums[3]:
print("Fish At Constant Depth")
elif nums[0] < nums[1] < nums[2] < nums[3]:
print("Fish Rising")
elif nums[0] > nums[1] > nums[2] > nums[3]:
print("Fish Diving")
else:
print('No Fish') | StarcoderdataPython |
81066 | <reponame>likx2/HypeFans
import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
from core.utils.func import user_avatar
from unixtimestampfield.fields import UnixTimeStampField
from django_countries.fields import CountryField
from dateutil.relativedelta import relativedelta
from django.db.models.signals import post_save
class User(AbstractUser):
email = models.EmailField(
'E-mail',
unique=True,
help_text='Required',
error_messages={
'unique': "A user with that E-mail already exists.",
},
null=True,
db_index=True
)
avatar = models.ImageField(
upload_to=user_avatar,
verbose_name='Аватар',
null=True,
blank=True
)
background_photo = models.ImageField(
upload_to=user_avatar,
verbose_name='Фото заднего плана',
null=True,
blank=True
)
username = models.CharField(
'Username', max_length=255, null=True, blank=True, unique=True)
first_name = models.CharField(
'First name', max_length=255, null=True, blank=True)
bio = models.TextField(verbose_name='БИО профиля', null=True, blank=True)
birthday_date = UnixTimeStampField(
verbose_name='День рождения', null=True, blank=True)
location = CountryField(null=True, blank=True)
subscribtion_price = models.IntegerField(verbose_name='Цена подписки', default=0)
subscribtion_duration = models.IntegerField(verbose_name='Длина подписки в днях', default=7)
post_amount = models.IntegerField(
verbose_name='Кол-во постов', default=0, blank=True, null=True)
fans_amount = models.IntegerField(
verbose_name='Кол-во фанатов', default=0, blank=True, null=True)
repheral_link = models.CharField(
verbose_name='Реферальная ссылка', max_length=255, null=True, blank=True)
repheral_users = models.ManyToManyField(
'self',
verbose_name='Реферальные пользователи',
related_name='referal_users', blank=True
)
blocked_users = models.ManyToManyField(
'self',
verbose_name='Заблокированные пользователи',
related_name='blocked_users', blank=True
)
my_subscribes = models.ManyToManyField(
'self',
verbose_name='Мои подписки',
related_name='my_subscribes', blank=True
)
email_notifications = models.BooleanField(
'Уведомления по почте', default=False)
push_notifications = models.BooleanField('Пуш уведомления', default=False)
hide_online = models.BooleanField('Скрывать онлайн', default=False)
allow_comments = models.BooleanField(
'Разрешить комментарии постов', default=True)
show_post_amount = models.BooleanField(
'Показывать кол-во постов', default=True)
show_fans_amount = models.BooleanField(
'Показывать кол-во фанов', default=True)
show_watermark = models.BooleanField(
'Показывать вотермарку', default=False)
validated_email = models.BooleanField(
'Подтвержденная почта', default=False)
validated_user = models.BooleanField(
'Подтвержденный профиль', default=False)
credit_amount = models.IntegerField(
verbose_name='Кредитный баланс', default=0)
earned_credits_amount = models.IntegerField(
verbose_name='Заработано', default=0)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = [
'username'
]
@staticmethod
def _create_user(password, email, **extra_fields):
if not email:
raise ValueError('The given email must be set')
user = User.objects.create(
email=email,
**extra_fields
)
user.set_password(password)
user.save()
return user
    def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(password, email, **extra_fields)
    def create_superuser(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(password, email, **extra_fields)
def __str__(self):
return str(self.username)
class Meta:
verbose_name = 'Пользователь'
verbose_name_plural = 'Пользователи'
class Subscription(models.Model):
source = models.ForeignKey(User, verbose_name='Кто подписался', related_name='source_user_subscribe', on_delete=models.CASCADE)
target = models.ForeignKey(User, verbose_name='На кого подписался', related_name='target_user_subscribe', on_delete=models.CASCADE)
start_date = UnixTimeStampField('Время подписки', auto_now_add=True)
end_date = UnixTimeStampField('Время конца подписки')
def __str__(self):
        return f"{self.source}-{self.target}"
class Meta:
verbose_name = 'Подписка'
verbose_name_plural = 'Подписки'
class Card(models.Model):
user = models.ForeignKey(User, related_name='user_card', on_delete=models.CASCADE,)
number = models.BigIntegerField(verbose_name='Номер карты')
date_year = models.CharField(verbose_name='Месяц/год', max_length=5)
cvc = models.CharField(verbose_name='CVC', max_length=3)
creator = models.BooleanField(verbose_name='Карта создателя', default=False)
class Meta:
verbose_name = 'Карта'
verbose_name_plural = 'Карты'
def __str__(self):
return f"{self.pk}-{self.user}"
class Donation(models.Model):
sender = models.ForeignKey(User, related_name='paid_user', on_delete=models.DO_NOTHING)
reciever = models.ForeignKey(User, related_name='recieved_user', on_delete=models.DO_NOTHING)
datetime = UnixTimeStampField('Время оплаты', auto_now_add=True)
amount = models.FloatField(verbose_name='Сумма', null=True, default=0)
class Meta:
verbose_name = 'Пожертвование'
verbose_name_plural = 'Пожертвования'
def __str__(self):
return f"{self.pk}-{self.sender}"
class Payment(models.Model):
card = models.ForeignKey(Card, verbose_name='Карта пополнения', related_name='card_payment', on_delete=models.DO_NOTHING)
datetime = UnixTimeStampField(verbose_name='Время пополнения')
amount = models.FloatField(verbose_name='Сумма пополнения')
class Meta:
verbose_name = 'Пополнение'
verbose_name_plural = 'Пополнения'
def __str__(self):
return f"{self.pk}-{self.card}"
class PendingUser(models.Model):
user= models.ForeignKey(User, verbose_name='Ожидающие верификации', on_delete=models.CASCADE)
photo = models.ImageField(verbose_name='Документы', upload_to='docs/')
verified = models.BooleanField(verbose_name='Верифицирован', null=True, blank=True)
class Meta:
verbose_name = 'Пользователь на верификацию'
verbose_name_plural = 'Пользователи на верификацию'
def __str__(self):
        return f"{self.pk}-{self.user}"
class UserOnline(models.Model):
user = models.CharField(primary_key=True, max_length=255, verbose_name='Ю<NAME>', blank=True)
last_action = UnixTimeStampField(auto_now=True)
class Meta:
verbose_name = 'Последнее действие пользователя'
verbose_name_plural = 'Последние действия пользователей'
def __str__(self):
return f"{self.user}-{self.last_action}"
def update_verification(sender: PendingUser, instance: PendingUser, created: bool, **kwargs):
if not created:
if instance.verified:
            instance.user.validated_user = True
instance.user.save()
post_save.connect(update_verification, sender=PendingUser)
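

# Illustrative flow only (kept as comments so nothing runs at import time); the
# e-mail, password and photo path below are made-up example values:
#
#   fan = User().create_user(email='fan@example.com', password='example-password')
#   pending = PendingUser.objects.create(user=fan, photo='docs/id_scan.png')
#   pending.verified = True
#   pending.save()  # post_save fires update_verification and flags the linked User as validated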
| StarcoderdataPython |
30578 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MrTaskman worker script which executes MacOS commands."""
__author__ = '<EMAIL> (<NAME>)'
import cStringIO
import datetime
import httplib
import json
import logging
import os
import socket
import StringIO
import subprocess
import sys
import time
import urllib2
import gflags
from client import mrtaskman_api
from client import package_installer
from client import package_cache
from common import device_info
from common import http_file_upload
from common import parsetime
from common import split_stream
FLAGS = gflags.FLAGS
gflags.DEFINE_string('log_filename', '', 'Where to log stuff. Required.')
gflags.DEFINE_string('worker_name', '', 'Unique worker name.')
gflags.DEFINE_list('worker_capabilities', ['macos', 'android'],
'Things this worker can do.')
# Package cache flags.
gflags.DEFINE_boolean('use_cache', True, 'Whether or not to use package cache.')
gflags.DEFINE_string('cache_path',
'/usr/local/worker_cache',
'Where to cache packages.')
gflags.DEFINE_integer('min_duration_seconds', 60,
'Minimum time to cache something.')
gflags.DEFINE_integer('max_cache_size_bytes', 2 * 1024 * 1024 * 1024,
'Maximum size of the cache in bytes.')
gflags.DEFINE_float('low_watermark_percentage', 0.6,
'When cleaning up, keeps at least this much cache.')
gflags.DEFINE_float('high_watermark_percentage', 0.8,
'When cleaning up, deletes to below this line.')
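

# Example invocation (a sketch only; the script name, log path and worker name are
# placeholders, not values defined elsewhere in this file):
#   python worker.py --log_filename=/var/log/mrtaskman_worker.log \
#       --worker_name=mac-worker-01 --worker_capabilities=macos,android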
class TaskError(Exception):
pass
class MrTaskmanUnrecoverableHttpError(TaskError):
pass
class MrTaskmanRecoverableHttpError(TaskError):
pass
def GetHostname():
return socket.gethostname()
class MacOsWorker(object):
"""Executes macos tasks."""
def __init__(self, worker_name, log_stream):
self.worker_name_ = worker_name
self.log_stream_ = log_stream
self.api_ = mrtaskman_api.MrTaskmanApi()
self.hostname_ = GetHostname()
self.capabilities_ = {'executor': self.GetCapabilities()}
self.executors_ = {}
for capability in self.capabilities_['executor']:
self.executors_[capability] = self.ExecuteTask
self.use_cache_ = FLAGS.use_cache
if self.use_cache_:
self.package_cache_ = package_cache.PackageCache(
FLAGS.min_duration_seconds,
FLAGS.max_cache_size_bytes,
FLAGS.cache_path,
FLAGS.low_watermark_percentage,
FLAGS.high_watermark_percentage)
def GetCapabilities(self):
capabilities = device_info.GetCapabilities()
capabilities.append('macos')
capabilities.append(self.worker_name_)
return capabilities
def AssignTask(self):
"""Makes a request to /tasks/assign to get assigned a task.
Returns:
Task if a task was assigned, or None.
"""
try:
task = self.api_.AssignTask(self.worker_name_, self.hostname_,
self.capabilities_)
return task
except urllib2.HTTPError, e:
logging.info('Got %d HTTP response from MrTaskman on AssignTask.',
e.code)
return None
except urllib2.URLError, e:
logging.info('Got URLError trying to reach MrTaskman: %s', e)
return None
def SendResponse(self, task_id, stdout, stderr, task_result):
while True:
try:
# TODO(jeff.carollo): Refactor.
device_sn = device_info.GetDeviceSerialNumber()
task_result['device_serial_number'] = device_sn
response_url = self.api_.GetTaskCompleteUrl(task_id)
if not response_url:
logging.info('No task complete url for task_id %s', task_id)
return
response_url = response_url.get('task_complete_url', None)
if not response_url:
logging.info('No task complete url for task_id %s', task_id)
return
self.api_.SendTaskResult(response_url, stdout, stderr, task_result)
logging.info('Successfully sent response for task %s: %s',
task_id, self.api_.MakeTaskUrl(task_id))
return
except urllib2.HTTPError, error_response:
body = error_response.read()
code = error_response.code
if code == 404:
logging.warning('TaskCompleteUrl timed out.')
continue
logging.warning('SendResponse HTTPError code %d\n%s',
code, body)
return
except urllib2.URLError, e:
logging.info(
'Got URLError trying to send response to MrTaskman: %s', e)
logging.info('Retrying in 10 seconds')
time.sleep(10)
continue
def GetTaskCompleteUrl(self, task_id):
try:
return self.api_.GetTaskCompleteUrl(task_id)
except urllib2.HTTPError, error_response:
body = error_response.read()
code = error_response.code
logging.warning('GetTaskCompleteUrl HTTPError code %d\n%s',
code, body)
def ShouldWaitForDevice(self):
"""Returns True iff this worker controls a device which is offline."""
if not device_info.DEVICE_SN:
return False
return not device_info.DeviceIsConnected()
def PollAndExecute(self):
logging.info('Polling for work...')
device_active = True
while True:
try:
if self.ShouldWaitForDevice():
if device_active:
logging.info('Device %s is offline. Waiting for it to come back.',
device_info.DEVICE_SN)
device_active = False
time.sleep(10)
continue
if not device_active:
logging.info('Device came back online.')
device_active = True
# TODO(jeff.carollo): Wrap this in a catch-all Excepion handler that
# allows us to continue executing in the face of various task errors.
task = self.AssignTask()
if not task:
time.sleep(10)
continue
except KeyboardInterrupt:
logging.info('Caught CTRL+C. Exiting.')
return
task_stream = cStringIO.StringIO()
task_logs = None
self.log_stream_.AddStream(task_stream)
try:
logging.info('Got a task:\n%s\n', json.dumps(task, 'utf-8', indent=2))
config = task['config']
task_id = int(task['id'])
attempt = task['attempts']
# Figure out which of our executors we can use.
executor = None
allowed_executors = config['task']['requirements']['executor']
for allowed_executor in allowed_executors:
try:
executor = self.executors_[allowed_executor]
except KeyError:
pass
if executor is not None:
break
if executor is None:
# TODO: Send error response to server.
# This is probably our fault - we said we could do something
# that we actually couldn't do.
logging.error('No matching executor from %s', allowed_executors)
raise Exception('No allowed executors matched our executors_:\n' +
'%s\nvs.%s\n' % (allowed_executors, self.executors_))
try:
# We've got a valid executor, so use it.
(results, stdout, stderr) = executor(task_id, attempt, task, config)
except MrTaskmanUnrecoverableHttpError:
logging.error(
'Unrecoverable MrTaskman HTTP error. Aborting task %d.', task_id)
continue
finally:
self.log_stream_.RemoveStream(task_stream)
task_logs = task_stream.getvalue().decode('utf-8')
task_stream.close()
try:
results['worker_log'] = task_logs.encode('utf-8')
self.SendResponse(task_id,
stdout,
stderr,
results)
except MrTaskmanUnrecoverableHttpError:
logging.error(
'Unrecoverable MrTaskman HTTP error. Aborting task %d.', task_id)
logging.info('Polling for work...')
# Loop back up and poll for the next task.
def ExecuteTask(self, task_id, attempt, task, config):
    logging.info('Received task %s', task_id)
try:
tmpdir = package_installer.TmpDir()
# Download the files we need from the server.
files = config.get('files', [])
self.DownloadAndStageFiles(files)
# Install any packages we might need.
# TODO(jeff.carollo): Handle any exceptions raised here.
packages = config.get('packages', [])
self.DownloadAndInstallPackages(packages, tmpdir)
# We probably don't want to run forever. Default to 12 minutes.
timeout = config['task'].get('timeout', '12m')
timeout = parsetime.ParseTimeDelta(timeout)
# Get any environment variables to inject.
env = config['task'].get('env', {})
env = env.update(os.environ)
# Get our command and execute it.
command = config['task']['command']
logging.info('Running command %s', command)
(exit_code, stdout, stderr, execution_time, result_metadata) = (
self.RunCommandRedirectingStdoutAndStderrWithTimeout(
command, env, timeout, tmpdir.GetTmpDir()))
logging.info('Executed %s with result %d', command, exit_code)
results = {
'kind': 'mrtaskman#task_complete_request',
'task_id': task_id,
'attempt': attempt,
'exit_code': exit_code,
'execution_time': execution_time.total_seconds(),
'result_metadata': result_metadata
}
return (results, stdout, stderr)
finally:
tmpdir.CleanUp()
def RunCommandRedirectingStdoutAndStderrWithTimeout(
self, command, env, timeout, cwd):
command = ' '.join([command, '>stdout', '2>stderr'])
# TODO: More precise timing through process info.
begin_time = datetime.datetime.now()
timeout_time = begin_time + timeout
process = subprocess.Popen(args=command,
env=env,
shell=True,
cwd=cwd)
ret = None
while None == ret and (datetime.datetime.now() < timeout_time):
time.sleep(0.02)
ret = process.poll()
finished_time = datetime.datetime.now()
if finished_time >= timeout_time and (None == ret):
logging.info('command %s timed out.', command)
process.terminate()
process.wait()
ret = -99
execution_time = finished_time - begin_time
try:
stdout = file(os.path.join(cwd, 'stdout'), 'rb')
except IOError, e:
logging.error('stdout was not written.')
stdout = file(os.path.join(cwd, 'stdout'), 'w')
stdout.write('No stdout.')
stdout.flush()
stdout.close()
stdout = file(os.path.join(cwd, 'stdout'), 'rb')
try:
stderr = file(os.path.join(cwd, 'stderr'), 'rb')
except IOError, e:
logging.error('stderr was not written.')
stderr = file(os.path.join(cwd, 'stderr'), 'w')
stderr.write('No stderr.')
stderr.flush()
stderr.close()
stderr = file(os.path.join(cwd, 'stderr'), 'rb')
try:
result_metadata_file = file(os.path.join(cwd, 'result_metadata'), 'r')
result_metadata = json.loads(result_metadata_file.read().decode('utf-8'))
except:
result_metadata = None
return (ret, stdout, stderr, execution_time, result_metadata)
def DownloadAndStageFiles(self, files):
logging.info('Not staging files: %s', files)
# TODO: Stage files.
def DownloadAndInstallPackages(self, packages, tmpdir):
# TODO(jeff.carollo): Create a package cache if things take off.
for package in packages:
attempts = 0
while True:
try:
# TODO(jeff.carollo): Put package cache code here.
if self.use_cache_:
self.package_cache_.CopyToDirectory(
package, tmpdir.GetTmpDir(),
package_installer.DownloadAndInstallPackage)
else:
package_installer.DownloadAndInstallPackage(
package['name'], package['version'],
tmpdir.GetTmpDir())
break
except urllib2.HTTPError, e:
logging.error('Got HTTPError %d trying to grab package %s.%s: %s',
e.code, package['name'], package['version'], e)
raise MrTaskmanUnrecoverableHttpError(e)
except (urllib2.URLError, httplib.IncompleteRead,
httplib.BadStatusLine, httplib.HTTPException), e:
logging.error('Got URLError trying to grab package %s.%s: %s',
package['name'], package['version'], e)
logging.info('Retrying in 10')
attempts += 1
# TODO(jeff.carollo): Figure out a robust way to do this.
# Likely need to just try a few times to get around Internet blips
# then mark task as failed for package reasons.
if attempts < 10:
time.sleep(10)
continue
else:
            logging.error('Failed to grab package after 10 attempts. Aborting.')
raise MrTaskmanUnrecoverableHttpError(e)
except IOError, e:
logging.error('Got IOError trying to grab package %s.%s: %s',
package['name'], package['version'], e)
raise MrTaskmanUnrecoverableHttpError(e)
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
sys.stderr.write('%s\n' % e)
sys.exit(1)
return
# Set default socket timeout to 2 hours so that we catch missing timeouts.
socket.setdefaulttimeout(2 * 60 * 60)
if not FLAGS.log_filename:
sys.stderr.write('Flag --log_filename is required.\n')
sys.exit(-9)
return
try:
from third_party import portalocker
log_file = file(FLAGS.log_filename, 'a+')
portalocker.lock(log_file, portalocker.LOCK_EX | portalocker.LOCK_NB)
except Exception, e:
logging.exception(e)
print 'Could not get exclusive lock.'
sys.exit(-10)
return
try:
FORMAT = '%(asctime)-15s %(message)s'
log_stream = split_stream.SplitStream(sys.stdout, log_file)
logging.basicConfig(format=FORMAT, level=logging.DEBUG,
stream=log_stream)
macos_worker = MacOsWorker(FLAGS.worker_name, log_stream=log_stream)
# Run forever, executing tasks from the server when available.
macos_worker.PollAndExecute()
finally:
logging.shutdown()
log_file.flush()
portalocker.unlock(log_file)
log_file.close()
if __name__ == '__main__':
main(sys.argv)
| StarcoderdataPython |
157962 | <filename>mipqctool/controller/inferschema.py
import os
from mipqctool.model.qcfrictionless import QCtoDC, CdeDict, QcTable
from mipqctool.controller.tablereport import TableReport
class InferSchema(object):
def __init__(self, table, csvname, sample_rows=100, maxlevels=10, cdedict=None, na_empty_strings_only=False):
"""Class for infering a dataset's schema which comes in csv file.
Arguments:
:param table: a QcTable holding the dataset csv data.
:param csvname: a string with the filename of the dataset csv.
:param sample_rows: number of rows that are going to be used for dataset's schema inference
:param maxlevel: number of unique values in order to one infered variable to be considered as nominal(categorical)
above that number the variable will be considered as a text data type.
:param cdedict: A CdeDict object containg info about all CDE variables
"""
self.__table = table
self.__csvname = csvname
self.__table.infer(limit=sample_rows, maxlevels=maxlevels, na_empty_strings_only=na_empty_strings_only)
self.__suggestions = None
if cdedict:
self.__cdedict = cdedict
self.__tablereport = TableReport(self.__table, id_column=1)
@property
def tablereport(self):
return self.__tablereport
@property
def invalid_nominals(self) -> dict:
"""Returns nominal fields with invalid enumrations.
An enumration is invalid if it is an SQL keyword or is
a string and starts with a digit.
"""
return self.__table.invalid_nominals
def suggest_cdes(self, threshold):
"""Arguments:
        :param threshold: 0-1 similarity threshold; below it no cde is suggested """
if self.__cdedict:
suggestions = {}
for name, columnreport in self.__tablereport.columnreports.items():
var_name = columnreport.name
cde = self.__cdedict.suggest_cde(columnreport, threshold=threshold)
if cde:
suggestions[var_name] = [cde.code, cde.conceptpath]
else:
suggestions[var_name] = [None, None]
self.__suggestions = suggestions
else:
raise Exception('Error with the CDE dictionary')
def export2excel(self, filepath):
qctodc = QCtoDC(self.__table.schema.descriptor, self.__csvname, self.__suggestions)
qctodc.export2excel(filepath)
def expoct2qcjson(self, filename):
self.__table.schema.save(filename)
@classmethod
def from_disc(cls, csvpath, sample_rows=100, maxlevels=10, cdedict=None, na_empty_strings_only=False):
"""
Constructs an InferSchema from loading a csv file from local disc.
Arguments:
:param csvpath: string filepath of the csv
:param sample_rows: number of rows that are going to be used for dataset's schema inference
        :param maxlevels: number of unique values for an inferred variable to be considered nominal (categorical);
        above that number the variable will be considered as a text data type.
        :param cdedict: A CdeDict object containing info about all CDE variables
"""
dataset = QcTable(csvpath, schema=None)
csvname = os.path.basename(csvpath)
return cls(table=dataset, csvname=csvname,
sample_rows=sample_rows,maxlevels=maxlevels,
cdedict=cdedict, na_empty_strings_only=na_empty_strings_only)
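

# Minimal usage sketch (not part of the original module); the csv path and output
# filename below are placeholders, and building a CdeDict is omitted:
if __name__ == '__main__':
    inferred = InferSchema.from_disc('dataset.csv', sample_rows=100, maxlevels=10)
    inferred.expoct2qcjson('dataset_schema.json')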
| StarcoderdataPython |
1762726 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Zinc dumping and parsing module
# (C) 2016 VRT Systems
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import base64
import binascii
import datetime
import random
import string
import sys
import traceback
import six
import hszinc
from hszinc import VER_3_0, Grid, MODE_ZINC, MODE_JSON, XStr
from .pint_enable import to_pint
STR_CHARSET = string.ascii_letters + string.digits + '\n\r\t\f\b'
GENERATION_NUMBER = 1 # FIXME
PERCENT_RECURSIVE = 1
def gen_random_const():
return random.choice([True, False, None, hszinc.MARKER, hszinc.REMOVE, hszinc.NA])
def gen_random_ref():
# Generate a randomised reference.
name = gen_random_str(charset= \
string.ascii_letters + string.digits \
+ '_:-.~')
if random.choice([True, False]):
value = gen_random_str(charset= \
string.ascii_letters + string.digits + '_')
else:
value = None
return hszinc.Ref(name, value)
def gen_random_bin():
# Generate a randomized binary
return hszinc.Bin(random.choice([
'text/plain',
'text/html',
'text/zinc',
'application/json',
'application/octet-stream',
'image/png',
'image/jpeg',
]))
def gen_random_xstr():
# Generate a randomized binary
barray = bytearray(random.getrandbits(8) for _ in range(5))
return XStr(*random.choice([
('hex', binascii.hexlify(barray).decode('ascii')),
('b64', binascii.b2a_base64(barray)[:-1] if sys.version_info[0] <= 2
else binascii.b2a_base64(barray).decode("ascii")
)
]))
def gen_random_uri():
return hszinc.Uri(gen_random_str(charset= \
string.ascii_letters + string.digits))
def gen_random_str(min_length=1, max_length=20, charset=STR_CHARSET):
# Generate a random 20-character string
return ''.join([random.choice(charset) for c in range(0,
random.randint(min_length, max_length))])
def gen_random_date():
# This might generate an invalid date, we keep trying until we get one.
while True:
try:
return datetime.date(random.randint(1, 3000),
random.randint(1, 12), random.randint(1, 31))
except ValueError:
pass
def gen_random_time():
return datetime.time(random.randint(0, 23), random.randint(0, 59),
random.randint(0, 59), random.randint(0, 999999))
def gen_random_date_time():
# Pick a random timezone
tz_name = random.choice(list(hszinc.zoneinfo.get_tz_map().keys()))
tz = hszinc.zoneinfo.timezone(tz_name)
return tz.localize(datetime.datetime.combine(
gen_random_date(), gen_random_time()))
def gen_random_coordinate():
return hszinc.Coordinate( \
round(gen_random_num(360) - 180.0, 2),
round(gen_random_num(360) - 180.0, 2))
def gen_random_num(scale=1000, digits=2):
return round(random.random() * scale, digits)
def gen_random_quantity():
return hszinc.Quantity(gen_random_num(),
to_pint('percent'))
def gen_random_list():
return [gen_random_scalar() for x in range(0, random.randint(0, 2))]
def gen_random_map():
return {gen_random_name(): gen_random_scalar() for x in range(0, random.randint(0, 2))}
RANDOM_TYPES = [
# Only for v2.0 gen_random_bin,
gen_random_xstr,
# gen_random_const, gen_random_ref, gen_random_uri, gen_random_xstr,
# gen_random_str, gen_random_date, gen_random_time, gen_random_date_time,
# gen_random_coordinate, gen_random_num, gen_random_quantity
]
def gen_random_scalar():
if (random.randint(0, 100) < PERCENT_RECURSIVE):
return random.choice(RANDOM_RECURSIVE_TYPES)()
else:
return random.choice(RANDOM_TYPES)()
def gen_random_name(existing=None):
while True:
meta = random.choice(string.ascii_lowercase) \
+ gen_random_str(min_length=0, max_length=7, \
charset=string.ascii_letters + string.digits)
if (existing is None) or (meta not in existing):
return meta
def gen_random_meta():
meta = hszinc.MetadataObject()
names = set()
for n in range(0, random.randint(1, 5)):
name = gen_random_name(existing=names)
value = gen_random_scalar()
meta[name] = value
return meta
def gen_random_grid():
# Generate a randomised grid of values and try parsing it back.
grid = hszinc.Grid(version=VER_3_0)
grid.metadata.extend(gen_random_meta())
# Randomised columns
for n in range(0, random.randint(1, 5)):
col_name = gen_random_name(existing=grid.column)
if random.choice([True, False]):
grid.column[col_name] = gen_random_meta()
else:
grid.column[col_name] = {}
# Randomised rows
for n in range(0, random.randint(0, 20)):
row = {}
for c in grid.column.keys():
if random.choice([True, False]):
row[c] = gen_random_scalar()
grid.append(row)
return grid
RANDOM_RECURSIVE_TYPES = [gen_random_list, gen_random_map, gen_random_grid]
def dump_grid(g):
print('Version: %s' % g.version)
print('Metadata:')
for k, v in g.metadata.items():
print(' %s = %r' % (k, v))
print('Columns:')
for c, meta in g.column.items():
print(' %s:' % c)
for k, v in g.column[c].items():
print(' %s = %r' % (k, v))
print('Rows:')
for row in g:
print('---')
for c, v in row.items():
print(' %s = %r' % (c, v))
def approx_check(v1, v2):
# Check types match
if (isinstance(v1, six.string_types) \
and isinstance(v2, six.string_types)):
assert type(v1) == type(v2), '%s != %s' % (type(v1), type(v2))
if isinstance(v1, datetime.time):
assert v1.replace(microsecond=0) == v2.replace(microsecond=0)
elif isinstance(v1, datetime.datetime):
assert v1.tzinfo == v2.tzinfo
assert v1.date() == v2.date()
approx_check(v1.time(), v2.time())
elif isinstance(v1, hszinc.Quantity):
assert v1.unit == v2.unit
approx_check(v1.value, v2.value)
elif isinstance(v1, hszinc.Coordinate):
approx_check(v1.latitude, v2.latitude)
approx_check(v1.longitude, v2.longitude)
elif isinstance(v1, float) or isinstance(v2, float):
assert abs(v1 - v2) < 0.000001
elif isinstance(v1, Grid):
approx_check_grid(v1, v2)
else:
assert v1 == v2, '%r != %r' % (v1, v2)
def _try_dump_parse(ref_grid, mode):
try:
# Dump the randomised grid to a string
grid_str = hszinc.dump(ref_grid, mode=mode)
except:
# Dump some detail about the grid
print('Failed to dump grid.')
dump_grid(ref_grid)
raise
# Parse the grid string
try:
parsed_grid = hszinc.parse(grid_str, mode=mode, single=True)
except:
print('Failed to parse dumped grid')
dump_grid(ref_grid)
print('--- Parsed string ---')
print(grid_str)
raise
approx_check_grid(parsed_grid, ref_grid)
def try_dump_parse_json():
ref_grid = gen_random_grid()
_try_dump_parse(ref_grid, MODE_JSON)
def try_dump_parse_zinc():
ref_grid = gen_random_grid()
_try_dump_parse(ref_grid, MODE_ZINC)
def approx_check_grid(parsed_grid, ref_grid):
# Check metadata matches
try:
assert set(ref_grid.metadata.keys()) \
== set(parsed_grid.metadata.keys())
for key in ref_grid.metadata.keys():
approx_check(ref_grid.metadata[key], parsed_grid.metadata[key])
except:
print(traceback.format_exc())
print('Mismatch in metadata')
print('Reference grid')
dump_grid(ref_grid)
print('Parsed grid')
dump_grid(parsed_grid)
raise
try:
# Check column matches
assert set(ref_grid.column.keys()) \
== set(parsed_grid.column.keys())
except:
print(traceback.format_exc())
print('Mismatch in column')
print('Reference grid')
dump_grid(ref_grid)
print('Parsed grid')
dump_grid(parsed_grid)
raise
for col in ref_grid.column.keys():
try:
for key in ref_grid.column[col].keys():
approx_check(ref_grid.column[col][key], \
parsed_grid.column[col][key])
except:
print(traceback.format_exc())
print('Mismatch in metadata for column %s' % col)
print('Reference: %r' % ref_grid.column[col])
print('Parsed: %r' % parsed_grid.column[col])
raise
try:
# Check row matches
assert len(ref_grid) == len(parsed_grid)
except:
print(traceback.format_exc())
print('Mismatch in row count')
print('Reference grid')
dump_grid(ref_grid)
print('Parsed grid')
dump_grid(parsed_grid)
for (ref_row, parsed_row) in zip(ref_grid, parsed_grid):
try:
for col in ref_grid.column.keys():
approx_check(ref_row.get(col), parsed_row.get(col))
except:
print(traceback.format_exc())
print('Mismatch in row')
print('Reference:')
print(ref_row)
print('Parsed:')
print(parsed_row)
raise
assert parsed_grid == ref_grid
def test_loopback_zinc():
for trial in range(0, GENERATION_NUMBER):
try_dump_parse_zinc()
def test_loopback_json():
for trial in range(0, GENERATION_NUMBER):
try_dump_parse_json()
| StarcoderdataPython |
3325201 | <reponame>kids-first/kf-lib-data-ingest
import os
import pytest
from conftest import TEST_DATA_DIR
from kf_lib_data_ingest.etl.configuration.base_config import (
AbstractConfig,
ConfigValidationError,
PyModuleConfig,
YamlConfig,
)
from kf_lib_data_ingest.etl.configuration.ingest_package_config import (
IngestPackageConfig,
)
def test_config_abs_cls():
# Declare a concrete ingest stage class
class InvalidConfig(AbstractConfig):
pass
# Test that TypeError is raised if all abstract classes are not impl
with pytest.raises(TypeError) as e:
InvalidConfig()
for m in ["_read_file", "_validate"]:
assert m in str(e.value)
class Config(AbstractConfig):
def _read_file(self, filepath):
pass
def _validate(self):
pass
# Test that FileNotFoundError raised on file not exists
with pytest.raises(FileNotFoundError):
Config("foo")
Config(os.path.join(TEST_DATA_DIR, "valid_yaml_config.yml"))
def test_yaml_config():
schema_path = os.path.join(TEST_DATA_DIR, "yaml_schema.yml")
config_path = os.path.join(TEST_DATA_DIR, "invalid_yaml_config.yml")
with pytest.raises(ConfigValidationError) as e:
YamlConfig(config_path, schema_path=schema_path)
assert config_path in str(e.value)
config_path = os.path.join(TEST_DATA_DIR, "valid_yaml_config.yml")
YamlConfig(config_path, schema_path=schema_path)
def test_attr_forwarding():
pmc = PyModuleConfig(
os.path.join(TEST_DATA_DIR, "test_study", "transform_module.py")
)
assert pmc.contents.transform_function == pmc.transform_function
assert pmc.foo is None
config_path = os.path.join(TEST_DATA_DIR, "valid_yaml_config.yml")
schema_path = os.path.join(TEST_DATA_DIR, "yaml_schema.yml")
yc = YamlConfig(config_path, schema_path=schema_path)
assert yc.contents.get("params") == yc.params
assert yc.foo is None
def test_ingest_package_config(tmpdir):
bipcf_path = os.path.join(tmpdir, "bad_ingest_package_config.py")
with open(bipcf_path, "w") as bipcf:
bipcf.write("HI, LOL!")
with pytest.raises(ConfigValidationError):
IngestPackageConfig(bipcf_path) # not valid python (syntax)
with open(bipcf_path, "w") as bipcf:
bipcf.write("foo = 'HI, LOL!'")
with pytest.raises(ConfigValidationError):
IngestPackageConfig(bipcf_path) # missing required members
confdir = os.path.join(tmpdir, "extract_configs")
os.mkdir(confdir)
with open(bipcf_path, "w") as bipcf:
bipcf.write(
"\n".join(
[
f'extract_config_dir = "{confdir}"',
'project = "SD_12345678"',
"target_service_entities = []",
]
)
)
IngestPackageConfig(bipcf_path)
def test_extract_config():
pass
def test_dataservice_schema():
pass
def test_standard_model_schema():
pass
| StarcoderdataPython |
174299 | import datetime
from http import HTTPStatus
from sanic.response import json
from core.helpers import jsonapi
from apps.commons.errors import DataNotFoundError
from apps.news.models import News
from apps.news.repository import NewsRepo
from apps.news.services import UpdateService
async def update(request, id):
response = {}
status = HTTPStatus.OK
repo = NewsRepo(News)
service = UpdateService(id, request.json, repo)
try:
news = service.call()
response = {
'data': {
'id': str(news.id),
'type': 'news',
'attributes': {
'title': news.title,
'content': news.content,
'updated_at': str(news.updated_at)
}
}
}
except DataNotFoundError as not_found_err:
error = jsonapi.format_error(title='Data not found', detail=not_found_err.message)
response = jsonapi.return_an_error(error)
status = HTTPStatus.NOT_FOUND
return json(response, status=status)
| StarcoderdataPython |
4808294 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Arduino lock-in amplifier
"""
__author__ = "<NAME>"
__authoremail__ = "<EMAIL>"
__url__ = "https://github.com/Dennis-van-Gils/DvG_Arduino_lock-in_amp"
__date__ = "31-08-2021"
__version__ = "2.0.0"
# pylint: disable=invalid-name
import os
import sys
import time as Time
import psutil
from PyQt5 import QtCore
from PyQt5 import QtWidgets as QtWid
from PyQt5.QtCore import QDateTime
import numpy as np
from dvg_pyqt_filelogger import FileLogger
from dvg_debug_functions import dprint
from dvg_fftw_welchpowerspectrum import FFTW_WelchPowerSpectrum
from Alia_protocol_serial import Alia, Waveform
from Alia_qdev import Alia_qdev
from Alia_gui import MainWindow
# Show debug info in terminal? Warning: Slow! Do not leave on unintentionally.
DEBUG = False
DEBUG_TIMING = False
# Enable GPU-accelerated computations on an NVIDIA videocard with CUDA support?
# Affects the FIR filters.
USE_CUDA = False
# ------------------------------------------------------------------------------
# current_date_time_strings
# ------------------------------------------------------------------------------
def current_date_time_strings():
cur_date_time = QDateTime.currentDateTime()
return (
cur_date_time.toString("dd-MM-yyyy"),
cur_date_time.toString("HH:mm:ss"),
)
# ------------------------------------------------------------------------------
# Program termination routines
# ------------------------------------------------------------------------------
def stop_running():
app.processEvents()
alia_qdev.turn_off()
alia_qdev.quit()
logger.close()
@QtCore.pyqtSlot()
def notify_connection_lost():
stop_running()
excl = " ! ! ! ! ! ! ! ! "
window.qlbl_title.setText("%sLOST CONNECTION%s" % (excl, excl))
str_cur_date, str_cur_time = current_date_time_strings()
str_msg = "%s %s\nLost connection to Arduino on port %s.\n" % (
str_cur_date,
str_cur_time,
alia.ser.portstr,
)
print("\nCRITICAL ERROR @ %s" % str_msg)
reply = QtWid.QMessageBox.warning(
window, "CRITICAL ERROR", str_msg, QtWid.QMessageBox.Ok
)
if reply == QtWid.QMessageBox.Ok:
pass # Leave the GUI open for read-only inspection by the user
@QtCore.pyqtSlot()
def about_to_quit():
print("\nAbout to quit")
stop_running()
alia.close()
# ------------------------------------------------------------------------------
# Lock-in amplifier data-acquisition update function
# ------------------------------------------------------------------------------
def lockin_DAQ_update():
"""Listen for new data blocks send by the lock-in amplifier and perform the
main mathematical operations for signal processing. This function will run
in a dedicated thread (i.e. `worker_DAQ`), separated from the main program
thread that handles the GUI.
NOTE: NO GUI OPERATIONS ARE ALLOWED HERE. Otherwise it may affect the
`worker_DAQ` thread negatively, resulting in lost blocks of data.
"""
# Shorthands
c: Alia.Config = alia.config
state: Alia_qdev.State = alia_qdev.state
    # Prevent throwing errors if just paused
if alia.lockin_paused:
return False
if DEBUG_TIMING:
tock = Time.perf_counter()
print("%.2f _DAQ" % (tock - alia.tick))
alia.tick = tock
    # Listen for data buffers sent by the lock-in
(
success,
_counter,
state.time,
state.ref_X,
state.ref_Y,
state.sig_I,
) = alia.listen_to_lockin_amp()
if not success:
dprint("@ %s %s" % current_date_time_strings())
return False
# Detect dropped blocks
# ---------------------
# TODO: Rethink this procedure. Might be easier done with the index of the
    # block that also gets sent by the Arduino. We either receive a full block,
# or we don't. There are no partial blocks that can be received.
alia_qdev.state.blocks_received += 1
last_time = state.rb_time[-1] if state.blocks_received > 1 else np.nan
dT = (state.time[0] - last_time) / 1e6 # [usec] to [sec]
if dT > c.SAMPLING_PERIOD * 1e6 * 1.10: # Allow a little clock jitter
N_dropped_samples = int(round(dT / c.SAMPLING_PERIOD) - 1)
dprint("Dropped samples: %i" % N_dropped_samples)
dprint("@ %s %s" % current_date_time_strings())
# Replace dropped samples with np.nan samples.
# As a result, the filter output will contain a continuous series of
# np.nan values in the output for up to `RingBuffer_FIR_Filter.
# T_settle_filter` seconds long after the occurrence of the last dropped
# sample.
state.rb_time.extend(
last_time
+ np.arange(1, N_dropped_samples + 1) * c.SAMPLING_PERIOD * 1e6
)
state.rb_ref_X.extend(np.full(N_dropped_samples, np.nan))
state.rb_ref_Y.extend(np.full(N_dropped_samples, np.nan))
state.rb_sig_I.extend(np.full(N_dropped_samples, np.nan))
# Stage 0
# -------
state.sig_I_min = np.min(state.sig_I)
state.sig_I_max = np.max(state.sig_I)
state.sig_I_avg = np.mean(state.sig_I)
state.sig_I_std = np.std(state.sig_I)
state.rb_time.extend(state.time)
state.rb_ref_X.extend(state.ref_X)
state.rb_ref_Y.extend(state.ref_Y)
state.rb_sig_I.extend(state.sig_I)
# Note: `ref_X` [non-dim] is transformed to `ref_X*` [V]
# Note: `ref_Y` [non-dim] is transformed to `ref_Y*` [V]
window.hcc_ref_X.extendData(
state.time, np.multiply(state.ref_X, c.ref_V_ampl_RMS) + c.ref_V_offset
)
window.hcc_ref_Y.extendData(
state.time, np.multiply(state.ref_Y, c.ref_V_ampl_RMS) + c.ref_V_offset
)
window.hcc_sig_I.extendData(state.time, state.sig_I)
# Stage 1
# -------
# fmt: off
# Apply filter 1 to sig_I
state.filt_I = alia_qdev.firf_1_sig_I.apply_filter(state.rb_sig_I)
if alia_qdev.firf_1_sig_I.filter_has_settled:
# Retrieve the block of original data from the past that aligns with
# the current filter output
valid_slice = alia_qdev.firf_1_sig_I.rb_valid_slice
state.time_1 = state.rb_time [valid_slice]
old_sig_I = state.rb_sig_I[valid_slice]
old_ref_X = state.rb_ref_X[valid_slice]
old_ref_Y = state.rb_ref_Y[valid_slice]
# Heterodyne mixing
np.multiply(state.filt_I, old_ref_X, out=state.mix_X)
np.multiply(state.filt_I, old_ref_Y, out=state.mix_Y)
else:
state.time_1.fill(np.nan)
old_sig_I = np.full(c.BLOCK_SIZE, np.nan)
state.mix_X.fill(np.nan)
state.mix_Y.fill(np.nan)
state.filt_I_min = np.min(state.filt_I)
state.filt_I_max = np.max(state.filt_I)
state.filt_I_avg = np.mean(state.filt_I)
state.filt_I_std = np.std(state.filt_I)
state.rb_time_1.extend(state.time_1)
state.rb_filt_I.extend(state.filt_I)
state.rb_mix_X .extend(state.mix_X)
state.rb_mix_Y .extend(state.mix_Y)
window.hcc_filt_1_in .extendData(state.time_1, old_sig_I)
window.hcc_filt_1_out.extendData(state.time_1, state.filt_I)
window.hcc_mix_X .extendData(state.time_1, state.mix_X)
window.hcc_mix_Y .extendData(state.time_1, state.mix_Y)
# fmt: on
# Stage 2
# -------
# Apply filter 2 to the mixer output
state.X = alia_qdev.firf_2_mix_X.apply_filter(state.rb_mix_X)
state.Y = alia_qdev.firf_2_mix_Y.apply_filter(state.rb_mix_Y)
if alia_qdev.firf_2_mix_X.filter_has_settled:
# Retrieve the block of time data from the past that aligns with
# the current filter output
valid_slice = alia_qdev.firf_1_sig_I.rb_valid_slice
state.time_2 = state.rb_time_1[valid_slice]
# Signal amplitude: R
np.sqrt(np.add(np.square(state.X), np.square(state.Y)), out=state.R)
# Signal phase: Theta
np.arctan2(state.Y, state.X, out=state.T)
np.multiply(state.T, 180 / np.pi, out=state.T) # [rad] to [deg]
else:
state.time_2.fill(np.nan)
state.R.fill(np.nan)
state.T.fill(np.nan)
state.X_avg = np.mean(state.X)
state.Y_avg = np.mean(state.Y)
state.R_avg = np.mean(state.R)
state.T_avg = np.mean(state.T)
state.rb_time_2.extend(state.time_2)
state.rb_X.extend(state.X)
state.rb_Y.extend(state.Y)
state.rb_R.extend(state.R)
state.rb_T.extend(state.T)
window.hcc_LIA_XR.extendData(
state.time_2, state.X if window.qrbt_XR_X.isChecked() else state.R
)
window.hcc_LIA_YT.extendData(
state.time_2, state.Y if window.qrbt_YT_Y.isChecked() else state.T
)
# Check if memory address of underlying buffer is still unchanged
# pylint: disable=pointless-string-statement
"""
test = np.asarray(state.rb_X)
print("%6i, mem: %i, cont?: %i, rb buf mem: %i, full? %i" % (
state.blocks_received,
test.__array_interface__['data'][0],
test.flags['C_CONTIGUOUS'],
state.rb_X._unwrap_buffer.__array_interface__['data'][0],
state.rb_X.is_full))
"""
# Power spectra
# -------------
calculate_PS_sig_I()
calculate_PS_filt_I()
calculate_PS_mix_X()
calculate_PS_mix_Y()
calculate_PS_R()
# Logging to file
logger.update(mode="w")
# Return success
return True
# ------------------------------------------------------------------------------
# Log functions
# ------------------------------------------------------------------------------
def write_header_to_log():
header = (
"\t".join(
(
"time[s]",
"ref_X*[V]",
"ref_Y*[V]",
"sig_I[V]",
"filt_I[V]",
"mix_X[V]",
"mix_Y[V]",
"X[V]",
"Y[V]",
"R[V]",
"T[deg]",
)
)
+ "\n"
)
logger.write(header)
def write_data_to_log():
if alia_qdev.firf_2_mix_X.filter_has_settled:
# All filters have settled --> green light
c = alia.config
N = c.BLOCK_SIZE
state = alia_qdev.state
idx_offset = alia_qdev.firf_1_sig_I.rb_valid_slice.start
# tick = Time.perf_counter()
# Note: `ref_X` [non-dim] is transformed to `ref_X*` [V]
# Note: `ref_Y` [non-dim] is transformed to `ref_Y*` [V]
data = np.asmatrix(
[
state.rb_time[:N] / 1e6,
np.multiply(state.rb_ref_X[:N], c.ref_V_ampl_RMS)
+ c.ref_V_offset,
np.multiply(state.rb_ref_Y[:N], c.ref_V_ampl_RMS)
+ c.ref_V_offset,
state.rb_sig_I[:N],
state.rb_filt_I[idx_offset : idx_offset + N],
state.rb_mix_X[idx_offset : idx_offset + N],
state.rb_mix_Y[idx_offset : idx_offset + N],
state.X[:N],
state.Y[:N],
state.R[:N],
state.T[:N],
# For debugging:
# state.rb_time_1[idx_offset : idx_offset + N] / 1e6,
# state.time_2[:N] / 1e6,
]
)
data = np.ma.transpose(data)
# tock = Time.perf_counter()
# print("%.4f" % (tock - tick), end=", ") # ~ 0.0001 s
logger.np_savetxt(data, fmt="%.5f", delimiter="\t")
# print("%.4f" % (Time.perf_counter() - tock)) # ~0.01 s
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set priority of this process to maximum in the operating system
print("PID: %s" % os.getpid())
try:
proc = psutil.Process(os.getpid())
if os.name == "nt":
proc.nice(psutil.HIGH_PRIORITY_CLASS) # Windows
else:
proc.nice(-20) # Other
except: # pylint: disable=bare-except
print("Warning: Could not set process to high priority.")
# --------------------------------------------------------------------------
# Arduino
# --------------------------------------------------------------------------
# Connect to Arduino
alia = Alia(read_timeout=4)
alia.auto_connect()
if not alia.is_alive:
print("\nCheck connection and try resetting the Arduino.")
print("Exiting...\n")
sys.exit(0)
if DEBUG_TIMING:
alia.tick = Time.perf_counter()
alia.begin(
waveform=Waveform.Sine,
freq=250,
V_offset=1.65,
V_ampl_RMS=0.5,
)
# Create workers and threads
alia_qdev = Alia_qdev(
dev=alia,
DAQ_function=lockin_DAQ_update,
N_blocks=21,
critical_not_alive_count=3,
use_CUDA=USE_CUDA,
debug=DEBUG,
)
alia_qdev.signal_connection_lost.connect(notify_connection_lost)
# --------------------------------------------------------------------------
# Create application and main window
# --------------------------------------------------------------------------
QtCore.QThread.currentThread().setObjectName("MAIN") # For DEBUG info
app = 0 # Work-around for kernel crash when using Spyder IDE
app = QtWid.QApplication(sys.argv)
app.aboutToQuit.connect(about_to_quit)
window = MainWindow(alia, alia_qdev)
# --------------------------------------------------------------------------
# File logger
# --------------------------------------------------------------------------
logger = FileLogger(
write_header_function=write_header_to_log,
write_data_function=write_data_to_log,
)
logger.signal_recording_started.connect(
lambda filepath: window.qpbt_record.setText(
"Recording to file: %s" % filepath
)
)
logger.signal_recording_stopped.connect(
lambda: window.qpbt_record.setText("Click to start recording to file")
)
window.qpbt_record.clicked.connect(
lambda state: logger.record(state) # pylint: disable=unnecessary-lambda
)
# --------------------------------------------------------------------------
# Create power spectrum FFTW objects
# --------------------------------------------------------------------------
p = {
"len_data": alia_qdev.state.rb_capacity,
"fs": alia.config.Fs,
"nperseg": alia.config.Fs,
}
# fmt: off
alia_qdev.fftw_PS_sig_I = FFTW_WelchPowerSpectrum(**p)
alia_qdev.fftw_PS_filt_I = FFTW_WelchPowerSpectrum(**p)
alia_qdev.fftw_PS_mix_X = FFTW_WelchPowerSpectrum(**p)
alia_qdev.fftw_PS_mix_Y = FFTW_WelchPowerSpectrum(**p)
alia_qdev.fftw_PS_R = FFTW_WelchPowerSpectrum(**p)
# fmt: on
# Only calculate the power spectrum when the curve is visible. Calculating
# spectra is CPU intensive and might impact the responsiveness of the GUI
# or, in the extreme case, cause dropped blocks of data.
def calculate_PS_sig_I():
state = alia_qdev.state
if window.pc_PS_sig_I.isVisible() and state.rb_sig_I.is_full:
window.pc_PS_sig_I.setData(
alia_qdev.fftw_PS_sig_I.freqs,
alia_qdev.fftw_PS_sig_I.compute_spectrum_dB(state.rb_sig_I),
)
def calculate_PS_filt_I():
state = alia_qdev.state
if window.pc_PS_filt_I.isVisible() and state.rb_filt_I.is_full:
window.pc_PS_filt_I.setData(
alia_qdev.fftw_PS_filt_I.freqs,
alia_qdev.fftw_PS_filt_I.compute_spectrum_dB(state.rb_filt_I),
)
def calculate_PS_mix_X():
state = alia_qdev.state
if window.pc_PS_mix_X.isVisible() and state.rb_mix_X.is_full:
window.pc_PS_mix_X.setData(
alia_qdev.fftw_PS_mix_X.freqs,
alia_qdev.fftw_PS_mix_X.compute_spectrum_dB(state.rb_mix_X),
)
def calculate_PS_mix_Y():
state = alia_qdev.state
if window.pc_PS_mix_Y.isVisible() and state.rb_mix_Y.is_full:
window.pc_PS_mix_Y.setData(
alia_qdev.fftw_PS_mix_Y.freqs,
alia_qdev.fftw_PS_mix_Y.compute_spectrum_dB(state.rb_mix_Y),
)
def calculate_PS_R():
state = alia_qdev.state
if window.pc_PS_R.isVisible() and state.rb_R.is_full:
window.pc_PS_R.setData(
alia_qdev.fftw_PS_R.freqs,
alia_qdev.fftw_PS_R.compute_spectrum_dB(state.rb_R),
)
# Special cases where the lock-in is paused: Clicking the legend checkboxes
# to unhide the PS curves should recalculate the PS based on the last known
# data. We must check if the lock-in is paused before calculating, because
# we might otherwise interfere with the other possible PS calculation
# already happening in the worker_DAQ thread if the lock-in is actually
# running at the moment of toggling the checkboxes.
#
# Ugly workaround, I know. All because we want the PS to be calculated only
# when the curve will be shown in order to reduce the cpu load.
@QtCore.pyqtSlot()
def update_paused_PS_sig_I():
if alia.lockin_paused:
calculate_PS_sig_I()
window.pc_PS_sig_I.update()
@QtCore.pyqtSlot()
def update_paused_PS_filt_I():
if alia.lockin_paused:
calculate_PS_filt_I()
window.pc_PS_filt_I.update()
@QtCore.pyqtSlot()
def update_paused_PS_mix_X():
if alia.lockin_paused:
calculate_PS_mix_X()
window.pc_PS_mix_X.update()
@QtCore.pyqtSlot()
def update_paused_PS_mix_Y():
if alia.lockin_paused:
calculate_PS_mix_Y()
window.pc_PS_mix_Y.update()
@QtCore.pyqtSlot()
def update_paused_PS_R():
if alia.lockin_paused:
calculate_PS_R()
window.pc_PS_R.update()
window.legend_PS.chkbs[0].clicked.connect(update_paused_PS_sig_I)
window.legend_PS.chkbs[1].clicked.connect(update_paused_PS_filt_I)
window.legend_PS.chkbs[2].clicked.connect(update_paused_PS_mix_X)
window.legend_PS.chkbs[3].clicked.connect(update_paused_PS_mix_Y)
window.legend_PS.chkbs[4].clicked.connect(update_paused_PS_R)
# --------------------------------------------------------------------------
# Start threads
# --------------------------------------------------------------------------
alia_qdev.start(DAQ_priority=QtCore.QThread.TimeCriticalPriority)
# --------------------------------------------------------------------------
# Start the main GUI event loop
# --------------------------------------------------------------------------
window.show()
sys.exit(app.exec_())
| StarcoderdataPython |
3348675 | from typing import Tuple
from math import sqrt
class Algorithm:
"""A base levelling algorithm."""
@classmethod
def calc(cls, before: int, after: int, inc: int) -> Tuple[int, int, bool]:
"""Returns the level, xp required to level up, whether the current xp gain is a levelup."""
bl, _ = cls.get_level(before, inc)
al, nx = cls.get_level(after, inc)
return al, nx, al > bl
class Linear(Algorithm):
"""A fully linear levelling algorithm."""
@staticmethod
def get_level(xp: int, inc: int) -> tuple:
return xp // inc, inc - (xp % inc)
class LinearIncremental(Algorithm):
"""A linearly incremental levelling algorithm."""
@staticmethod
def get_level(xp: int, inc: int) -> tuple:
level = 0
sub = inc
while xp > sub:
xp -= sub
sub += inc
level += 1
return level, abs(sub - xp)
class Quadratic(Algorithm):
"""A ^0.5 based levelling algorithm."""
@staticmethod
def get_level(xp: int, thr: int) -> tuple:
level = int((1 + sqrt(1 + 8 * (xp / thr))) / 2)
x = ((level + 1) ** 2 * thr - (level + 1) * thr) * 0.5
return level, int(x - xp)
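# Illustrative values, computed directly from the formulas above:
#   Linear.get_level(250, 100)            -> (2, 50)
#   LinearIncremental.get_level(250, 100) -> (1, 50)
#   Quadratic.get_level(250, 100)         -> (2, 50)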
class Mee6(Algorithm):
    """A copy of Mee6's algorithm"""
    @staticmethod
    def get_level(xp: int, thr: int) -> tuple:
        # Mee6 requires 5*(n**2) + 50*n + 100 xp to advance from level n to n+1,
        # so step through the levels until the remaining xp no longer covers the cost.
        # (`thr` is unused; Mee6's curve has a fixed base cost.)
        level = 0
        cost = 5 * (level ** 2) + 50 * level + 100
        while xp >= cost:
            xp -= cost
            level += 1
            cost = 5 * (level ** 2) + 50 * level + 100
        return level, int(cost - xp) | StarcoderdataPython |
1699179 | <reponame>Arnaav-Singh/Beginner-code
x = int(input("Enter your Sales amount: "))
# Sales of 500000 or more earn a 10% bonus; anything less earns 5%.
if x >= 500000:
    print(x * 10 / 100 + x)
else:
    print(x * 5 / 100 + x)
| StarcoderdataPython |
87367 | <reponame>shitchell/rpi-server
#!/usr/bin/env python3
from importlib.machinery import SourceFileLoader
import readline
import glob
import http.server
import socketserver
import urllib
import glob
import random
import time
import sys
import os
SCRIPT_FILEPATH = os.path.realpath(__file__)
SCRIPT_DIRPATH = os.path.dirname(SCRIPT_FILEPATH) + os.path.sep
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 9000
NOLOG = False
def print(msg, *args, **kwargs):
if not NOLOG:
        timestamp = time.strftime('[%Y-%m-%d %H:%M.%S]')
args = ' '.join(list(map(lambda x: str(x), args)))
sys.stdout.write('%s %s %s\n' % (timestamp, str(msg), args))
sys.stdout.flush()
class Commands:
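    """Command registry: methods named do_<action> serve /<action> requests, and
    external rpi_*.py modules loaded by load_modules() are reachable as /<module>.<action>."""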
modules = dict()
def _find_action(action):
# See if the action is attached to a particular module
if action.count(".") == 1:
module, action = action.split(".")
module = Commands.modules.get(module)
if module:
try:
func = getattr(module, "do_" + action)
except:
pass
else:
return func
else:
try:
# Search the default commands
func = getattr(Commands, "do_" + action)
except:
pass
else:
return func
def _list_actions():
if Commands._action_cache:
return Commands._action_cache
# Loop through the default and external commands
actions = list()
modules = [Commands] + list(Commands.modules.values())
for module in modules:
mod_actions = filter(lambda x: x.startswith("do_"), dir(module))
# Filter hidden commands
mod_actions = filter(lambda x: not getattr(getattr(module, x), "hidden", False), mod_actions)
if module.__name__ == "Commands":
mod_actions = map(lambda x: x[3:], mod_actions)
else:
## prepend the command with the module name
mod_actions = map(lambda x: "%s.%s" % (module.__name__, x[3:]), mod_actions)
actions.extend(list(mod_actions))
# Store the list of commands for efficiency's sake
Commands._action_cache = actions
return actions
def _load_base_template(filepath=SCRIPT_DIRPATH + 'base.html'):
f = open(filepath, 'r')
Commands._base_template = f.read()
f.close()
def _page_template(title, content, subtitle=""):
if not Commands._base_template:
# Load the base template
Commands._load_base_template()
# Substitute stuffs out
base = Commands._base_template % {"title": title, "subtitle": subtitle, "content": content}
return bytes(base, "UTF-8")
def _help_template(func):
action = func.__name__[3:]
# Parse samples
sample = ''
if hasattr(func, "samples"):
sample = '<span class="sample">' + '</span><span class="sample">'.join(func.samples) + '</span>'
return Commands._page_template(action, func.__doc__ or "No documentation", sample)
def do_help(req, *args, **kwargs):
"""Provides documentation on commands"""
if args:
func = Commands._find_action(args[0])
if func:
req.wfile.write(Commands._help_template(func))
return
# Create links for each command
action_links = map(lambda x: '<a href="/help/%s">%s</a><br />' % (x, x), Commands._list_actions())
action_links = "\n".join(list(action_links))
help = Commands._page_template("Commands", action_links)
req.wfile.write(help)
do_help.samples = ["/help/reload", "/help/foo.bar"]
def do_reload(req, *args, **kwargs):
"""Refreshes external modules"""
load_modules()
req.wfile.write(bytes('<meta http-equiv="refresh" content="0;URL=/">', "UTF-8"))
def load_modules():
# Dump old modules
Commands.modules = dict()
# Dump old command cache
Commands._action_cache = list()
# Reload base template
Commands._load_base_template()
# Load external command modules
for path in glob.glob(SCRIPT_DIRPATH + 'rpi_*.py'):
# load the individual module
name = os.path.basename(path).split('.')[0][4:]
try:
module = SourceFileLoader(name, path).load_module()
# Change the format for module output
module.__builtins__["print"] = print
except Exception as e:
print("! Error loading module '%s': %s" % (name, e))
else:
Commands.modules[name] = module
print("loaded module '%s'" % name)
load_modules()
class Server(http.server.SimpleHTTPRequestHandler):
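    """Handles GET requests by splitting the path into /<action>/<arg>... plus
    ?key=value kwargs and dispatching to the matching Commands handler (help by default)."""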
def do_GET(self):
# Send initial headers
self.protocol_version='HTTP/1.1'
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/html')
self.end_headers()
# Get the kwargs
path = self.path.split('?')
kwargs = dict()
if len(path) > 1:
for kwarg in path[1].split('&'):
kw, arg = kwarg.split('=')
kwargs[kw] = urllib.parse.unquote(arg)
# Get the action and args
path = self.path.split('?')[0]
path = path.rstrip('/').split('/')[1:]
if path:
action = path.pop(0)
args = list(map(lambda x: urllib.parse.unquote(x), path))
else:
action = None
args = None
# Find the command to execute
if action:
print("(action) %s (args) %s (kwargs) %s" % (action, args, kwargs))
func = Commands._find_action(action)
if func:
try:
func(self, *args, **kwargs)
except Exception as e:
print("! Error executing function '%s'" % action)
print("!!", e)
return
Commands.do_help(self)
def log_message(self, format, *args):
pass
if __name__ == "__main__":
while True:
try:
server = socketserver.TCPServer(('', port), Server)
print("Serving on port %i" % port)
server.serve_forever()
except KeyboardInterrupt:
NOLOG = True
print("^C one more time to exit")
while True:
try:
x = input("> ")
except EOFError:
break
except KeyboardInterrupt:
server.socket.close()
sys.exit(1)
try:
exec(x)
except Exception as e:
print(e)
NOLOG = False
| StarcoderdataPython |
3314698 | <gh_stars>1-10
from sandbox import Scene
from sandbox.property import LinesCoincidenceProperty, PointInsideAngleProperty
from .base import ExplainerTest
class InsideTriangle1(ExplainerTest):
def createScene(self):
scene = Scene()
A, B, C = scene.nondegenerate_triangle(labels=('A', 'B', 'C')).points
D = A.segment(B).free_point(label='D')
E = A.segment(C).free_point(label='E')
X = D.line_through(C).intersection_point(E.line_through(B), label='X')
return scene
def testPointOnLine(self):
A = self.scene.get('A')
B = self.scene.get('B')
C = self.scene.get('C')
D = self.scene.get('D')
E = self.scene.get('E')
X = self.scene.get('X')
self.assertIn(LinesCoincidenceProperty(C.segment(D), B.segment(E), False), self.explainer.context)
self.assertIn(PointInsideAngleProperty(X, A.angle(B, C)), self.explainer.context)
self.assertIn(PointInsideAngleProperty(X, B.angle(C, A)), self.explainer.context)
self.assertIn(PointInsideAngleProperty(X, C.angle(A, B)), self.explainer.context)
class InsideTriangle2(ExplainerTest):
def createScene(self):
scene = Scene()
A, B, C = scene.nondegenerate_triangle(labels=('A', 'B', 'C')).points
D = A.segment(B).free_point(label='D')
E = A.segment(C).free_point(label='E')
F = B.segment(C).free_point(label='F')
X = D.line_through(E).intersection_point(A.line_through(F), label='X')
return scene
def testPointOnLine(self):
A = self.scene.get('A')
B = self.scene.get('B')
C = self.scene.get('C')
D = self.scene.get('D')
E = self.scene.get('E')
F = self.scene.get('F')
X = self.scene.get('X')
self.assertIn(LinesCoincidenceProperty(D.segment(E), A.segment(F), False), self.explainer.context)
self.assertIn(PointInsideAngleProperty(X, A.angle(B, C)), self.explainer.context)
self.assertIn(PointInsideAngleProperty(X, B.angle(C, A)), self.explainer.context)
self.assertIn(PointInsideAngleProperty(X, C.angle(A, B)), self.explainer.context)
| StarcoderdataPython |
4829738 | <filename>server/inventario/managers.py
import hashlib
import uuid
from datetime import timedelta, datetime
from time import time
from sqlalchemy.exc import IntegrityError
from server.common.managers import SuperManager
from .models import *
from ..user.models import User
from openpyxl import load_workbook, Workbook
class CompanyManager(SuperManager):
def __init__(self, db):
super().__init__(Company, db)
def getCompanyByID(self, id):
return self.db.query(Company).filter(Company.id == id).first()
def list_all(self):
return self.db.query(Company).filter(Company.id > 0)
def import_excel(self, cname):
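        """Import companies from sheet 'Hoja1' of the uploaded workbook `cname`;
        rows whose IdEmpresa does not exist yet are inserted."""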
try:
wb = load_workbook(filename="server/common/resources/uploads/" + cname)
sheet = wb.get_sheet_by_name(name='Hoja1') # parametrizable......
colnames = ['IdEmpresa', 'NomEmpresa', 'NitEmpresa', 'ActEmpresa',
'PagEmpresa', 'TxtEmpresa', 'TcEmpresa']
min_row = 1
indices = {cell[0].value: n - min_row for n, cell in
enumerate(sheet.iter_cols(min_row=min_row, max_row=min_row), start=min_row) if
cell[0].value in colnames}
for row in sheet.iter_rows(min_row=min_row + 1):
if row[indices['IdEmpresa']].value is not None and \
row[indices['NomEmpresa']].value is not None and \
row[indices['NitEmpresa']].value is not None and \
row[indices['ActEmpresa']].value is not None and \
row[indices['PagEmpresa']].value is not None and \
row[indices['TxtEmpresa']].value is not None and \
row[indices['TcEmpresa']].value is not None:
if CompanyManager(self.db).getCompanyByID(row[indices['IdEmpresa']].value) is None:
empresa = Company(
id=row[indices['IdEmpresa']].value,
name=row[indices['NomEmpresa']].value,
nit=row[indices['NitEmpresa']].value,
activity=row[indices['ActEmpresa']].value,
payment=row[indices['PagEmpresa']].value,
text=row[indices['PagEmpresa']].value,
coin=row[indices['TcEmpresa']].value
)
self.db.add(empresa)
self.db.flush()
self.db.commit()
return {'message': 'Importado Todos Correctamente.', 'success': True}
except IntegrityError as e:
self.db.rollback()
if 'UNIQUE constraint failed: rrhh_persona.dni' in str(e):
return {'message': 'CI duplicado', 'success': False}
if 'UNIQUE constraint failed: rrhh_empleado.codigo' in str(e):
return {'message': 'codigo de empleado duplicado', 'success': False}
return {'message': str(e), 'success': False}
class OfficeManager(SuperManager):
def __init__(self, db):
super().__init__(Office, db)
def getOfficeByID(self, id):
return self.db.query(Office).filter(Office.id == id).first()
def getOfficeByName(self, name):
return self.db.query(Office).filter(Office.name == name).first()
def getOfficeByPromoter(self, fk_promoter):
return self.db.query(Office).filter(Office.fk_promoter == fk_promoter).first()
def listAll(self):
return self.db.query(Office).filter(Office.id > 0)
def import_excel(self, cname):
try:
wb = load_workbook(filename="server/common/resources/uploads/" + cname)
sheet = wb.get_sheet_by_name(name='Hoja1') # parametrizable......
colnames = ['idEmpresa', 'idSucursal', 'NomSucursal', 'DirSucusal',
'TelSucursal', 'DepSucursal', 'LdoSucursal',
'NauSucursal', 'DoiSucursal', 'DofSucursal', 'FliSucursal', 'idPromotor']
min_row = 1
indices = {cell[0].value: n - min_row for n, cell in
enumerate(sheet.iter_cols(min_row=min_row, max_row=min_row), start=min_row) if
cell[0].value in colnames}
for row in sheet.iter_rows(min_row=min_row + 1):
if row[indices['idEmpresa']].value is not None and \
row[indices['idSucursal']].value is not None and \
row[indices['NomSucursal']].value is not None and \
row[indices['DirSucusal']].value is not None and \
row[indices['TelSucursal']].value is not None and \
row[indices['DepSucursal']].value is not None and \
row[indices['LdoSucursal']].value is not None and \
row[indices['NauSucursal']].value is not None and \
row[indices['DoiSucursal']].value is not None and \
row[indices['DofSucursal']].value is not None and \
row[indices['FliSucursal']].value is not None and \
row[indices['idPromotor']].value is not None:
if OfficeManager(self.db).getOfficeByID(row[indices['idSucursal']].value) is None:
sucursal = Office(
id=row[indices['idSucursal']].value,
name=row[indices['NomSucursal']].value,
address=row[indices['DirSucusal']].value,
phone=row[indices['TelSucursal']].value,
department=row[indices['DepSucursal']].value,
key=row[indices['LdoSucursal']].value,
auth=row[indices['NauSucursal']].value,
dsfini=row[indices['DoiSucursal']].value,
dsffin=row[indices['DofSucursal']].value,
limdate=row[indices['FliSucursal']].value,
fk_company=row[indices['idEmpresa']].value,
fk_promoter=row[indices['idPromotor']].value
)
self.db.add(sucursal)
self.db.flush()
self.db.commit()
return {'message': 'Importado Todos Correctamente.', 'success': True}
except IntegrityError as e:
self.db.rollback()
if 'UNIQUE constraint failed: rrhh_persona.dni' in str(e):
return {'message': 'CI duplicado', 'success': False}
if 'UNIQUE constraint failed: rrhh_empleado.codigo' in str(e):
return {'message': 'codigo de empleado duplicado', 'success': False}
return {'message': str(e), 'success': False}
class ProductManager(SuperManager):
def __init__(self, db):
super().__init__(Product, db)
# def get_groupsByFamily(self, family):
# return self.db.query(Product).filter(Product.fk_family == family)
def getProductByID(self, id):
return self.db.query(Product).filter(Product.id == id).first()
def getProductByName(self, name):
return self.db.query(Product).filter(Product.name == name).first()
def listAll(self):
return self.db.query(Product).filter(Product.id > 0)
def import_excel(self, cname):
try:
wb = load_workbook(filename="server/common/resources/uploads/" + cname)
sheet = wb.get_sheet_by_name(name='Hoja1') # parametrizable......
colnames = ['idEmpresa', 'idProducto', 'NomProducto', 'uniProducto',
'PreProducto', 'desProducto', 'salProducto']
min_row = 1
indices = {cell[0].value: n - min_row for n, cell in
enumerate(sheet.iter_cols(min_row=min_row, max_row=min_row), start=min_row) if
cell[0].value in colnames}
for row in sheet.iter_rows(min_row=min_row + 1):
if row[indices['idEmpresa']].value is not None and \
row[indices['idProducto']].value is not None and \
row[indices['NomProducto']].value is not None and \
row[indices['uniProducto']].value is not None and \
row[indices['PreProducto']].value is not None and \
row[indices['desProducto']].value is not None and \
row[indices['salProducto']].value is not None:
if ProductManager(self.db).getProductByID(row[indices['idProducto']].value) is None:
producto = Product(
id=row[indices['idProducto']].value,
name=row[indices['NomProducto']].value,
unit=row[indices['uniProducto']].value,
price=row[indices['PreProducto']].value,
discount=row[indices['desProducto']].value,
stock=row[indices['salProducto']].value,
fk_company=row[indices['idEmpresa']].value
)
self.db.add(producto)
self.db.flush()
self.db.commit()
return {'message': 'Importado Todos Correctamente.', 'success': True}
except IntegrityError as e:
self.db.rollback()
if 'UNIQUE constraint failed: rrhh_persona.dni' in str(e):
return {'message': 'CI duplicado', 'success': False}
if 'UNIQUE constraint failed: rrhh_empleado.codigo' in str(e):
return {'message': 'codigo de empleado duplicado', 'success': False}
return {'message': str(e), 'success': False}
class CustomerManager(SuperManager):
def __init__(self, db):
super().__init__(Customer, db)
def getCustomerByID(self, id):
return self.db.query(Customer).filter(Customer.id == id).first()
def listAll(self):
return self.db.query(Customer).filter(Customer.id > 0)
def getCustomerByName(self, line):
return self.db.query(Customer).filter(Customer.name == line).first()
def getCustomerByNit(self, family):
return self.db.query(Customer).filter(Customer.nit == family)
def import_excel(self, cname):
try:
print("init flaco")
print(str(datetime.now()))
wb = load_workbook(filename="server/common/resources/uploads/" + cname)
sheet = wb.get_sheet_by_name(name='Hoja1') # parametrizable......
colnames = ['idEmpresa', 'idCliente', 'NomCliente', 'NitCliente']
min_row = 1
indices = {cell[0].value: n - min_row for n, cell in
enumerate(sheet.iter_cols(min_row=min_row, max_row=min_row), start=min_row) if
cell[0].value in colnames}
# print(str(datetime.now()))
for row in sheet.iter_rows(min_row=min_row + 1):
if row[indices['idEmpresa']].value is not None and \
row[indices['idCliente']].value is not None and \
row[indices['NomCliente']].value is not None and \
row[indices['NitCliente']].value is not None:
if CustomerManager(self.db).getCustomerByID(row[indices['idCliente']].value) is None:
cliente = Customer(
id=row[indices['idCliente']].value,
name=row[indices['NomCliente']].value,
nit=row[indices['NitCliente']].value,
fk_company=row[indices['idEmpresa']].value
)
self.db.add(cliente)
self.db.flush()
self.db.commit()
print("end flaco")
print(str(datetime.now()))
return {'message': 'Importado Todos Correctamente.', 'success': True}
except IntegrityError as e:
self.db.rollback()
if 'UNIQUE constraint failed: rrhh_persona.dni' in str(e):
return {'message': 'CI duplicado', 'success': False}
if 'UNIQUE constraint failed: rrhh_empleado.codigo' in str(e):
return {'message': 'codigo de empleado duplicado', 'success': False}
return {'message': str(e), 'success': False}
class BillManager(SuperManager):
def __init__(self, db):
super().__init__(Bill, db)
def getBillByID(self, id):
return self.db.query(Bill).filter(Bill.id == id).first()
# def get_subgroupByLine(self, line):
# return self.db.query(SubGroup).filter(SubGroup.subline == line).first()
#
# def get_subgroupsByGroup(self, group):
# return self.db.query(SubGroup).filter(SubGroup.fk_group == group)
def insertBills(self, data):
# data es una lista de bills :v
try:
for bill in data:
managedBill = Bill(ctrlcode=bill['code'],
id_bill=bill['idBill'],
fk_company=bill['fkCompany'],
fk_customer=bill['fkCustomer'],
date=datetime.strptime(bill['date'], '%Y/%m/%d').date(),
hour=datetime.strptime(bill['hour'], '%H:%M:%S').time(),
nit=bill['nit'],
name=bill['name'],
fk_promoter=bill['fkPromoter']
)
for detail in bill['detailList']:
managedDetail = Detail(fk_bill=detail['fkBill'],
fk_product=detail['fkProduct'],
quantity=detail['quantity'],
oriprize=detail['iprice'],
finprize=detail['fprice'],
total=detail['total'])
managedBill.details.append(managedDetail)
self.db.add(managedBill)
self.db.commit()
return True
except IntegrityError as e:
print(str(e))
self.db.rollback()
return False
def listAll(self):
return self.db.query(Bill).filter(Bill.id > 0)
class DetailManager(SuperManager):
def __init__(self, db):
super().__init__(Detail, db)
def getDetailByID(self, code):
return self.db.query(Detail).filter(Detail.id == code).first()
def listAll(self):
return self.db.query(Detail).filter(Detail.id > 0)
# def get_productsBySubgroup(self, subgroup):
# return self.db.query(Detail).filter(Detail.fk_subgroup == subgroup)
#
# def get_productsByGroup(self, group):
# return self.db.query(Detail).filter(Detail.fk_group == group.id_group).filter(
# Detail.fk_family == group.fk_family)
#
# def get_productsByFamily(self, family):
# return self.db.query(Detail).filter(Detail.fk_family == family)
#
# def get_productsByWarehouse(self, warehouse):
# return self.db.query(Detail).filter(Detail.fk_warehouse == warehouse)
#
# def get_productByCode(self, code):
# return self.db.query(Detail).filter(Detail.code == code).first()
#
# def get_productByOrigin(self, origin):
# return self.db.query(Detail).filter(Detail.code_origin == origin)
#
# def get_productsByText(self, text):
# return self.db.query(Detail).filter(self.colums_like(self.entity, text))
# def import_excel(self, cname):
# try:
# wb = load_workbook(filename="server/common/resources/uploads/" + cname)
# sheet = wb.get_sheet_by_name(name='Hoja1') # parametrizable......
# colnames = ['almacen', 'nomb_alma', 'id_fami', 'nomb_fami', 'id_grup',
# 'line', 'nomb_grup', 'id_subg', 'subline', 'nomb_subg', 'codigo',
# 'codorigen', 'nomb_cata', 'fechacaduc', 'nrolote', 'nrofabri', 'saldf_ini', 'impor_ini',
# 'ingresos', 'salidas',
# 'saldf_fin']
# min_row = 1
# indices = {cell[0].value: n - min_row for n, cell in
# enumerate(sheet.iter_cols(min_row=min_row, max_row=min_row), start=min_row) if
# cell[0].value in colnames}
# for row in sheet.iter_rows(min_row=min_row + 1):
# if row[indices['almacen']].value is not None and row[indices['nomb_alma']].value is not None and \
# row[indices['id_fami']].value is not None and \
# row[indices['nomb_fami']].value is not None and \
# row[indices['id_grup']].value is not None and \
# row[indices['line']].value is not None and \
# row[indices['nomb_grup']].value is not None and \
# row[indices['id_subg']].value is not None and \
# row[indices['subline']].value is not None and \
# row[indices['nomb_subg']].value is not None and \
# row[indices['codigo']].value is not None and \
# row[indices['codorigen']].value is not None and \
# row[indices['saldf_fin']].value is not None and \
# row[indices['nomb_cata']].value is not None:
#
# if WarehouseManager(self.db).get_warehouseByID(row[indices['almacen']].value) is None:
# almacen = Warehouse(
# id=row[indices['almacen']].value,
# name=row[indices['nomb_alma']].value
# )
#
# if FamilyManager(self.db).get_familyByID(row[indices['id_fami']].value) is None:
# familia = Family(
# id=row[indices['id_fami']].value,
# name=row[indices['nomb_fami']].value
# )
#
# if GroupManager(self.db).get_groupByLine(row[indices['line']].value) is None:
# grupo = Group(
# id_group=row[indices['id_grup']].value,
# line=row[indices['line']].value,
# name=row[indices['nomb_grup']].value
# )
# if SubGroupManager(self.db).get_subgroupByLine(row[indices['subline']].value) is None:
# subgrupo = SubGroup(
# id_subgroup=row[indices['id_subg']].value,
# subline=row[indices['subline']].value,
# name=row[indices['nomb_subg']].value
# )
# if ProductManager(self.db).get_product(row[indices['codigo']].value) is None:
# producto = InvProduct(
# code=row[indices['codigo']].value,
# code_origin=row[indices['codorigen']].value,
# name=row[indices['nomb_cata']].value,
# stock=row[indices['saldf_fin']].value
# )
# else:
# producto = ProductManager(self.db).get_product(row[indices['codigo']].value)
# producto.stock = producto.stock + row[indices['saldf_fin']].value
#
# if FamilyGroupManager(self.db).get_familygroup(familia.id, grupo.line) is None:
# familiagrupo = FamilyGroup()
# else:
# familiagrupo = FamilyGroupManager(self.db).get_familygroup(familia.id, grupo.line)
#
# if GroupSubgroupManager(self.db).get_groupsub(grupo.line, subgrupo.subline) is None:
# gruposubgrupo = GroupSubGroup()
# else:
# gruposubgrupo = GroupSubgroupManager(self.db).get_groupsub(grupo.line, subgrupo.subline)
# producto.subgroup = subgrupo
# # producto.group = grupo
# # producto.family = familia
# # producto.warehouse = almacen
# # subgrupo.group = grupo
# # grupo.family = familia
# familiagrupo.family = familia
# familiagrupo.group = grupo
# gruposubgrupo.group = grupo
# gruposubgrupo.subgroup = subgrupo
# familia.warehouse = almacen
#
# self.db.add(producto)
# self.db.add(familiagrupo)
# self.db.add(gruposubgrupo)
# self.db.flush()
#
# self.db.commit()
# return {'message': 'Importado Todos Correctamente.', 'success': True}
# except IntegrityError as e:
# self.db.rollback()
# if 'UNIQUE constraint failed: rrhh_persona.dni' in str(e):
# return {'message': 'CI duplicado', 'success': False}
# if 'UNIQUE constraint failed: rrhh_empleado.codigo' in str(e):
# return {'message': 'codigo de empleado duplicado', 'success': False}
# return {'message': str(e), 'success': False}
| StarcoderdataPython |
129436 | <reponame>Debagboola/Django-Portfolio-and-Blog-App<gh_stars>0
from django.db import models
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=20)
class Post(models.Model):
title = models.CharField(max_length=255)
body = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
categories = models.ManyToManyField('Category', related_name='posts')
class Comment(models.Model):
author = models.CharField(max_length=60)
body = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
post = models.ForeignKey('Post', on_delete=models.CASCADE)
| StarcoderdataPython |
3260192 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .forms import ProfileForm
from django.contrib import messages
# Create your views here.
@login_required
def profile_view(request):
return render(request, 'oauth/profile.html')
@login_required
def change_profile_view(request):
    if request.method == 'POST':
        # Uploading files requires request.FILES
        form = ProfileForm(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            form.save()
            # Add a success message, then redirect to the personal information page
            messages.add_message(request, messages.SUCCESS, 'Personal information updated successfully!')
            return redirect('oauth:profile')
    else:
        # Return an empty form for non-POST requests
        form = ProfileForm(instance=request.user)
    return render(request, 'oauth/change_profile.html', context={'form': form}) | StarcoderdataPython |
1604806 | <reponame>Tejas-Nanaware/Learning-OpenCV<filename>haar cascades/own haar cascade/get files.py
import urllib.request
import cv2
import numpy as np
import os
print("Hi")
def store_raw_images():
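    """Download negative training images from an ImageNet synset URL list,
    convert them to grayscale and resize them to 500x500 for haar-cascade training."""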
print("getting url")
# neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n00007846'
# neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'
neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n00523513'
print("Got URL")
neg_image_urls = urllib.request.urlopen(neg_images_link).read().decode()
pic_num = 1
print(pic_num)
if not os.path.exists('neg'):
os.makedirs('neg')
print("made dir")
for i in neg_image_urls.split('\n'):
try:
print(str(pic_num) + "\t" + i)
urllib.request.urlretrieve(i, "neg/"+str(pic_num)+".jpg")
img = cv2.imread("neg/"+str(pic_num)+".jpg",cv2.IMREAD_GRAYSCALE)
# should be larger than samples / pos pic (so we can place our image on it)
resized_image = cv2.resize(img, (500, 500))
cv2.imwrite("neg/"+str(pic_num)+".jpg",resized_image)
pic_num += 1
except Exception as e:
print(str(e))
store_raw_images() | StarcoderdataPython |
3380332 | <filename>tests/test_validator.py<gh_stars>10-100
import yaml
from dbd.db.db_schema import DbSchema
def test_schema_validation():
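    # schema1.yaml is expected to pass validation; schema2.yaml through schema5.yaml must fail.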
with open('./tests/fixtures/schemas/schema1.yaml', 'r') as f:
code = yaml.safe_load(f.read())
result, errors = DbSchema.validate_code(code)
assert result
with open('./tests/fixtures/schemas/schema2.yaml', 'r') as f:
code = yaml.safe_load(f.read())
result, errors = DbSchema.validate_code(code)
assert not result
with open('./tests/fixtures/schemas/schema3.yaml', 'r') as f:
code = yaml.safe_load(f.read())
result, errors = DbSchema.validate_code(code)
assert not result
with open('./tests/fixtures/schemas/schema4.yaml', 'r') as f:
code = yaml.safe_load(f.read())
result, errors = DbSchema.validate_code(code)
assert not result
with open('./tests/fixtures/schemas/schema5.yaml', 'r') as f:
code = yaml.safe_load(f.read())
result, errors = DbSchema.validate_code(code)
assert not result
| StarcoderdataPython |
3255387 | <reponame>PDA-UR/DIPPID-py<filename>DIPPID.py
import sys
import json
from threading import Thread
from time import sleep
from datetime import datetime
import signal
# those modules are imported dynamically during runtime
# they are imported only if the corresponding class is used
#import socket
#import serial
#import wiimote
class Sensor():
# class variable that stores all instances of Sensor
instances = []
def __init__(self):
# list of strings which represent capabilites, such as 'buttons' or 'accelerometer'
self._capabilities = []
# for each capability, store a list of callback functions
self._callbacks = {}
# for each capability, store the last value as an object
self._data = {}
self._receiving = False
Sensor.instances.append(self)
# stops the loop in _receive() and kills the thread
# so the program can terminate smoothly
def disconnect(self):
self._receiving = False
Sensor.instances.remove(self)
if self._connection_thread:
self._connection_thread.join()
# runs as a thread
# receives json formatted data from sensor,
# stores it and notifies callbacks
def _update(self, data):
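        # `data` is expected to be a JSON object string, e.g. '{"button_1": 1}' or
        # '{"accelerometer": {"x": 0.1, "y": 0.0, "z": 0.98}}'; each top-level key
        # becomes a capability (the exact payload depends on the sending device).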
try:
data_json = json.loads(data)
except json.decoder.JSONDecodeError:
# incomplete data
return
for key, value in data_json.items():
self._add_capability(key)
# do not notify callbacks on initialization
if self._data[key] == []:
self._data[key] = value
continue
# notify callbacks only if data has changed
if self._data[key] != value:
self._data[key] = value
self._notify_callbacks(key)
# checks if capability is available
def has_capability(self, key):
return key in self._capabilities
def _add_capability(self, key):
if not self.has_capability(key):
self._capabilities.append(key)
self._callbacks[key] = []
self._data[key] = []
# returns a list of all current capabilities
def get_capabilities(self):
return self._capabilities
# get last value for specified capability
def get_value(self, key):
try:
return self._data[key]
except KeyError:
# notification when trying to get values for a non-existent capability
#raise KeyError(f'"{key}" is not a capability of this sensor.')
return None
# register a callback function for a change in specified capability
def register_callback(self, key, func):
self._add_capability(key)
self._callbacks[key].append(func)
# remove already registered callback function for specified capability
def unregister_callback(self, key, func):
if key in self._callbacks:
self._callbacks[key].remove(func)
return True
else:
# in case somebody wants to check if the callback was present before
return False
def _notify_callbacks(self, key):
for func in self._callbacks[key]:
func(self._data[key])
# sensor connected via WiFi/UDP
# initialized with a UDP port
# listens to all IPs by default
# requires the socket module
class SensorUDP(Sensor):
def __init__(self, port, ip='0.0.0.0'):
Sensor.__init__(self)
self._ip = ip
self._port = port
self._connect()
def _connect(self):
import socket
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.bind((self._ip, self._port))
self._connection_thread = Thread(target=self._receive)
self._connection_thread.start()
def _receive(self):
self._receiving = True
while self._receiving:
data, addr = self._sock.recvfrom(1024)
try:
data_decoded = data.decode()
except UnicodeDecodeError:
continue
self._update(data_decoded)
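# Minimal usage sketch (the port number is only an example, not a fixed requirement):
#   sensor = SensorUDP(5700)
#   sensor.register_callback('button_1', lambda value: print('button_1:', value))
#   print(sensor.get_value('accelerometer'))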
# sensor connected via serial connection (USB)
# initialized with a path to a TTY (e.g. /dev/ttyUSB0)
# default baudrate is 115200
# requires pyserial
class SensorSerial(Sensor):
def __init__(self, tty, baudrate=115200):
Sensor.__init__(self)
self._tty = tty
self._baudrate = baudrate
self._connect()
def _connect(self):
import serial
self._serial = serial.Serial(self._tty)
self._serial.baudrate = self._baudrate
self._connection_thread = Thread(target=self._receive)
self._connection_thread.start()
def _receive(self):
self._receiving = True
try:
while self._receiving:
data = self._serial.readline()
try:
data_decoded = data.decode()
except UnicodeDecodeError:
continue
self._update(data)
except:
# connection lost, try again
self._connect()
# uses a Nintendo Wiimote as a sensor (connected via Bluetooth)
# initialized with a Bluetooth address
# requires wiimote.py (https://github.com/RaphaelWimmer/wiimote.py)
# and pybluez
class SensorWiimote(Sensor):
def __init__(self, btaddr):
Sensor.__init__(self)
self._btaddr = btaddr
self._connect()
def _connect(self):
import wiimote
self._wiimote = wiimote.connect(self._btaddr)
self._connection_thread = Thread(target=self._receive)
self._connection_thread.start()
def _receive(self):
self._receiving = True
buttons = self._wiimote.buttons.BUTTONS.keys()
while self._receiving:
x = self._wiimote.accelerometer[0]
y = self._wiimote.accelerometer[1]
z = self._wiimote.accelerometer[2]
data_string = f'{{"x":{x},"y":{y},"z":{z}}}'
self._update('accelerometer', data_string)
for button in buttons:
state = int(self._wiimote.buttons[button])
                self._update('button_' + button.lower(), state)
sleep(0.001)
def _update(self, key, value):
self._add_capability(key)
# do not notify callbacks on initialization
if self._data[key] == []:
self._data[key] = value
return
# notify callbacks only if data has changed
if self._data[key] != value:
self._data[key] = value
self._notify_callbacks(key)
# close the program softly when ctrl+c is pressed
def handle_interrupt_signal(signal, frame):
for sensor in Sensor.instances:
sensor.disconnect()
sys.exit(0)
signal.signal(signal.SIGINT, handle_interrupt_signal)
| StarcoderdataPython |
3312554 | import time
import os
import sys
from pathlib import Path
import numpy as nump
import pandas as panda
import uuid
import csv
import inspect
import re
import platform
import requests
import json
from datetime import datetime
from tir.technologies.core.config import ConfigLoader
from tir.technologies.core.logging_config import logger
class Log:
"""
This class is instantiated to create the log file and to append the results and failures to it.
Usage:
>>> # Instanted inside base.py:
>>> self.log = Log()
"""
def __init__(self, suite_datetime="", user="", station="", program="", program_date=("19800101"), version="", release="", database="", issue="", execution_id="", country="", folder="", test_type="TIR", config_path=""):
self.timestamp = time.strftime("%Y%m%d%H%M%S")
today = datetime.today()
self.config = ConfigLoader(config_path)
self.user = user
self.station = station
self.program = program
self.program_date = program_date
self.version = version
self.release = release
self.database = database
self.initial_time = datetime.today()
self.testcase_initial_time = datetime.today()
self.seconds = 0
self.testcase_seconds = 0
self.suite_datetime = suite_datetime
self.table_rows = []
self.test_case_log = []
self.csv_log = []
self.invalid_fields = []
self.table_rows.append(self.generate_header())
self.folder = folder
self.test_type = test_type
self.issue = self.config.issue
self.execution_id = self.config.execution_id
self.country = country
self.start_time = None
self.end_time = None
self.ct_method = ""
self.ct_number = ""
self.so_type = platform.system()
self.so_version = f"{self.so_type} {platform.release()}"
self.build_version = ""
self.lib_version = ""
self.webapp_version = ""
self.date = today.strftime('%Y%m%d')
self.hour = today.strftime('%H:%M:%S')
self.last_exec = today.strftime('%Y%m%d%H%M%S%f')[:-3]
self.hash_exec = ""
self.test_case = self.list_of_testcases()
self.finish_testcase = []
def generate_header(self):
"""
Generates the header line on the log file.
Usage:
>>> # Calling the method:
>>> self.log.generate_header()
"""
return ['Data','Usuário','Estação','Programa','Data Programa','Total CTs','Passou','Falhou', 'Segundos','Versão','Release', 'CTs Falhou', 'Banco de dados','Chamado','ID Execução','Pais', "Tipo de Teste"]
def new_line(self, result, message):
"""
Appends a new line with data on log file.
:param result: The result of the case.
:type result: bool
        :param message: The message to be logged.
:type message: str
Usage:
>>> # Calling the method:
>>> self.log.new_line(True, "Success")
"""
line = []
total_cts = 1
passed = 1 if result else 0
failed = 0 if result else 1
printable_message = self.printable_message(message)
if not self.suite_datetime:
self.suite_datetime = time.strftime("%d/%m/%Y %X")
if self.get_testcase_stack() not in self.test_case_log:
line.extend([self.suite_datetime, self.user, self.station, self.program, self.program_date, total_cts, passed, failed, self.seconds, self.version, self.release, printable_message, self.database, self.issue, self.execution_id, self.country, self.test_type])
self.table_rows.append(line)
self.test_case_log.append(self.get_testcase_stack())
def save_file(self):
"""
Writes the log file to the file system.
Usage:
>>> # Calling the method:
>>> self.log.save_file()
"""
log_file = f"{self.user}_{uuid.uuid4().hex}_auto.csv"
if len(self.table_rows) > 0:
try:
if self.folder:
path = Path(self.folder, self.station+"_v6")
os.makedirs(path)
else:
path = Path("Log", self.station)
os.makedirs(path)
except OSError:
pass
if self.config.smart_test:
open("log_exec_file.txt", "w")
if ((len(self.table_rows[1:]) == len(self.test_case) and self.get_testcase_stack() not in self.csv_log) or (self.get_testcase_stack() == "setUpClass") and self.checks_empty_line()) :
with open( Path(path, log_file), mode="w", newline="", encoding="windows-1252") as csv_file:
csv_writer_header = csv.writer(csv_file, delimiter=';', quoting=csv.QUOTE_NONE)
csv_writer_header.writerow(self.table_rows[0])
csv_writer = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
for line in self.table_rows[1:]:
csv_writer.writerow(line)
logger().debug(f"Log file created successfully: {os.path.join(path, log_file)}")
self.csv_log.append(self.get_testcase_stack())
def set_seconds(self, initial_time):
"""
Sets the seconds variable through a calculation of current time minus the execution start time.
Usage:
>>> # Calling the method:
>>> self.log.set_seconds()
"""
delta = datetime.today() - initial_time
return round(delta.total_seconds(), 2)
def list_of_testcases(self):
"""
Returns a list of test cases from suite
"""
runner = next(iter(list(filter(lambda x: "runner.py" in x.filename, inspect.stack()))), None)
if runner:
try:
return list(filter(lambda x: x is not None, list(runner.frame.f_locals['test']._tests)))
except KeyError:
return []
else:
return []
def get_testcase_stack(self):
"""
Returns a string with the current testcase name
[Internal]
"""
return next(iter(list(map(lambda x: x.function, filter(lambda x: re.search('setUpClass', x.function) or re.search('test_', x.function), inspect.stack())))), None)
def checks_empty_line(self):
"""
Checks if the log file is not empty.
03 - 'Programa' 10 - 'Release' 14 - 'ID Execução' 15 - 'Pais'
[Internal]
"""
table_rows_has_line = False
if self.table_rows[1][3] == '':
self.table_rows[1][3] = 'NO PROGRAM'
if self.table_rows[1][10] == '':
self.table_rows[1][10] = '12.1.27'
if self.table_rows[1][15] == '':
self.table_rows[1][15] = 'BRA'
if self.table_rows[1][11] == '':
self.table_rows[1][11] = 'TIMEOUT'
if len(self.table_rows) > 1:
for x in [ 3, 10, 15 ]:
if (self.table_rows[1][x]):
table_rows_has_line = True
else:
table_rows_has_line = False
break
if self.config.smart_test and self.table_rows[1][14] and table_rows_has_line:
table_rows_has_line = True
elif self.config.smart_test:
table_rows_has_line = False
return table_rows_has_line
def generate_result(self, result, message):
"""
Generate a result of testcase and export to a json.
:param result: The result of the case.
:type result: bool
:param message: The message to be logged..
:type message: str
Usage:
>>> # Calling the method:
>>> self.log.generate_result(True, "Success")
"""
printable_message = self.printable_message(message)
if not self.suite_datetime:
self.suite_datetime = time.strftime("%d/%m/%Y %X")
self.generate_json(self.generate_dict(result, printable_message))
def get_file_name(self, file_name):
"""
Returns a Testsuite name
"""
testsuite_stack = next(iter(list(filter(lambda x: file_name in x.filename.lower(), inspect.stack()))), None)
if testsuite_stack:
if '/' in testsuite_stack.filename:
split_character = '/'
else:
split_character = '\\'
return testsuite_stack.filename.split(split_character)[-1].split(".")[0]
else:
return ""
def generate_dict(self, result, message):
"""
Returns a dictionary with the log information
"""
log_version = "20200814"
dict_key = {
"APPVERSION": self.build_version,
"CLIVERSION": self.webapp_version,
"COUNTRY": self.country,
"CTMETHOD": self.ct_method,
"CTNUMBER": self.ct_number,
"DBACCESS": "",
"DBTYPE": self.database,
"DBVERSION": "",
"EXECDATE": self.date,
"EXECTIME": self.hour,
"FAIL": 0 if result else 1,
"FAILMSG": message,
"IDENTI": self.issue,
"IDEXEC": self.config.execution_id,
"LASTEXEC": self.last_exec,
"LIBVERSION": self.lib_version,
"OBSERV": "",
"PASS": 1 if result else 0,
"PROGDATE": self.program_date,
"PROGRAM": self.program,
"PROGTIME": "00:00:00",
"RELEASE": self.release,
"SECONDSCT": self.testcase_seconds,
"SOTYPE": self.so_type,
"SOVERSION": self.so_version,
"STATION": self.station,
"STATUS": "", # ???
"TESTCASE": self.get_file_name('testcase'),
"TESTSUITE": self.get_file_name('testsuite'),
"TESTTYPE": "1",
"TOKEN": "TI<PASSWORD>", # ???
"TOOL": self.test_type,
"USRNAME": self.user,
"VERSION": self.version
}
return dict_key
def generate_json(self, dictionary):
"""
"""
server_address1 = self.config.logurl1
server_address2 = self.config.logurl2
success = False
data = dictionary
json_data = json.dumps(data)
endtime = time.time() + 120
while (time.time() < endtime and not success):
success = self.send_request(server_address1, json_data)
if not success:
success = self.send_request(server_address2, json_data)
time.sleep(10)
if not success:
self.save_json_file(json_data)
def send_request(self, server_address, json_data):
"""
Send a post request to server
"""
success = False
response = None
headers = {'content-type': 'application/json'}
try:
response = requests.post(server_address.strip(), data=json_data, headers=headers)
except:
pass
if response is not None:
if response.status_code == 200:
logger().debug("Log de execucao enviado com sucesso!")
success = True
elif response.status_code == 201 or response.status_code == 204:
logger().debug("Log de execucao enviado com sucesso!")
success = True
else:
self.save_response_log(response, server_address, json_data)
return False
else:
return False
return success
def save_response_log(self, response, server_address, json_data):
"""
"""
today = datetime.today()
try:
path = Path(self.folder, "new_log", self.station)
os.makedirs(path)
except OSError:
pass
try:
with open( Path(path, "response_log.csv"), mode="a", encoding="utf-8", newline='') as response_log:
csv_write = csv.writer(response_log, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_write.writerow([f"Time: {today.strftime('%Y%m%d%H%M%S%f')[:-3]}", f"URL: {server_address}", f"CT: {json.loads(json_data)['CTMETHOD']}",
{f"Status Code: {response.status_code}"}, f"Message: {response.text}"])
except:
pass
def save_json_file(self, json_data):
"""
Writes the log file to the file system.
Usage:
>>> # Calling the method:
>>> self.log.save_json_file()
"""
try:
if self.folder:
path = Path(self.folder, "new_log", self.station)
os.makedirs(path)
else:
path = Path("Log", self.station)
os.makedirs(path)
except OSError:
pass
log_file = f"{self.user}_{uuid.uuid4().hex}.json"
if self.config.smart_test:
open("log_exec_file.txt", "w")
with open( Path(path, log_file), mode="w", encoding="utf-8") as json_file:
json_file.write(json_data)
logger().debug(f"Log file created successfully: {Path(path, log_file)}")
def ident_test(self):
"""
        :return: A tuple (ct_method, ct_number) identifying the running test case.
"""
ct_method = self.get_testcase_stack()
ct_number = ''.join(list(filter(str.isdigit, f"{ct_method.split('_')[-1]}"))) if ct_method else ""
return (ct_method, ct_number)
def take_screenshot_log(self, driver, stack_item="", test_number=""):
"""
[Internal]
Takes a screenshot and saves on the log screenshot folder defined in config.
:param driver: The selenium driver.
:type: Selenium Driver
:param stack_item: test case stack
:type: str
:param test_number: test case number
:type: str
Usage:
>>> # Calling the method:
>>> self.log.take_screenshot_log()
"""
if not stack_item:
stack_item = self.get_testcase_stack()
if stack_item == "setUpClass":
stack_item = self.get_file_name("testsuite")
if not test_number:
test_number = f"{stack_item.split('_')[-1]} -" if stack_item else ""
if not self.release:
self.release = self.config.release
testsuite = self.get_file_name("testsuite")
today = datetime.today()
if self.search_stack("log_error"):
screenshot_file = self.screenshot_file_name("error", stack_item)
elif self.search_stack("CheckResult"):
screenshot_file = self.screenshot_file_name("CheckResult_result_divergence", stack_item)
else:
screenshot_file = self.screenshot_file_name(stack_item)
if self.config.debug_log:
logger().debug(f"take_screenshot_log in:{datetime.now()}\n")
try:
if self.config.log_http:
folder_path = Path(self.config.log_http, self.config.country, self.release, self.config.issue, self.config.execution_id, testsuite)
path = Path(folder_path, screenshot_file)
os.makedirs(Path(folder_path))
else:
path = Path("Log", self.station, screenshot_file)
os.makedirs(Path("Log", self.station))
except OSError:
pass
try:
driver.save_screenshot(str(path))
logger().debug(f"Screenshot file created successfully: {path}")
except Exception as e:
logger().exception(f"Warning Log Error save_screenshot exception {str(e)}")
def screenshot_file_name(self, description="", stack_item=""):
"""
        :param description: Optional suffix describing the screenshot (e.g. "error").
        :param stack_item: Test case name taken from the call stack.
        :return: The generated screenshot file name.
"""
today = datetime.today()
if description:
return f"{self.user}_{today.strftime('%Y%m%d%H%M%S%f')[:-3]}_{stack_item}_{description}.png"
else:
return f"{self.user}_{today.strftime('%Y%m%d%H%M%S%f')[:-3]}_{stack_item}.png"
def printable_message(self, string):
"""
        :param string: The raw message to sanitize.
        :return: The message with non-printable characters removed, semicolons
            replaced by commas and the length capped at 600 characters.
"""
return re.sub(';', ',', ''.join(filter(lambda x: x.isprintable(), string))[:600])
def search_stack(self, function):
"""
Returns True if passed function is present in the call stack.
:param function: Name of the function
:type function: str
:return: Boolean if passed function is present or not in the call stack.
:rtype: bool
Usage:
>>> # Calling the method:
>>> is_present = self.search_stack("MATA020")
"""
return len(list(filter(lambda x: x.function == function, inspect.stack()))) > 0
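# --- Editor's illustration (not part of the original module) ----------------
# A minimal, standalone sketch of what printable_message() and search_stack()
# above do. The sample string and function name are made up for the example.
if __name__ == "__main__":
    import re
    import inspect

    sample = "error;\x00line one\nline two"
    # printable_message: drop unprintable characters, turn ';' into ',', cap at 600 chars
    cleaned = re.sub(';', ',', ''.join(filter(lambda x: x.isprintable(), sample))[:600])
    print(cleaned)  # -> "error,line oneline two"

    # search_stack: True when a frame with the given function name is on the call stack
    def outer():
        return len(list(filter(lambda x: x.function == "outer", inspect.stack()))) > 0

    print(outer())  # -> True, because outer() appears in its own call stack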
| StarcoderdataPython |
3249920 | <reponame>alekseystryukov/quarterly_report<gh_stars>0
from django.shortcuts import render, get_object_or_404
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Q
from companies.models import Company
import json
def index(request):
context = {
"companies": None,
"search": request.GET.get('search', '')
}
if context["search"]:
context["companies"] = Company.objects.filter(
Q(cik__icontains=context["search"]) |
Q(symbol__icontains=context["search"]) |
Q(name__icontains=context["search"])
)
return render(request, 'index.html', context)
def detail(request, cik):
company = get_object_or_404(Company, cik=cik)
data = json.dumps(
list(
company.reports.all().values(
"date", "shares", "revenue", "cost_of_revenue",
"net_income", "convertible_notes", "equity"
)
),
cls=DjangoJSONEncoder
)
return render(request, 'detail.html', {'company': company, 'chart_data': data})
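# --- Editor's illustration (not part of the original file) ------------------
# A minimal sketch of how the two views above could be routed. In a real
# project this would live in the app's urls.py rather than here; the route
# strings and app_name are assumptions made only for the example.
from django.urls import path

app_name = "companies"
urlpatterns = [
    path("", index, name="index"),
    path("company/<str:cik>/", detail, name="detail"),
]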
| StarcoderdataPython |
3326577 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
def test():
one = awkward1.Array([999, 123, 1, 2, 3, 4, 5])
two = awkward1.Array([999])[:0]
three = awkward1.Array([])
assert awkward1.to_list(one[[None, None]]) == [None, None]
assert awkward1.to_list(one[[None, 0, None]]) == [None, 999, None]
assert awkward1.to_list(two[[None, None]]) == [None, None]
assert awkward1.to_list(two[[None, None, None]]) == [None, None, None]
assert awkward1.to_list(three[[None, None]]) == [None, None]
assert awkward1.to_list(three[[None, None, None]]) == [None, None, None]
array = awkward1.Array([[[0, 1, 2], []], [[], [3, 4]], [[5], [6, 7, 8, 9]]])
assert awkward1.to_list(array[:, [None, 1, None]]) == [[None, [], None], [None, [3, 4], None], [None, [6, 7, 8, 9], None]]
assert awkward1.to_list(array[:2, [None, 1, None]]) == [[None, [], None], [None, [3, 4], None]]
assert awkward1.to_list(array[1:, [None, 1, None]]) == [[None, [3, 4], None], [None, [6, 7, 8, 9], None]]
assert awkward1.to_list(array[:0, [None, 1, None]]) == []
| StarcoderdataPython |
128466 | <gh_stars>0
import sys
import pickle
import json
import os
import math
import networkx as nx
from collections import defaultdict
from net_init import load_network
from net_init import generate_random_outs_conns_with_oracle as gen_rand_outs_with_oracle
from network.sparse_table import SparseTable
from network.communicator import Communicator
from network import comm_network
from network.oracle import SimpleOracle
from sec_hop.selector import Selector
from mat_complete.mat_comp_solver import construct_table
import random
import numpy as np
class Experiment:
def __init__(self, topo, in_lim, out_lim, name, num_keep, num_2hop, num_rand, num_epoch, adapts, num_msg, churn_rate):
self.in_lim = in_lim
self.out_lim = out_lim
self.num_out = out_lim
self.outdir = os.path.dirname(name)
self.loc, self.ld, self.roles, self.proc_delay, self.pub_prob = load_network(topo)
self.num_node = len(self.loc)
self.num_epoch = num_epoch
self.num_msg = num_msg
self.churn_rate = churn_rate
self.selectors = {i: Selector(i, num_keep, num_rand, num_msg, self.num_node)
for i in range(self.num_node)}
        # self.num_cand = num_node  # could be less if num_node is large
self.snapshots = []
# self.pools = Pool(processes=num_thread)
self.directions = ['incoming', 'outgoing', 'bidirect']
self.nodes = {i: Communicator(i, self.proc_delay[i], in_lim, out_lim, [])
for i in range(self.num_node)}
self.oracle = SimpleOracle(in_lim, out_lim, self.num_node)
self.out_hist = []
self.sparse_tables = {i: SparseTable(i) for i in range(self.num_node)}
# self.conns_snapshot = []
# self.broad_nodes = [] # hist of broadcasting node
# self.timer = time.time()
# self.pubs = [k for k,v in self.roles.items() if v=='PUB']
self.adapts = adapts
self.pub_hist = []
self.dists_hist = defaultdict(list)
self.dist_file = name
# log setting
# self.use_logger = use_logger
# self.logdir = self.outdir + '/' + 'logs'
# if not os.path.exists(self.logdir):
# os.makedirs(self.logdir)
# self.loggers = {}
# self.init_logger()
self.init_graph_conn = os.path.join(self.outdir, 'init.json')
self.snapshot_dir = os.path.join(self.outdir, 'snapshots')
self.snapshot_exploit_dir = os.path.join(self.outdir, 'snapshots-exploit')
self.write_adapts_node(os.path.join(self.outdir, 'adapts'))
if not os.path.exists(self.snapshot_dir):
os.makedirs(self.snapshot_dir)
        if not os.path.exists(self.snapshot_exploit_dir):
            os.makedirs(self.snapshot_exploit_dir)
self.num_keep = num_keep
self.num_2hop = num_2hop
self.num_rand = num_rand
assert(num_keep + num_2hop + num_rand == self.out_lim)
def construct_graph(self):
G = nx.Graph()
for i, node in self.nodes.items():
for u in node.outs:
delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
if i == u:
print('self loop', i)
sys.exit(1)
G.add_edge(i, u, weight=delay)
return G
def construct_exploit_graph(self, curr_outs):
G = nx.Graph()
for i, node in self.nodes.items():
out_peers = []
if i in self.adapts:
out_peers = curr_outs[i][:self.num_out-self.num_rand]
else:
out_peers = curr_outs[i]
for u in out_peers:
delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
if i == u:
print('self loop', i)
sys.exit(1)
G.add_edge(i, u, weight=delay)
return G
def write_cost(self, outpath):
G = self.construct_graph()
with open(outpath, 'w') as w:
length = dict(nx.all_pairs_dijkstra_path_length(G))
for i in range(self.num_node):
for j in range(self.num_node):
cost = length[i][j] - self.proc_delay[i]/2.0 + self.proc_delay[j]/2.0
w.write(str(cost) + ' ')
w.write('\n')
def write_exploit_cost(self, outpath, curr_outs):
G = self.construct_exploit_graph(curr_outs)
with open(outpath, 'w') as w:
length = dict(nx.all_pairs_dijkstra_path_length(G))
for i in range(self.num_node):
for j in range(self.num_node):
cost = length[i][j] - self.proc_delay[i]/2.0 + self.proc_delay[j]/2.0
w.write(str(cost) + ' ')
w.write('\n')
def write_adapts_node(self, filename):
with open(filename, 'w') as w:
sorted_stars = sorted(self.adapts)
for star in sorted_stars:
w.write(str(star) + '\n')
def get_truth_distance(self, star_i, interested_peers, epoch):
# construct graph
G = nx.Graph()
for i, node in self.nodes.items():
if i == star_i:
for u in interested_peers:
# only connect interested edge from the interested node
delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
if i == u:
print('self loop', i)
sys.exit(1)
G.add_edge(i, u, weight=delay)
else:
for u in node.outs:
# not connecting incoming edge to the interested node
if u != star_i:
delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
if i == u:
print('self loop', i)
sys.exit(1)
G.add_edge(i, u, weight=delay)
dists = {} # key is the target pub, value is the best peer and length
pubs = [k for k,v in self.roles.items() if v=='PUB']
for m in pubs:
# the closest distance
length, path = nx.single_source_dijkstra(G, source=star_i, target=m, weight='weight')
assert(len(path)>=0)
topo_length = None
line_len = None
j = None
if len(path) == 1:
# itself
assert(star_i == m)
topo_length = 0
line_len = 0
j = star_i
else:
j = path[1]
topo_length = length - self.proc_delay[j]/2.0 + self.proc_delay[m]/2.0
line_len = self.ld[star_i][m] + self.proc_delay[m]
# line_len = (math.sqrt(
# (self.loc[star_i][0]-self.loc[m][0])**2+
# (self.loc[star_i][1]-self.loc[m][1])**2 ) +self.proc_delay[m])
dists[m] = (j, round(topo_length, 3), round(line_len, 3))
self.dists_hist[star_i].append((epoch, dists))
def save_dists_hist(self):
if self.dist_file == 'None':
return
with open(self.dist_file, 'wb') as w:
pickle.dump(self.dists_hist, w)
def write_init_graph(self):
with open(self.init_graph_conn, 'w') as w:
graph_json = []
for u in range(self.num_node):
node = self.nodes[u]
outs = sorted([int(i) for i in node.outs])
ins = sorted([int(i) for i in node.ins])
peer = {
'node': int(u),
'outs': outs,
'ins': ins
}
graph_json.append(peer)
json.dump(graph_json, w, indent=4)
def take_snapshot(self, epoch, curr_outs):
name = "epoch"+str(epoch)+".txt"
outpath = os.path.join(self.snapshot_dir, name)
self.write_cost(outpath)
outpath_exploit = os.path.join(self.snapshot_exploit_dir, name)
self.write_exploit_cost(outpath_exploit, curr_outs)
# def init_selectors(self, out_conns, in_conns):
# for u in range(self.num_node):
# # if smaller then it is adv
# if u in self.adversary.sybils:
# self.selectors[u] = Selector(u, True, out_conns[u], in_conns[u], None)
# else:
# self.selectors[u] = Selector(u, False, out_conns[u], in_conns[u], None)
def broadcast_msgs(self, num_msg):
time_tables = {i:defaultdict(list) for i in range(self.num_node)}
abs_time_tables = {i:defaultdict(list) for i in range(self.num_node)}
broads = []
pubs = []
probs = []
for k, v in self.pub_prob.items():
pubs.append(k)
probs.append(v)
for _ in range(num_msg):
# p = random.choice(self.pubs)
p = np.random.choice(pubs, size=1, replace=False, p=probs)[0]
self.pub_hist.append(p)
broads.append(p)
comm_network.broadcast_msg(
p,
self.nodes,
self.ld,
time_tables,
abs_time_tables
)
for i in range(self.num_node):
self.sparse_tables[i].append_time(abs_time_tables[i], num_msg, 'abs_time')
self.sparse_tables[i].append_time(time_tables[i], num_msg, 'rel_time')
return broads
def update_selectors(self, outs_conns, ins_conn):
for i in range(self.num_node):
self.selectors[i].update(outs_conns[i], ins_conn[i])
def get_curr_ins(self, curr_outs):
curr_ins = defaultdict(list)
for u in range(self.num_node):
for o in curr_outs[u]:
curr_ins[o].append(u)
return curr_ins
def setup_conn_graph(self, curr_outs):
curr_ins = self.get_curr_ins(curr_outs)
for u in range(self.num_node):
self.nodes[u].update_conns(curr_outs[u], curr_ins[u])
def run_2hop(self, adapt_i, curr_out, e):
slots = self.sparse_tables[adapt_i].table[-self.num_msg:]
incomplete_table,M,nM,max_time,ids,ids_direct = construct_table(slots, adapt_i, self.directions)
selected, rands = self.selectors[adapt_i].run(self.oracle, curr_out, ids, slots)
return selected + rands
def run(self):
curr_outs = gen_rand_outs_with_oracle(self.num_out, self.num_node, self.oracle)
self.oracle.check(curr_outs)
self.setup_conn_graph(curr_outs)
self.write_init_graph()
for e in range(self.num_epoch):
self.take_snapshot(e, curr_outs)
self.oracle.check(curr_outs)
ps = self.broadcast_msgs(self.num_msg)
churn_adapts = comm_network.get_network_churning_nodes(self.churn_rate, self.adapts)
for adapt_i in np.random.permutation(churn_adapts):
curr_outs[adapt_i] = self.run_2hop(adapt_i, curr_outs[adapt_i], e)
self.setup_conn_graph(curr_outs)
for adapt_i in self.adapts:
self.get_truth_distance(adapt_i, curr_outs[adapt_i][:self.num_keep], e)
self.save_dists_hist()
# while True:
# network_state.reset(self.num_node, self.in_lim)
# if num_snapshot == len(record_epochs):
# break
# if self.method == 'mc':
# outs_conns, start_mc = self.run_mc(max_epoch,record_epochs, num_msg, epoch, network_state)
# self.conns_snapshot.append(outs_conns)
# if epoch in record_epochs:
# self.take_snapshot(epoch)
# num_snapshot += 1
# elif self.method == '2hop':
# outs_conns = self.run_2hop(num_msg, epoch, network_state)
# self.conns_snapshot.append(outs_conns)
# if epoch in record_epochs:
# self.take_snapshot(epoch)
# num_snapshot += 1
# epoch += 1
# def select_nodes(nodes, ld, num_msg, selectors, oracle, update_nodes, time_tables, in_lim, out_lim, network_state, num_keep, num_2hop, num_random):
# outs_neighbors = {} # output container
# num_invalid_compose = 0
# # direct peers
# num_rand_1hop = 0
# for i in update_nodes:
# keep_candidates = list(nodes[i].outs | nodes[i].ins )
# composes = comb_subset.get_config(
# num_keep,
# keep_candidates,
# len(keep_candidates),
# network_state,
# i)
# num_invalid_compose += math.comb(len(keep_candidates), num_keep) - len(composes)
# if len(composes) == 0:
# peers = selectors[i].select_random_peers(nodes, num_keep, network_state)
# num_rand_1hop += 1
# # oracle needs to know the connection
# oracle.update_1_hop_peers(i, peers)
# outs_neighbors[i] = peers
# else:
# for compose in composes:
# if len(compose) != len(set(compose)):
# print('repeat in compose')
# print(i)
# print('composes', compose)
# print(keep_candidates)
# print('in', list(nodes[i].outs))
# print('out', list(nodes[i].ins))
# sys.exit(1)
# peers = selectors[i].select_1hops(time_tables[i], composes, num_msg, network_state)
# # oracle needs to know the connection
# oracle.update_1_hop_peers(i, peers)
# outs_neighbors[i] = peers
# num_added_2hop = 0
# num_added_3hop = 0
# num_added_random = 0
# tot_not_seen = 0
# random.shuffle(update_nodes)
# # two hop peers
# if num_2hop > 0:
# for u in update_nodes:
# peers_info = oracle.get_multi_hop_info(u)
# peers, num_not_seen = selectors[u].select_peers(
# config.num_2_hop, nodes, peers_info.two_hops, network_state)
# oracle.update_2_hop_peers(u, peers)
# outs_neighbors[u] += peers
# num_added_2hop += len(peers)
# tot_not_seen += num_not_seen
# # add 3hops
# if out_lim - len(outs_neighbors[u]) > num_random:
# num_3_hop = out_lim - len(outs_neighbors[u]) - num_random
# peers_info = oracle.get_multi_hop_info(u)
# peers, num_not_seen = selectors[u].select_peers(num_3_hop, nodes, peers_info.three_hops, network_state)
# oracle.update_3_hop_peers(u, peers)
# outs_neighbors[u] += peers
# num_added_3hop += len(peers)
# tot_not_seen += num_not_seen
# # add random
# for u in update_nodes:
# num_random = out_lim - len(outs_neighbors[u])
# num_added_random += num_random
# peers = selectors[u].select_random_peers(nodes, num_random, network_state)
# for p in peers:
# if p in outs_neighbors[u]:
# print(p, 'in neigbors', outs_neighbors[u])
# sys.exit(1)
# outs_neighbors[u] += peers
# # debug
# for u in update_nodes:
# if len(set(outs_neighbors[u])) != out_lim:
# print(u, "has less out neighbors")
# print(outs_neighbors[u])
# print(selectors[u].desc_conn)
# sys.exit(1)
# print('num_rand_1hop', num_rand_1hop,'num_invalid_compose', num_invalid_compose )
# # print('Finish. num2hop', num_added_2hop, 'num3hop', num_added_3hop, 'num rand', num_added_random, 'num no seen', tot_not_seen)
# return outs_neighbors
| StarcoderdataPython |
3277730 | <reponame>SpiderOak/enkube<gh_stars>0
# Copyright 2018 SpiderOak, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
import unittest
from unittest.mock import patch, MagicMock, sentinel, call
import curio
from .util import AsyncTestCase, apatch, dummy_coro
from enkube.api.types import *
from enkube.api import client
class TestStreamIter(AsyncTestCase):
def setUp(self):
Kind.instances.clear()
class FooKind(Kind):
apiVersion = 'v1'
self.FooKind = FooKind
self.objects = [
{'foo': 1},
{'bar': 2},
{'baz': 3},
{"apiVersion": "v1", "kind": "FooKind", "spec": "foospec"},
]
class aiter:
async def __anext__(it):
try:
return self.chunks.pop(0)
except IndexError:
raise StopAsyncIteration() from None
async def close_coro():
pass
self.resp = MagicMock()
self.resp.body.__aiter__ = aiter
self.resp.body.close.side_effect = close_coro
async def kindify(obj):
try:
return Kind.getKind(obj['apiVersion'], obj['kind'])(obj)
except KeyError:
return obj
self.api = MagicMock(**{'_kindify.side_effect': kindify})
self.si = client.StreamIter(self.api, self.resp)
def test_iter(self):
self.chunks = [b'\n'.join(json.dumps(o).encode('utf-8') for o in self.objects)]
res = list(self.si)
self.assertEqual(res, self.objects)
self.assertTrue(isinstance(res[-1], self.FooKind))
def test_iter_with_trailing_newline(self):
self.chunks = [b'\n'.join(json.dumps(o).encode('utf-8') for o in self.objects) + b'\n']
res = list(self.si)
self.assertEqual(res, self.objects)
self.assertTrue(isinstance(res[-1], self.FooKind))
def test_iter_chunks(self):
s = b'\n'.join(json.dumps(o).encode('utf-8') for o in self.objects)
n = s.find(b'\n') + 3
self.chunks = [s[:n], s[n:]]
res = list(self.si)
self.assertEqual(res, self.objects)
self.assertTrue(isinstance(res[-1], self.FooKind))
def test_context_manager(self):
with self.si as ret:
pass
self.assertIs(ret, self.si)
self.resp.body.close.assert_called_once_with()
class TestApiClient(AsyncTestCase):
def setUp(self):
Kind.instances.clear()
self.api = client.ApiClient(MagicMock())
self.api.log = MagicMock()
self.api.session = MagicMock(**{'close.side_effect': dummy_coro})
async def test_close(self):
await self.api.close()
self.api.session.close.assert_called_once_with()
def test_context_manager(self):
with patch.object(self.api, 'close') as c:
c.side_effect = dummy_coro
with self.api:
pass
c.assert_called_once_with()
async def test_request(self):
resp = MagicMock(status_code=200, headers={'content-type': 'application/json'})
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
res = await self.api.request('GET', '/', foo='bar')
self.assertIs(res, resp.json.return_value)
self.api.session.request.assert_called_once_with(method='GET', path='/', foo='bar')
resp.json.assert_called_once_with()
async def test_request_non_json(self):
resp = MagicMock(status_code=200)
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
res = await self.api.request('GET', '/', foo='bar')
self.assertIs(res, resp.text)
self.api.session.request.assert_called_once_with(method='GET', path='/', foo='bar')
self.assertFalse(resp.json.called)
async def test_request_non_2xx_raises_apierror(self):
resp = MagicMock(status_code=500)
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
with self.assertRaises(client.ApiError) as err:
await self.api.request('GET', '/', foo='bar')
self.assertIs(err.exception.resp, resp)
async def test_request_non_2xx_raises_apierror_with_reason(self):
resp = MagicMock(status_code=500, headers={'content-type': 'application/json'})
resp.json.return_value = {'message': sentinel.reason}
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
with self.assertRaises(client.ApiError) as err:
await self.api.request('GET', '/', foo='bar')
self.assertIs(err.exception.resp, resp)
self.assertIs(err.exception.reason, sentinel.reason)
async def test_request_resource_not_found(self):
resp = MagicMock(status_code=404)
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
with self.assertRaises(client.ResourceNotFoundError) as err:
await self.api.request('GET', '/', foo='bar')
self.assertIs(err.exception.resp, resp)
@apatch('enkube.api.client.ApiClient.getKind')
async def test_request_kindtype(self, gk):
class FooKind(Kind):
apiVersion = 'v1'
async def gk_coro(*args, **kw):
return FooKind
gk.side_effect = gk_coro
resp = MagicMock(status_code=200, headers={'content-type': 'application/json'})
resp.json.return_value = {'apiVersion': 'v1', 'kind': 'FooKind', 'spec': 'foospec'}
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
res = await self.api.request('GET', '/')
self.assertEqual(res, resp.json.return_value)
gk.assert_called_once_with('v1', 'FooKind')
self.assertTrue(isinstance(res, FooKind))
@apatch('enkube.api.client.ApiClient.getKind')
async def test_request_kindtype_not_found(self, gk):
class FooKind(Kind):
apiVersion = 'v1'
async def gk_coro(*args, **kw):
raise client.ResourceKindNotFoundError()
gk.side_effect = gk_coro
resp = MagicMock(status_code=200, headers={'content-type': 'application/json'})
resp.json.return_value = {'apiVersion': 'v1', 'kind': 'FooKind', 'spec': 'foospec'}
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
res = await self.api.request('GET', '/')
self.assertEqual(res, resp.json.return_value)
gk.assert_called_once_with('v1', 'FooKind')
self.assertFalse(isinstance(res, FooKind))
@apatch('enkube.api.client.ApiClient.getKind')
async def test_request_kindtype_other_error(self, gk):
class FooKind(Kind):
apiVersion = 'v1'
exc = client.ApiError(resp=MagicMock(status_code=500))
async def gk_coro(*args, **kw):
raise exc
gk.side_effect = gk_coro
resp = MagicMock(status_code=200, headers={'content-type': 'application/json'})
resp.json.return_value = {'apiVersion': 'v1', 'kind': 'FooKind', 'spec': 'foospec'}
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
with self.assertRaises(client.ApiError) as err:
await self.api.request('GET', '/')
self.assertIs(err.exception, exc)
@apatch('enkube.api.client.StreamIter')
async def test_request_stream(self, si):
resp = MagicMock(status_code=200)
async def req_coro(*args, **kw):
return resp
self.api.session.request.side_effect = req_coro
res = await self.api.request('GET', '/', foo='bar', stream=True)
self.assertIs(res, si.return_value)
self.api.session.request.assert_called_once_with(
method='GET', path='/', foo='bar', stream=True)
si.assert_called_once_with(self.api, resp)
async def test_get_apiversion_v1(self):
v = {
'apiVersion': 'v1',
'kind': 'APIResourceList',
'groupVersion': 'v1',
'resources': [
{
'kind': 'FooKind',
'name': 'foos',
'singularName': '',
'namespaced': True,
'verbs': ['get', 'list'],
},
],
}
async def get_coro(*args, **kw):
return v
self.api.get = MagicMock(side_effect=get_coro)
res = await self.api._get_apiVersion('v1')
self.api.get.assert_called_once_with('/api/v1')
self.assertTrue(isinstance(res, APIResourceList))
self.assertEqual(res, v)
self.assertIs(res, self.api._apiVersion_cache['/api/v1'])
self.assertEqual(self.api._kind_cache['v1', 'FooKind'], v['resources'][0])
async def test_get_apiversion(self):
v = {
'apiVersion': 'v1',
'kind': 'APIResourceList',
'groupVersion': 'v1',
'resources': [
{
'kind': 'FooKind',
'name': 'foos',
'singularName': '',
'namespaced': True,
'verbs': ['get', 'list'],
},
],
}
async def get_coro(*args, **kw):
return v
self.api.get = MagicMock(side_effect=get_coro)
res = await self.api._get_apiVersion('apps/v1')
self.api.get.assert_called_once_with('/apis/apps/v1')
self.assertTrue(isinstance(res, APIResourceList))
self.assertEqual(res, v)
self.assertIs(res, self.api._apiVersion_cache['/apis/apps/v1'])
self.assertEqual(self.api._kind_cache['apps/v1', 'FooKind'], v['resources'][0])
async def test_get_apiversion_cached(self):
self.api.get = MagicMock()
self.api._apiVersion_cache['/apis/apps/v1'] = sentinel.result
res = await self.api._get_apiVersion('apps/v1')
self.api.get.assert_not_called()
self.assertIs(res, sentinel.result)
async def test_get_apiversion_apierror(self):
async def get_coro(*args, **kw):
raise client.ApiError(MagicMock(status_code=400))
self.api.get = MagicMock(side_effect=get_coro)
with self.assertRaises(client.ApiError) as err:
await self.api._get_apiVersion('apps/v1')
self.assertIs(err.exception.reason, None)
async def test_get_apiversion_not_found(self):
async def get_coro(*args, **kw):
raise client.ApiError(MagicMock(status_code=404))
self.api.get = MagicMock(side_effect=get_coro)
with self.assertRaises(client.ApiVersionNotFoundError) as err:
await self.api._get_apiVersion('apps/v1')
self.assertEqual(err.exception.reason, 'apiVersion not found')
async def test_get_resourcekind(self):
v = {
'apiVersion': 'v1',
'kind': 'APIResourceList',
'groupVersion': 'v1',
'resources': [
{
'kind': 'FooKind',
'name': 'foos',
'singularName': '',
'namespaced': True,
'verbs': ['get', 'list'],
},
],
}
async def get_coro(*args, **kw):
return v
self.api.get = MagicMock(side_effect=get_coro)
res = await self.api._get_resourceKind('apps/v1', 'FooKind')
self.api.get.assert_called_once_with('/apis/apps/v1')
self.assertEqual(res, v['resources'][0])
self.assertTrue(isinstance(res, APIResource))
async def test_get_resourcekind_not_found(self):
v = {
'apiVersion': 'v1',
'kind': 'APIResourceList',
'groupVersion': 'v1',
'resources': [
{
'kind': 'FooKind',
'name': 'foos',
'singularName': '',
'namespaced': True,
'verbs': ['get', 'list'],
},
],
}
async def get_coro(*args, **kw):
return v
self.api.get = MagicMock(side_effect=get_coro)
with self.assertRaises(client.ResourceKindNotFoundError) as err:
await self.api._get_resourceKind('apps/v1', 'BarKind')
self.assertEqual(err.exception.reason, 'resource kind not found')
async def test_get_resourcekind_ignores_subresources(self):
v = {
'apiVersion': 'v1',
'kind': 'APIResourceList',
'groupVersion': 'v1',
'resources': [
{
'kind': 'FooKind',
'name': 'foos/status',
'singularName': '',
'namespaced': True,
'verbs': ['get', 'list'],
},
],
}
async def get_coro(*args, **kw):
return v
self.api.get = MagicMock(side_effect=get_coro)
with self.assertRaises(client.ApiError) as err:
await self.api._get_resourceKind('apps/v1', 'FooKind')
self.assertEqual(err.exception.reason, 'resource kind not found')
@apatch('enkube.api.types.Kind.from_apiresource')
@apatch('enkube.api.client.ApiClient._get_resourceKind')
async def test_getkind(self, fa, gr):
async def gr_coro(*args, **kw):
return sentinel.rk
gr.side_effect = gr_coro
fa.return_value = sentinel.fookind
FooKind = await self.api.getKind('v1', 'FooKind')
self.assertIs(FooKind, sentinel.fookind)
fa.assert_called_once_with('v1', sentinel.rk)
gr.assert_called_once_with('v1', 'FooKind')
async def test_getkind_local_kind(self):
class FooKind(Kind):
apiVersion = 'v1'
res = await self.api.getKind('v1', 'FooKind')
self.assertIs(res, FooKind)
async def test_check_health_ok(self):
async def get_coro(path):
return 'ok'
self.api.get = MagicMock(side_effect=get_coro)
self.assertTrue(await self.api.check_health())
self.assertTrue(self.api.healthy.is_set())
self.api.get.assert_called_once_with('/healthz')
async def test_check_health_error(self):
await self.api.healthy.set()
async def get_coro(path):
raise client.ApiError()
self.api.get = MagicMock(side_effect=get_coro)
self.assertFalse(await self.api.check_health())
self.assertFalse(self.api.healthy.is_set())
async def test_check_health_gibberish(self):
await self.api.healthy.set()
async def get_coro(path):
return 'foo'
self.api.get = MagicMock(side_effect=get_coro)
self.assertFalse(await self.api.check_health())
self.assertFalse(self.api.healthy.is_set())
@apatch('curio.sleep')
async def test_wait_until_healthy(self, sleep):
sleep.side_effect = dummy_coro
responses = [client.ApiError(), 'ok']
async def get_coro(path):
return responses.pop(0)
self.api.get = MagicMock(side_effect=get_coro)
await self.api.wait_until_healthy()
self.assertEqual(responses, [])
sleep.assert_called_once_with(client.ApiClient._health_check_interval)
self.assertTrue(self.api.healthy.is_set())
async def test_ensure_object(self):
self.api.create = MagicMock(side_effect=dummy_coro)
await self.api.ensure_object(sentinel.obj)
self.api.create.assert_called_once_with(sentinel.obj)
async def test_ensure_object_ignores_conflict(self):
async def conflict_coro(*args, **kw):
raise client.ApiError(MagicMock(status_code=409))
self.api.create = MagicMock(side_effect=conflict_coro)
await self.api.ensure_object(sentinel.obj)
self.api.create.assert_called_once_with(sentinel.obj)
async def test_ensure_object_raises_non_conflict_errors(self):
async def conflict_coro(*args, **kw):
raise client.ApiError(MagicMock(status_code=500))
self.api.create = MagicMock(side_effect=conflict_coro)
with self.assertRaises(client.ApiError):
await self.api.ensure_object(sentinel.obj)
async def test_ensure_objects(self):
self.api.ensure_object = MagicMock(side_effect=dummy_coro)
await self.api.ensure_objects([sentinel.obj1, sentinel.obj2])
self.api.ensure_object.assert_has_calls([
call(sentinel.obj1),
call(sentinel.obj2),
])
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
34433 | from datetime import datetime
import timebomb.models as models
def test_Notification():
notif = models.Notification("message")
assert notif.content == "message"
assert notif.read is False
assert str(notif) == "message"
def test_Player():
player = models.Player("name", "id")
assert player.name == "name"
assert player.id == "id"
assert player.team is None
assert player.hand is None
player = models.Player("name", "id", "team", ("A", "B"), "roomid")
assert player.name == "name"
assert player.id == "id"
assert player.team == "team"
assert player.hand == ("A", "B")
assert player.roomId == "roomid"
def test_Message():
now = datetime.now()
message = models.Message("player", "message")
assert message.player_name == "player"
assert message.content == "message"
assert message.timestamp and isinstance(message.timestamp, datetime)
assert str(message) == f"[{now:%H:%M}] player: message"
def test_Room():
player = models.Player("player", "player_id")
room = models.Room("room", "room_id", (player,))
assert room.name == "room" and room.id == "room_id"
assert len(room.players) == 1 and room.players[0] is player
assert room.cutter is None and room.winning_team is None and room.status == ""
assert isinstance(room.cards_found, dict) and isinstance(room.cards_left, dict)
assert not room.cards_found and not room.cards_left
def test_GameState():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
def test_GameState_new_message():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
message = state.new_message({"player": "player", "message": "test_message"})
assert len(state.messages) == 1 and state.messages[0] is message
assert message.player_name == "player" and message.content == "test_message"
for i in range(99):
last = state.new_message(
{"player": f"player{i}", "message": f"test_message{i}"}
)
assert len(state.messages) == 100
assert state.messages[0] is message and state.messages[99] is last
assert last.player_name == "player98" and last.content == "test_message98"
last = state.new_message({"player": "player99", "message": "test_message99"})
assert len(state.messages) == 100
assert state.messages[0] is not message and state.messages[99] is last
assert (
state.messages[0].player_name == "player0"
and state.messages[0].content == "test_message0"
)
assert last.player_name == "player99" and last.content == "test_message99"
res = state.new_message({"message": "test_message100"})
assert res is None
assert state.messages[99] is last
def test_GameState_new_notification():
state = models.GameState()
assert state.notification is None
notif1 = state.new_notification({"message": "notif1"})
assert state.notification is notif1 and notif1.content == "notif1"
notif2 = state.new_notification({"message": "notif2"})
assert state.notification is notif2 and notif2.content == "notif2"
notif3 = state.new_notification({"unknown": "notif2"})
assert notif3 is None and state.notification is notif2
def test_GameState_update_room():
state = models.GameState()
assert state.room is None
players_data = [{"name": "player1", "id": "id1"}]
room_data = {"name": "roomname", "id": "roomid", "players": players_data}
room = state.update_room(room_data)
assert state.room is room and room.name == "roomname" and room.id == "roomid"
assert len(room.players) == 1
assert room.players[0].name == "player1" and room.players[0].id == "id1"
new_data = {"name": "newname", "cutter": {"name": "cutter", "id": "cutterid"}}
room = state.update_room(new_data)
assert state.room is room and room.name == "newname" and room.id == "roomid"
assert len(room.players) == 1
assert room.players[0].name == "player1" and room.players[0].id == "id1"
assert (
isinstance(room.cutter, models.Player)
and room.cutter.id == "cutterid"
and room.cutter.name == "cutter"
)
new_data = {
"players": [{"name": "player1", "id": "id1"}, {"name": "player2", "id": "id2"}]
}
room = state.update_room(new_data)
assert state.room is room and room.name == "newname" and room.id == "roomid"
assert len(room.players) == 2
def test_GameState_update_me():
state = models.GameState()
assert state.me is None
player = state.update_me({"name": "player1", "id": "id1"})
assert state.me is player and player.name == "player1" and player.id == "id1"
assert player.hand is None
player = state.update_me({"hand": ("A", "A", "B", "A")})
assert state.me is player and player.name == "player1" and player.id == "id1"
assert player.hand == ("A", "A", "B", "A")
def test_GameState_reset():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
state.messages = ["m1", "m2"]
state.room = "Room"
state.me = "Me"
state.notification = "Notification"
state.reset()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
| StarcoderdataPython |
171545 | from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime, date
@login_manager.user_loader
def load_user(userName):
return User.query.get(str(userName))
class User(UserMixin, db.Model):
def get_id(self):
return (self.username)
__tablename__ = 'users'
username = db.Column(db.String(255),unique = True, primary_key = True)
firstname = db.Column(db.String(255))
secondname = db.Column(db.String(255))
email = db.Column(db.String(255), unique = True, index = True)
profile_picture = db.Column(db.String())
profile_bio = db.Column(db.String(255))
secured_password = db.Column(db.String(255))
pitches = db.relationship('Pitch', backref = 'moto', lazy = 'dynamic')
commentsByMe = db.relationship('PitchComment', backref = 'userzs', lazy = 'dynamic')
@property
def password(self):
raise AttributeError('You cannot view a users password')
@password.setter
def password(self, password):
self.secured_password = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.secured_password, password)
class Category(db.Model):
__tablename__ = 'cats'
id = db.Column(db.Integer, primary_key = True)
category_name = db.Column(db.String(255))
pitch = db.relationship('Pitch', backref = 'categ', lazy = "dynamic")
class Pitch(db.Model):
__tablename__ = 'pitch'
id = db.Column(db.Integer, primary_key = True)
pitch = db.Column(db.String)
categoryOfPitch = db.Column(db.Integer, db.ForeignKey("cats.id"))
date_posted = db.Column(db.DateTime, default = date.today)
user = db.Column(db.String, db.ForeignKey("users.username"))
upvote = db.Column(db.Integer, default = 0)
downvote = db.Column(db.Integer, default = 0)
comments = db.relationship('PitchComment', backref = 'pitch', lazy = "dynamic")
def save_pitch(self):
db.session.add(self)
db.session.commit()
def delete_pitch(self):
db.session.delete(self)
        db.session.commit()
@classmethod
def pitch_by_id(cls, id):
pitches = Pitch.query.filter_by(id = id).first()
return pitches
@classmethod
def all_pitches(cls, inputUserName):
pitches = Pitch.query.filter_by(user = inputUserName).all()
return pitches
class PitchComment(db.Model):
__tablename__ = 'pitchcomments'
id = db.Column(db.Integer, primary_key = True)
pitch_id = db.Column(db.Integer, db.ForeignKey("pitch.id"))
comment = db.Column(db.String)
user = db.Column(db.String, db.ForeignKey("users.username"))
date_posted = db.Column(db.DateTime, default = date.today)
def save_comment(self):
db.session.add(self)
db.session.commit()
def delete_comment(self):
db.session.delete(self)
        db.session.commit()
@classmethod
def all_comments(cls, inputUser):
comments = PitchComment.query.filter_by(user = inputUser).all()
return comments
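# --- Editor's illustration (not part of the original file) ------------------
# A minimal sketch of creating and saving a pitch with the models above. The
# username and category id are assumptions; this must run inside a Flask
# application context with the database initialised.
def _example_create_pitch():
    pitch = Pitch(
        pitch="My one-minute pitch",
        categoryOfPitch=1,           # assumed existing Category id
        user="some_username",        # assumed existing User.username
    )
    pitch.save_pitch()
    return Pitch.all_pitches("some_username")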
| StarcoderdataPython |
3261075 | import unittest
import logging
from common import loginit
from mock import Mock, patch, mock_open
from sensors.temperature.ds18b20 import Ds18b20
class TempSensorTest(unittest.TestCase):
goodData = "93 01 4b 46 7f ff 0d 10 32 : crc=32 YES\n93 01 4b 46 7f ff 0d 10 32 t=25187"
badCrc = "93 01 4b 46 7f ff 0d 10 32 : crc=32 NO\n93 01 4b 46 7f ff 0d 10 32 t=25187"
zeroTemp = "93 01 4b 46 7f ff 0d 10 32 : crc=32 YES\n93 01 4b 46 7f ff 0d 10 32 t=0"
noTemp = "93 01 4b 46 7f ff 0d 10 32 : crc=32 YES\n93 01 4b 46 7f ff 0d 10 32 t="
invalidTemp = "93 01 4b 46 7f ff 0d 10 32 : crc=32 YES\n93 01 4b 46 7f ff 0d 10 32 t=dfhg"
empty = ""
name = "testSensorName"
@classmethod
def setUpClass(cls):
loginit.initTestLogging()
TempSensorTest.logger = logging.getLogger(__name__)
@patch('os.path.isfile')
def test_getDataC(self, osMock):
osMock.return_value = True
sensor = Ds18b20(TempSensorTest.name, 'C', "/file")
openMock = mock_open(read_data=TempSensorTest.goodData)
with patch('builtins.open', openMock) as mockFile:
self.assertEqual(sensor.getData(), 25.187)
@patch('os.path.isfile')
def test_getDataF(self, osMock):
osMock.return_value = True
sensor = Ds18b20(TempSensorTest.name, 'F', "/file")
openMock = mock_open(read_data=TempSensorTest.goodData)
with patch('builtins.open', openMock) as mockFile:
self.assertEqual(sensor.getData(), 77.3366)
@patch('os.path.isfile')
def test_getDataBad(self, osMock):
osMock.return_value = True
sensor = Ds18b20(TempSensorTest.name, 'Q', "/file")
openMock = mock_open(read_data=TempSensorTest.goodData)
with patch('builtins.open', openMock) as mockFile:
with self.assertRaises(ValueError):
sensor.getData()
| StarcoderdataPython |
154966 | """
Modified from https://github.com/facebookresearch/fvcore
"""
__all__ = ["Registry"]
class Registry:
"""A registry providing name -> object mapping, to support
custom modules.
To create a registry (e.g. a backbone registry):
.. code-block:: python
BACKBONE_REGISTRY = Registry('BACKBONE')
To register an object:
.. code-block:: python
@BACKBONE_REGISTRY.register()
class MyBackbone(nn.Module):
...
Or:
.. code-block:: python
BACKBONE_REGISTRY.register(MyBackbone)
"""
def __init__(self, name):
self._name = name
self._obj_map = dict()
def _do_register(self, name, obj, force=False):
if name in self._obj_map and not force:
raise KeyError(
'An object named "{}" was already '
'registered in "{}" registry'.format(name, self._name)
)
self._obj_map[name] = obj
def register(self, obj=None, force=False):
if obj is None:
# Used as a decorator
def wrapper(fn_or_class):
name = fn_or_class.__name__
self._do_register(name, fn_or_class, force=force)
return fn_or_class
return wrapper
# Used as a function call
name = obj.__name__
self._do_register(name, obj, force=force)
def get(self, name):
if name not in self._obj_map:
raise KeyError(
'Object name "{}" does not exist '
'in "{}" registry'.format(name, self._name)
)
return self._obj_map[name]
def registered_names(self):
return list(self._obj_map.keys())
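# --- Editor's illustration (not part of the original module) ----------------
# A minimal, self-contained sketch of the Registry above: register an object,
# look it up by name and list what has been registered. The names are made up
# for the example.
if __name__ == "__main__":
    BACKBONE_REGISTRY = Registry("BACKBONE")

    @BACKBONE_REGISTRY.register()
    class MyBackbone:
        pass

    assert BACKBONE_REGISTRY.get("MyBackbone") is MyBackbone
    print(BACKBONE_REGISTRY.registered_names())  # -> ['MyBackbone']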
| StarcoderdataPython |
99422 | <reponame>kotofey97/yatube_project_finale
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import (PasswordChangeForm, PasswordResetForm,
SetPasswordForm, UserCreationForm)
User = get_user_model()
class CreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
fields = ('first_name', 'last_name', 'username', 'email')
class PasswordChangingForm(PasswordChangeForm):
class Meta:
model = User
fields = ('old_password', 'new_<PASSWORD>', '<PASSWORD>')
class PasswordResForm(PasswordResetForm):
class Meta:
model = User
        fields = ('email',)
class PasswordResConfirmForm(SetPasswordForm):
class Meta:
model = User
fields = ('new_password1', 'new_<PASSWORD>')
| StarcoderdataPython |
1750387 | <reponame>fabric-testbed/core-api
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Model200OkPaginatedLinks(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, first: str=None, last: str=None, next: str=None, prev: str=None): # noqa: E501
"""Model200OkPaginatedLinks - a model defined in Swagger
:param first: The first of this Model200OkPaginatedLinks. # noqa: E501
:type first: str
:param last: The last of this Model200OkPaginatedLinks. # noqa: E501
:type last: str
:param next: The next of this Model200OkPaginatedLinks. # noqa: E501
:type next: str
:param prev: The prev of this Model200OkPaginatedLinks. # noqa: E501
:type prev: str
"""
self.swagger_types = {
'first': str,
'last': str,
'next': str,
'prev': str
}
self.attribute_map = {
'first': 'first',
'last': 'last',
'next': 'next',
'prev': 'prev'
}
self._first = first
self._last = last
self._next = next
self._prev = prev
@classmethod
def from_dict(cls, dikt) -> 'Model200OkPaginatedLinks':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The 200_ok_paginated_links of this Model200OkPaginatedLinks. # noqa: E501
:rtype: Model200OkPaginatedLinks
"""
return util.deserialize_model(dikt, cls)
@property
def first(self) -> str:
"""Gets the first of this Model200OkPaginatedLinks.
:return: The first of this Model200OkPaginatedLinks.
:rtype: str
"""
return self._first
@first.setter
def first(self, first: str):
"""Sets the first of this Model200OkPaginatedLinks.
:param first: The first of this Model200OkPaginatedLinks.
:type first: str
"""
self._first = first
@property
def last(self) -> str:
"""Gets the last of this Model200OkPaginatedLinks.
:return: The last of this Model200OkPaginatedLinks.
:rtype: str
"""
return self._last
@last.setter
def last(self, last: str):
"""Sets the last of this Model200OkPaginatedLinks.
:param last: The last of this Model200OkPaginatedLinks.
:type last: str
"""
self._last = last
@property
def next(self) -> str:
"""Gets the next of this Model200OkPaginatedLinks.
:return: The next of this Model200OkPaginatedLinks.
:rtype: str
"""
return self._next
@next.setter
def next(self, next: str):
"""Sets the next of this Model200OkPaginatedLinks.
:param next: The next of this Model200OkPaginatedLinks.
:type next: str
"""
self._next = next
@property
def prev(self) -> str:
"""Gets the prev of this Model200OkPaginatedLinks.
:return: The prev of this Model200OkPaginatedLinks.
:rtype: str
"""
return self._prev
@prev.setter
def prev(self, prev: str):
"""Sets the prev of this Model200OkPaginatedLinks.
:param prev: The prev of this Model200OkPaginatedLinks.
:type prev: str
"""
self._prev = prev
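# --- Editor's illustration (not part of the original module) ----------------
# A minimal sketch of building the generated model above from a plain dict,
# assuming swagger_server.util.deserialize_model behaves as generated. The
# URLs are placeholders.
if __name__ == "__main__":
    links = Model200OkPaginatedLinks.from_dict({
        "first": "/resources?page=1",
        "last": "/resources?page=10",
        "next": "/resources?page=3",
        "prev": "/resources?page=1",
    })
    print(links.next)  # -> /resources?page=3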
| StarcoderdataPython |
1706999 | <reponame>kaiwinut/hateyugemu
import argparse
def show_banner():
banner = """
=================================================\n\n
はぁって言うゲーム\n\n
=================================================\n\n
"""
print(banner)
def show_players(players):
player_list = "Players: "
for i, name in enumerate(players):
player_list += name
if not i == len(players) - 1:
player_list += " / "
player_list += "\n\n"
print(player_list)
def parse_user_option():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--newname', action='store_true', help='define new players for this game (new players will not be saved after the game)')
parser.add_argument('-s', '--skip', action='store_true', help='skip through the process where the assigned action of each player will be displayed on the screen in turns')
return parser.parse_args()
def print_topic(topic):
print("====================================", end="\n\n")
print("Topic:", topic['Topic'], end="\n\n")
print("Notes:", topic['Notes'], end="\n\n")
print("Act A:", topic['A'], end="\n\n")
print("Act B:", topic['B'], end="\n\n")
print("Act C:", topic['C'], end="\n\n")
print("Act D:", topic['D'], end="\n\n")
print("Act E:", topic['E'], end="\n\n")
print("Act F:", topic['F'], end="\n\n")
print("Act G:", topic['G'], end="\n\n")
print("Act H:", topic['H'], end="\n\n")
print("====================================", end="\n\n")
def get_topic_string(topic):
return f"""\n
=======================\n
テーマ: {topic['Topic']}\n
指示: {topic['Notes']}\n
A: {topic['A']}\n
B: {topic['B']}\n
C: {topic['C']}\n
D: {topic['D']}\n
E: {topic['E']}\n
F: {topic['F']}\n
G: {topic['G']}\n
H: {topic['H']}\n
=======================\n\n
""" | StarcoderdataPython |
3232622 |
import matplotlib.pyplot as plt
import pandas as pd
import pathlib
import sys
source = "res_floor.csv" if len(sys.argv) < 2 else sys.argv[1]
ds = pd.read_csv(source, index_col=None)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
# problem: tol was 0.001 but mean error is close to 0.015
ax1.boxplot((ds.true_scale - ds.pred_scale) / ds.true_scale)
ax1.set_title("Scale Relative Error\n(true - prediction) / true")
ax1.get_xaxis().set_visible(False)
ax2.boxplot(ds.true_y - ds.pred_y)
ax2.set_title("Coordinate (y) Error\n(true - prediction)")
ax2.get_xaxis().set_visible(False)
ax3.boxplot(ds.true_x - ds.pred_x)
ax3.set_title("Coordinate (x) Error\n(true - prediction)")
ax3.get_xaxis().set_visible(False)
fig.tight_layout()
fig.savefig(pathlib.Path(source).with_suffix(".png"))
| StarcoderdataPython |
4814313 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
from .. import base
from . import space
class Marker(base.OpticalObject):
annotate_as_space = False
def port_chain(self, p, pname):
bmap = {
"+A-t": (None, "+B"),
"+B-t": (None, "+A"),
}.get(pname, None)
if bmap is not None:
return bmap
return super().port_chain(p, pname)
@classmethod
def visit_port_information(cls, manip):
manip.gen_optical_port("+A", "A")
manip.gen_optical_port("+B", "B")
return
def visit_mode_matching_linkage(self, manip):
manip.add_link("B!i", "A!o", None)
manip.add_link("A!i", "B!o", None)
# def visit_mode_matching_transport(self, manip):
# length_m = manip.p['length[m]']
# #no need to check these since the space can only be called on proper
# #links and the two directions are identical
# #manip.lport_fr
# #manip.lport_to
# #the P-builders are for fast optimization solving
# def p_builderXY(p):
# length_m = p['length[m]']
# return matrix_stack([[1, length_m], [0, 1]])
# manip.set_XYpropagator(p_builderXY)
# manip.set_Zpropagator({'length[m]' : 1})
# matrix = p_builderXY(manip.p)
# def inc_builder(z):
# return matrix_stack([[1, z], [0, 1]])
# manip.set_XYincremental([
# (length_m, inc_builder, matrix)
# ])
# return
class MaterialMarker(space.Space):
annotate_as_space = False
# TODO, need to apply substrate to propagation
| StarcoderdataPython |
1676215 | <filename>python_liftbridge/python_liftbridge.py
from logging import getLogger
from logging import NullHandler
import python_liftbridge.api_pb2
from python_liftbridge.base import BaseClient
from python_liftbridge.errors import handle_rpc_errors, handle_rpc_errors_in_generator, ErrDeadlineExceeded, ErrChannelClosed
from python_liftbridge.message import Message # noqa: F401
from python_liftbridge.stream import Stream # noqa: F401
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class Lift(BaseClient):
def fetch_metadata(self):
# TODO
return self._fetch_metadata(self._fetch_metadata_request())
def subscribe(self, stream, timeout=None):
"""
Subscribe creates an ephemeral subscription for the given stream. It begins
receiving messages starting at the configured position and waits for new
messages when it reaches the end of the stream. The default start position
is the end of the stream. It returns an ErrNoSuchStream if the given stream
does not exist.
"""
logger.debug('Creating a new subscription to: %s' % stream)
try:
for message in self._subscribe(self._subscribe_request(stream), timeout):
yield message
except ErrDeadlineExceeded:
return
def create_stream(self, stream):
"""
CreateStream creates a new stream attached to a NATS subject. Subject is the
NATS subject the stream is attached to, and name is the stream identifier,
unique per subject. It returns ErrStreamExists if a stream with the given
subject and name already exists.
"""
logger.debug('Creating a new stream: %s' % stream)
return self._create_stream(self._create_stream_request(stream))
def delete_stream(self, stream):
"""
DeleteStream deletes a stream and all of its partitions. Name is the stream
identifier, globally unique.
"""
logger.debug('Delete stream: %s' % stream)
return self._delete_stream(self._delete_stream_request(stream))
def publish(self, message):
"""
Publish publishes a new message to the Liftbridge stream.
"""
logger.debug('Publishing a new message to the Liftbridge stream: %s' % message)
return self._publish(
self._create_publish_request(message._build_message()),
)
def publish_to_subject(self, message):
"""
Publish publishes a new message to the NATS subject.
"""
logger.debug('Publishing a new message to the NATS subject: %s' % message)
return self._publish_to_subject(
self._create_publish_to_subject_request(message._build_message()),
)
@handle_rpc_errors
def _fetch_metadata(self, metadata_request):
response = self.stub.FetchMetadata(metadata_request)
return response
@handle_rpc_errors_in_generator
def _subscribe(self, subscribe_request, timeout=None):
# The first message in a subscription tells us if the subscribe succeeded.
# From the docs:
# """When the subscription stream is created, the server sends an
# empty message to indicate the subscription was successfully created.
# Otherwise, an error is sent on the stream if the subscribe failed.
# This handshake message must be handled and should not be exposed
# to the user."""
subscription = self.stub.Subscribe(subscribe_request, timeout)
first_message = next(subscription)
if first_message.value:
raise ErrChannelClosed(first_message.value)
for message in subscription:
yield Message(
message.value,
message.subject,
offset=message.offset,
timestamp=message.timestamp,
key=message.key,
partition=message.partition
)
@handle_rpc_errors
def _create_stream(self, stream_request):
response = self.stub.CreateStream(stream_request)
return response
@handle_rpc_errors
def _delete_stream(self, stream_request):
response = self.stub.DeleteStream(stream_request)
return response
@handle_rpc_errors
def _publish(self, publish_request):
response = self.stub.Publish(publish_request)
return response
@handle_rpc_errors
def _publish_to_subject(self, publish_to_subject_request):
response = self.stub.PublishToSubject(publish_to_subject_request)
return response
def _fetch_metadata_request(self):
return python_liftbridge.api_pb2.FetchMetadataRequest()
def _create_stream_request(self, stream):
response = python_liftbridge.api_pb2.CreateStreamRequest(
subject=stream.subject,
name=stream.name,
group=stream.group,
replicationFactor=stream.replication_factor,
partitions=stream.partitions
)
return response
def _delete_stream_request(self, stream):
response = python_liftbridge.api_pb2.DeleteStreamRequest(
name=stream.name,
)
return response
def _subscribe_request(self, stream):
if stream.start_offset:
return python_liftbridge.api_pb2.SubscribeRequest(
stream=stream.name,
startPosition=stream.start_position,
startOffset=stream.start_offset,
partition=stream.subscribe_to_partition
)
elif stream.start_timestamp:
return python_liftbridge.api_pb2.SubscribeRequest(
stream=stream.name,
startPosition=stream.start_position,
startTimestamp=stream.start_timestamp,
partition=stream.subscribe_to_partition
)
else:
return python_liftbridge.api_pb2.SubscribeRequest(
stream=stream.name,
startPosition=stream.start_position,
partition=stream.subscribe_to_partition
)
def _create_publish_request(self, message):
return python_liftbridge.api_pb2.PublishRequest(
key=message.key,
value=message.value,
stream=message.stream,
headers=message.headers,
partition=message.partition,
ackInbox=message.ackInbox,
correlationId=message.correlationId,
ackPolicy=message.ackPolicy,
)
def _create_publish_to_subject_request(self, message):
return python_liftbridge.api_pb2.PublishToSubjectRequest(
key=message.key,
value=message.value,
subject=message.subject,
headers=message.headers,
ackInbox=message.ackInbox,
correlationId=message.correlationId,
ackPolicy=message.ackPolicy,
)
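# --- Editor's illustration (not part of the original module) ----------------
# A minimal sketch of how the Lift client above is typically used: create a
# stream, publish to it and consume messages. The constructor arguments of
# Lift, Stream and Message are assumptions here (those classes live in other
# modules of this package), so treat this only as an outline.
if __name__ == "__main__":
    client = Lift(ip_address="127.0.0.1:9292")  # assumed constructor signature
    client.create_stream(Stream(subject="foo", name="foo-stream"))
    client.publish(Message(value="hello", stream="foo-stream"))
    for msg in client.subscribe(Stream(subject="foo", name="foo-stream")):
        print(msg.value)
        break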
| StarcoderdataPython |
162166 | """Some utility functions for working with TfJobs."""
import datetime
import logging
import time
from kubernetes import client as k8s_client
from kubeflow.testing import util
GROUP = "argoproj.io"
VERSION = "v1alpha1"
PLURAL = "workflows"
KIND = "Workflow"
def log_status(workflow):
"""A callback to use with wait_for_workflow."""
logging.info("Workflow %s in namespace %s; phase=%s",
workflow["metadata"]["name"],
workflow["metadata"]["namespace"],
workflow["status"]["phase"])
def wait_for_workflows(client, namespace, names,
timeout=datetime.timedelta(minutes=30),
polling_interval=datetime.timedelta(seconds=30),
status_callback=None):
"""Wait for multiple workflows to finish.
Args:
client: K8s api client.
namespace: namespace for the workflow.
names: Names of the workflows to wait for.
timeout: How long to wait for the workflow.
polling_interval: How often to poll for the status of the workflow.
status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the job. Callable takes a single argument which
is the job.
Returns:
results: A list of the final status of the workflows.
Raises:
TimeoutError: If timeout waiting for the job to finish.
"""
crd_api = k8s_client.CustomObjectsApi(client)
end_time = datetime.datetime.now() + timeout
while True:
all_results = []
for n in names:
results = crd_api.get_namespaced_custom_object(
GROUP, VERSION, namespace, PLURAL, n)
all_results.append(results)
if status_callback:
status_callback(results)
done = True
for results in all_results:
if results["status"]["phase"] not in ["Failed", "Succeeded"]:
done = False
if done:
return all_results
if datetime.datetime.now() + polling_interval > end_time:
raise util.TimeoutError(
"Timeout waiting for workflows {0} in namespace {1} to finish.".format(
",".join(names), namespace))
time.sleep(polling_interval.seconds)
return []
def wait_for_workflow(client, namespace, name,
timeout=datetime.timedelta(minutes=30),
polling_interval=datetime.timedelta(seconds=30),
status_callback=None):
"""Wait for the specified workflow to finish.
Args:
client: K8s api client.
namespace: namespace for the workflow.
name: Name of the workflow
timeout: How long to wait for the workflow.
polling_interval: How often to poll for the status of the workflow.
status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the job. Callable takes a single argument which
is the job.
Raises:
TimeoutError: If timeout waiting for the job to finish.
"""
results = wait_for_workflows(client, namespace, [name],
timeout, polling_interval, status_callback)
return results[0]
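# --- Example usage (illustrative sketch, not part of the original module) ---
# wait_for_workflow() needs a live Kubernetes API client. The namespace and
# workflow name below are hypothetical placeholders; log_status() is passed as
# the callback so every poll logs the current workflow phase.
if __name__ == "__main__":
    from kubernetes import config
    logging.basicConfig(level=logging.INFO)
    config.load_kube_config()  # assumes a local kubeconfig is configured
    api_client = k8s_client.ApiClient()
    wait_for_workflow(api_client, "kubeflow-test-infra", "example-workflow",
                      status_callback=log_status)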
| StarcoderdataPython |
3337058 | from typing import List, Optional, Type
from vaccine.base_application import BaseApplication
from vaccine.models import Message, User
class AppTester:
DEFAULT_USER_ADDRESS = "27820001001"
DEFAULT_CHANNEL_ADDRESS = "27820001002"
DEFAULT_TRANSPORT_NAME = "test_transport"
DEFAULT_TRANSPORT_TYPE = Message.TRANSPORT_TYPE.HTTP_API
DEFAULT_SESSION_ID = 1
def __init__(self, app_class: Type[BaseApplication]):
self.user = User(addr=self.DEFAULT_USER_ADDRESS)
self.application = app_class(self.user)
def setup_state(self, name: str):
"""
Sets the current state that the user is in
"""
self.user.state.name = name
def setup_answer(self, answer_name: str, answer_value: str):
"""
Sets an answer for the user
"""
self.user.answers[answer_name] = answer_value
def setup_user_address(self, address: str):
"""
Sets an address for the user
"""
self.user.addr = address
async def user_input(
self,
content: Optional[str] = None,
session=Message.SESSION_EVENT.RESUME,
transport_metadata: Optional[dict] = None,
):
"""
User input into the application
"""
self.application.messages = []
message = Message(
to_addr=self.DEFAULT_CHANNEL_ADDRESS,
from_addr=self.user.addr,
transport_name=self.DEFAULT_TRANSPORT_NAME,
transport_type=self.DEFAULT_TRANSPORT_TYPE,
content=content,
session_event=session,
transport_metadata=transport_metadata or {},
)
if session in (Message.SESSION_EVENT.RESUME, Message.SESSION_EVENT.CLOSE):
self.user.session_id = self.DEFAULT_SESSION_ID
await self.application.process_message(message)
def assert_state(self, name: Optional[str]):
"""
Asserts that the current user state matches `name`
"""
assert (
self.user.state.name == name
), f"User is in state {self.user.state.name}, not in {name}"
def assert_answer(self, answer_name: str, answer_value: str):
"""
Assert that a user's answer matches the given value
"""
assert answer_name in self.user.answers, f"{answer_name} not in user answers"
assert (
self.user.answers[answer_name] == answer_value
), f"{answer_name} is {self.user.answers[answer_name]}, not {answer_value}"
def assert_no_answer(self, answer_name: str):
"""
Assert that the user does not have a value stored for the answer
"""
assert (
self.user.answers.get(answer_name) is None
), f"{answer_name} has a value {self.user.answers[answer_name]}"
def assert_num_messages(self, num: int):
"""
Assert that the application sent a specific number of messages. Useful for if
we don't want to test the content of the messages
"""
assert (
len(self.application.messages) == num
), f"{len(self.application.messages)} messages sent, not {num}"
def assert_message(
self,
content: Optional[str] = None,
session: Optional[Message.SESSION_EVENT] = None,
buttons: Optional[List[str]] = None,
header: Optional[str] = None,
):
"""
Asserts that the application sent a single message, with the provided parameters
"""
self.assert_num_messages(1)
[message] = self.application.messages
if content is not None:
assert (
message.content == content
), f"Message content is {message.content}, not {content}"
if session is not None:
assert (
message.session_event == session
), f"Message session is {message.session_event}, not {session}"
if buttons is not None:
btns = message.helper_metadata.get("buttons")
assert btns == buttons, f"Buttons are {btns}, not {buttons}"
if header is not None:
hdr = message.helper_metadata.get("header")
assert hdr == header, f"Header is {hdr}, not {header}"
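# --- Example usage (illustrative sketch, not part of the original module) ---
# A typical pytest-asyncio test drives an application through AppTester. The
# application class below is hypothetical; any BaseApplication subclass works.
#
#   @pytest.mark.asyncio
#   async def test_greeting():
#       tester = AppTester(MyApplication)
#       tester.setup_state("state_start")
#       await tester.user_input("hi")
#       tester.assert_num_messages(1)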
| StarcoderdataPython |
1757394 | # Given a sorted array containing only 0s and 1s, find the transition point. Transition point is where 0 ends and 1 begins
# https://www.geeksforgeeks.org/find-transition-point-binary-array/
# https://practice.geeksforgeeks.org/problems/find-transition-point-1587115620/1/
# time is O(logn) | Space is O(1)
def transitionPoint(arr, n):
lower_bound = 0
upper_bound = n-1
while (lower_bound <= upper_bound):
mid = (lower_bound + upper_bound) // 2
if (arr[mid] == 0):
lower_bound = mid + 1
elif arr[mid] == 1:
if (mid == 0 or (arr[mid - 1] == 0)):
return mid
upper_bound = mid - 1
return -1
if __name__=='__main__':
t=int(input())
for i in range(t):
n = int(input())
arr = list(map(int, input().strip().split()))
print(transitionPoint(arr, n))
| StarcoderdataPython |
167793 | <filename>bot/plugins/inline.py
# © its-leo-bitch
from bot import bot
from bot.utils import langs, lang_names
from pyrogram import types, errors
from piston import Piston
import asyncio
import time
piston = Piston()
execute = {}
NEXT_OFFSET = 25
@bot.on_inline_query()
async def inline_exec(client, query):
string = query.query
offset = int(query.offset or 0)
answers = []
if string == '':
for l in langs[offset: offset + NEXT_OFFSET]:
answers.append(
types.InlineQueryResultArticle(
title=l.language,
description=l.version or None,
input_message_content=types.InputTextMessageContent(
"**Language:** `{}`{}\nPress the button below to Execute your code:".format(
l.language,
                            '\n**Version:** `{}`'.format(l.version) if l.version else ''
)
),
reply_markup=types.InlineKeyboardMarkup(
[
[
types.InlineKeyboardButton(
'Execute',
switch_inline_query_current_chat=l.language + " "
)
]
]
)
)
)
elif string.split()[0] in lang_names:
if len(string.split()) == 1:
await client.answer_inline_query(
query.id,
results=answers,
            switch_pm_text=f'Give a code to Execute in {string.split()[0]}',
switch_pm_parameter='help_inline',
)
return
source = string.split(None, 1)[1]
start_time = time.time()
for l in langs:
if string.split()[0] == l.language:
out = await piston.execute(
language=string.split()[0],
version=l.version,
source=source
)
try:
msg = f"**Language:** `{out.language}-{out.version}`\n\n**Code:**\n```{source}```\n\n"
if out.run:
msg += f"**Output:**\n```{out.run.output}```\n\n"
answers.append(
types.InlineQueryResultArticle(
"Output:",
description=out.run.stdout or out.run.stderr,
input_message_content=types.InputTextMessageContent(
msg,
parse_mode='markdown'
),
reply_markup=types.InlineKeyboardMarkup(
[
[
types.InlineKeyboardButton(
'stats',
callback_data=f'stats-{start_time}-{time.time()}'
)
],
[
types.InlineKeyboardButton(
'Fork',
switch_inline_query_current_chat=f'{out.language} {source}'
),
types.InlineKeyboardButton(
'Try Again',
switch_inline_query_current_chat=f'{out.language} '
),
]
]
)
)
)
execute[query.from_user.id] = True
except AttributeError as err:
answers.append(
types.InlineQueryResultArticle(
"Error",
description=str(err),
input_message_content=types.InputTextMessageContent(
str(err),
)
)
)
return await client.answer_inline_query(
query.id,
results=answers,
cache_time=0,
)
try:
await client.answer_inline_query(
query.id,
results=answers,
next_offset=str(offset + NEXT_OFFSET),
cache_time=0,
)
except errors.exceptions.bad_request_400.QueryIdInvalid:
return
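# --- Example entry point (illustrative sketch, not part of the original plugin) ---
# In this plugin layout the client is normally started by the package itself; the
# guard below only shows the usual way to run the imported Pyrogram client directly,
# and is an assumption rather than the project's actual entry point.
if __name__ == "__main__":
    bot.run()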
| StarcoderdataPython |
1770838 | """Module tiktalik.connection"""
# Copyright (c) 2013 Techstorage sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*- coding: utf8 -*-
import time
import http.client
import hmac
import base64
from urllib import parse
import json
import string
from hashlib import sha1, md5
from .error import TiktalikAPIError
class TiktalikAuthConnection:
"""
Simple wrapper for HTTPConnection. Adds authentication information to requests.
"""
def __init__(
self, api_key, api_secret_key, host="tiktalik.com", port=443, use_ssl=True
):
self.api_key = api_key
self.api_secret_key = api_secret_key
self.host = host
self.port = port
        # Backward compatibility: the secret key is normally supplied as a base64
        # string but is used internally in its decoded binary form. An older version
        # of this function expected the already-decoded key, so we try to handle both
        # input forms: the deprecated decoded one and the "normal" base64-encoded one.
try:
if (
len(
self.api_secret_key.lstrip(
string.ascii_letters + string.digits + "+/="
)
)
== 0
):
self.api_secret_key = base64.standard_b64decode(self.api_secret_key)
except TypeError:
pass
if use_ssl:
self.conn_cls = http.client.HTTPSConnection
else:
self.conn_cls = http.client.HTTPConnection
self.use_ssl = use_ssl
self.timeout = 20
self.conn = None
def _encode_param(self, value):
if isinstance(value, list):
return list(map(self._encode_param, value))
elif isinstance(value, str):
return value.encode("utf8")
return value
def request(self, method, path, params=None, query_params=None):
"""
Send a request over HTTP. The inheriting class must override self.base_url().
:type method: string
:param method: HTTP method to use (GET, POST etc.)
:type path: string
:param path: path to be requested from server
:type params: dict
:param params: a dictionary of parameters sent in request body
:type query_params: dict
:param query_params: a dictionary of parameters sent in request path
:rtype: dict, string or None
:return: a JSON dict if the server replied with "application/json".
Raw data otherwise. None, if the reply was empty.
"""
response = self.make_request(
method, self.base_url() + path, params=params, query_params=query_params
)
data = response.read()
if response.getheader("Content-Type", "").startswith("application/json"):
data = json.loads(data)
if response.status != 200:
raise TiktalikAPIError(response.status, data)
return data
def base_url(self):
"""
:rtype: string
:return: base URL for API requests, eg. "/api/v1/computing".
Must NOT include trailing slash.
"""
raise NotImplementedError()
def make_request(
self, method, path, headers=None, body=None, params=None, query_params=None
):
"""
Sends request, returns httplib.HTTPResponse.
If `params` is provided, it should be a dict that contains form parameters.
Content-Type is forced to "application/x-www-form-urlencoded" in this case.
"""
if params and body:
raise ValueError("Both `body` and `params` can't be provided.")
headers = headers or {}
if params:
params = dict(
(k.encode("utf8"), self._encode_param(v))
for (k, v) in params.items()
)
body = parse.urlencode(params, True)
headers["content-type"] = "application/x-www-form-urlencoded"
path = parse.quote(path.encode("utf8"))
if query_params:
qp = {}
for key, value in query_params.items():
if isinstance(value, bool):
qp[key] = "true" if value else "false"
else:
# assert isinstance(value, (str, int))
qp[key.encode("utf8")] = self._encode_param(value)
qp = parse.urlencode(qp, True)
path = "%s?%s" % (path, qp)
if body:
m = md5(body.encode("utf-8"))
headers["content-md5"] = m.hexdigest()
conn = self.conn_cls(self.host, self.port, timeout=self.timeout)
headers = self._add_auth_header(method, path, headers or {})
# conn.set_debuglevel(3)
conn.request(method, path, body, headers)
response = conn.getresponse()
return response
def _add_auth_header(self, method, path, headers):
if "date" not in headers:
headers["date"] = time.strftime("%a, %d %b %Y %X GMT", time.gmtime())
S = self._canonical_string(method, path, headers)
headers["Authorization"] = "TKAuth %s:%s" % (self.api_key, self._sign_string(S))
return headers
def _canonical_string(self, method, path, headers):
S = "\n".join(
(
method,
headers.get("content-md5", ""),
headers.get("content-type", ""),
headers["date"],
path,
)
)
return S
def _sign_string(self, S):
digest = base64.b64encode(
hmac.new(self.api_secret_key, S.encode("utf-8"), sha1).digest()
)
return digest.decode("utf-8")
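# --- Example usage (illustrative sketch, not part of the original module) ---
# TiktalikAuthConnection is abstract: subclasses supply base_url(). The subclass,
# API key and base64 secret below are dummy values used only for illustration.
class ExampleComputingConnection(TiktalikAuthConnection):
    def base_url(self):
        return "/api/v1/computing"

if __name__ == "__main__":
    conn = ExampleComputingConnection("EXAMPLEKEY", "c2VjcmV0")
    # A signed call would then look like this (requires valid credentials):
    # print(conn.request("GET", "/instance"))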
| StarcoderdataPython |
195823 | import logging
from kafka.errors import KafkaError, KafkaTimeoutError
from kafka import KafkaProducer
from data.logs_model.shared import epoch_ms
from data.logs_model.logs_producer.interface import LogProducerInterface
from data.logs_model.logs_producer.util import logs_json_serializer
from data.logs_model.logs_producer import LogSendException
logger = logging.getLogger(__name__)
DEFAULT_MAX_BLOCK_SECONDS = 5
class KafkaLogsProducer(LogProducerInterface):
""" Log producer writing log entries to a Kafka stream. """
def __init__(self, bootstrap_servers=None, topic=None, client_id=None, max_block_seconds=None):
self.bootstrap_servers = bootstrap_servers
self.topic = topic
self.client_id = client_id
self.max_block_ms = (max_block_seconds or DEFAULT_MAX_BLOCK_SECONDS) * 1000
self._producer = KafkaProducer(bootstrap_servers=self.bootstrap_servers,
client_id=self.client_id,
max_block_ms=self.max_block_ms,
value_serializer=logs_json_serializer)
def send(self, logentry):
try:
# send() has a (max_block_ms) timeout and get() has a (max_block_ms) timeout
# for an upper bound of 2x(max_block_ms) before guaranteed delivery
future = self._producer.send(self.topic, logentry.to_dict(), timestamp_ms=epoch_ms(logentry.datetime))
record_metadata = future.get(timeout=self.max_block_ms)
      assert future.succeeded()
except KafkaTimeoutError as kte:
logger.exception('KafkaLogsProducer timeout sending log to Kafka: %s', kte)
raise LogSendException('KafkaLogsProducer timeout sending log to Kafka: %s' % kte)
except KafkaError as ke:
logger.exception('KafkaLogsProducer error sending log to Kafka: %s', ke)
raise LogSendException('KafkaLogsProducer error sending log to Kafka: %s' % ke)
except Exception as e:
logger.exception('KafkaLogsProducer exception sending log to Kafka: %s', e)
raise LogSendException('KafkaLogsProducer exception sending log to Kafka: %s' % e)
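# --- Example usage (illustrative sketch, not part of the original module) ---
# send() expects a log entry object exposing to_dict() and a datetime attribute.
# The broker address, topic and _ExampleLogEntry below are hypothetical values;
# running this requires a reachable Kafka broker.
if __name__ == '__main__':
  import datetime

  class _ExampleLogEntry(object):
    datetime = datetime.datetime.utcnow()

    def to_dict(self):
      return {'kind': 'example_event', 'performer': 'example_user'}

  producer = KafkaLogsProducer(bootstrap_servers=['localhost:9092'],
                               topic='example-logs', client_id='example-client')
  producer.send(_ExampleLogEntry())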
| StarcoderdataPython |
3390353 | <reponame>AsiaLi/rust
#coding: utf8
from rust.command.base_command import BaseCommand
from rust.resources.db.user import models as user_models
from rust.resources.business.user.login_service import LoginService
MANAGER_USER_NAME = 'manager'
class Command(BaseCommand):
def handle(self, *args):
"""
        Create the system administrator account.
"""
if user_models.User.select().dj_where(username=MANAGER_USER_NAME).count() == 0:
user_models.User.create(
username = MANAGER_USER_NAME,
password = LoginService().encrypt_password('<PASSWORD>'),
nickname = u'管理员'
)
#todo | StarcoderdataPython |
3303063 | <filename>number_chart.py
n = int(input("Enter the value of n : "))
size = n + (n - 1)
center = n - 1
temp = []
answer = []
for i in range(size):
temp.append("T")
for i in range(size):
answer.append(temp[:])
for digit in range(1, n + 1):
expansion = digit + (digit - 1)
position = [center - digit + 1, center - digit + 1]
for i in range(expansion):
answer[position[0]][position[1] + i] = str(digit)
for i in range(expansion):
answer[position[0] + i] [position[1]] = str(digit)
position = [center + digit - 1, center + digit - 1]
for i in range(expansion):
answer[position[0]] [position[1] - i] = str(digit)
for i in range(expansion):
answer[position[0] - i] [position[1]] = str(digit)
for arr in answer:
print(" ".join(arr)) | StarcoderdataPython |
3354873 | import threading
from time import sleep
def intervalExecute(interval, func, *args, **argd):
''' @param interval: execute func(*args, **argd) each interval
@return: a callable object to enable you terminate the timer.
'''
cancelled = threading.Event()
def threadProc(*args, **argd):
while True:
cancelled.wait(interval)
            if cancelled.is_set():
break
            func(*args, **argd)  #: could be a lengthy operation
th = threading.Thread(target=threadProc, args=args, kwargs=argd)
th.start()
def close(block=True, timeout=3):
''' @param block: if True, block the caller until the thread
is closed or time out
            @param timeout: if blocked, timeout is used
@return: if block, True -> close successfully; False -> timeout
if non block, always return False
'''
if not block:
cancelled.set()
return False
else:
cancelled.set()
th.join(timeout)
            isClosed = not th.is_alive()
return isClosed
return close
if __name__=='__main__':
# sample usage is as follow....
def testFunc(identifier, txt=''):
print('test func entered')
sleep(2)
print(identifier, txt)
cancellObj = intervalExecute(2.0, testFunc, 1, 'haha')
help(cancellObj)
sleep(5.2)
print(cancellObj()) #: cancel the intervalExecute timer.
print('after calling close')
| StarcoderdataPython |
3398278 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 13:34:59 2019
@author: atekawade
"""
import numpy as np
def get_patches(img, patch_size = None, steps = None):
stepy, stepx = steps
my, mx = img.shape
py, px = patch_size[0], patch_size[1]
nx, ny = int(np.ceil(mx/px)), int(np.ceil(my/py))
img = np.asarray([img[ii*stepy:ii*stepy+py] for ii in range(ny)])
img = np.asarray([[img[jj,:,ii*stepx:ii*stepx+px] for ii in range(nx)] for jj in range(img.shape[0])])
return img
def get_stepsize(img_shape, patch_size):
# Find optimum number of patches to cover full image
my, mx = img_shape
py, px = patch_size
nx, ny = int(np.ceil(mx/px)), int(np.ceil(my/py))
stepx = (mx-px) // (nx-1) if mx != px else 0
stepy = (my-py) // (ny-1) if my != py else 0
return (stepy, stepx)
def recon_patches(img, img_shape = None, steps = None):
if img.ndim != 4:
raise ValueError("Input must be 4D array.")
ny, nx, py, px = img.shape
stepy, stepx = steps
new_img = np.zeros((img_shape))
for ii in range(ny):
for jj in range(nx):
new_img[ii*stepy:ii*stepy+py,jj*stepx:jj*stepx+px] = img[ii,jj]
return new_img
def calc_resdrop(img_shape, patch_size, n_max = 3):
y_orig, x_orig, = img_shape
yres = 1
y_new = y_orig
while y_new > patch_size[0]*n_max:
yres += 1
y_new = int(np.ceil(y_orig/yres))
xres = 1
x_new = x_orig
while x_new > patch_size[1]*n_max:
xres += 1
x_new = int(np.ceil(x_orig/xres))
return yres, xres #, y_new, x_new
def ssrecon_patches(img, img_shape = None, steps = None):
ny, nx = img.shape[:2]
stepy, stepx = steps
p = img.shape[-1]
new_img = np.zeros((img_shape))
for ii in range(ny):
for jj in range(nx):
new_img[ii*stepy:ii*stepy+p,jj*stepx:jj*stepx+p] = img[ii,jj]
return new_img
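if __name__ == '__main__':
    # Illustrative round-trip check (not part of the original module): split a
    # dummy image into overlapping patches and rebuild it. The 96x128 shape is
    # chosen so that the computed steps tile the image exactly; other shapes can
    # leave a thin uncovered border because of the integer step size.
    image = np.arange(96 * 128, dtype=float).reshape(96, 128)
    patch_size = (32, 32)
    steps = get_stepsize(image.shape, patch_size)
    patches = get_patches(image, patch_size=patch_size, steps=steps)
    reconstructed = recon_patches(patches, img_shape=image.shape, steps=steps)
    print(patches.shape, np.allclose(image, reconstructed))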
| StarcoderdataPython |
3213710 | """Test functions related to the model creation, loading, saving and prediction."""
import os
import shutil
import pytest
from autopylot.cameras import Camera
from autopylot.datasets import preparedata
from autopylot.models import architectures, utils
from autopylot.utils import memory, settings
dirpath = os.path.join(settings.settings.MODELS_PATH, "test", "test")
@pytest.mark.models
def test_create_model_save():
"""Test the creation and the saving of a model."""
model = architectures.Models.test_model(
[
# testing with "list" shape
["steering", [1, 1]],
# testing with "tuple" shape
("test_output", (1, 20)),
]
)
model.summary()
utils.save_model(model, "test")
assert (
os.path.exists(dirpath + ".h5")
and os.path.exists(dirpath + ".tflite")
and os.path.exists(dirpath + ".info")
)
@pytest.mark.models
def test_input_shapes():
"""Test the expected input and output shape."""
model, model_info = utils.load_model("test/test.tflite")
for input_detail, (_, shape) in zip(model.input_details, model_info["inputs"]):
assert tuple(input_detail["shape"][1:]) == tuple(shape)
for output_detail, (_, shape) in zip(model.output_details, model_info["outputs"]):
assert tuple(output_detail["shape"][1:]) == tuple(shape)
@pytest.mark.models
def test_missing_data():
    """If the memory doesn't have the right data, it should raise an Exception."""
model, model_info = utils.load_model("test/test.tflite")
prepare_data = preparedata.PrepareData(model_info)
with pytest.raises(ValueError):
prepare_data(memory.mem)
@pytest.mark.models
def test_tflite_predict():
"""Test the prediction on the .tflite model."""
model, model_info = utils.load_model("test/test.tflite")
prepare_data = preparedata.PrepareData(model_info)
camera = Camera(camera_type="dummy")
camera.update()
memory.mem["speed"] = 0.123
input_data = prepare_data(memory.mem)
predictions = model.predict(input_data)
assert predictions != {}
@pytest.mark.models
def test_tf_predict():
"""Test the prediction on the .h5 model."""
model, model_info = utils.load_model("test/test.h5")
prepare_data = preparedata.PrepareData(model_info)
camera = Camera(camera_type="dummy")
camera.update()
memory.mem["speed"] = 2.3
input_data = prepare_data(memory.mem)
predictions = model.predict(input_data)
assert predictions != {}
@pytest.mark.models
def test_delete_directory():
"""Deletes the created models."""
shutil.rmtree(os.path.join(settings.settings.MODELS_PATH, "test"))
assert os.path.exists(dirpath) is False
| StarcoderdataPython |
3234069 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Real-time modelling | COVID-19 in Brazil
--------------------------------------------
Ideas and models developed by the trio:
. <NAME>
. <NAME>
. <NAME>
This modelling has the following characteristics:
a) We do NOT follow parametric models => during the epidemic there is not enough
reliable data to feed epidemiological models such as the excellent calculator
http://gabgoh.github.io/COVID/index.html (it is useful to generate scenarios and
to model the epidemic AFTER it has passed). Besides, the exponential nature of
the curves makes them extremely sensitive to the parameters that define them,
which makes the predictive reliability of those models illusory.
b) The epidemic in Brazil started after it did in other countries. Our modelling
relies on that fact. With the available data we try, at the present moment, to
determine whom we are following, i.e. which countries looked most like us after
the same period of spread. From what happened in those countries we project what
may happen here.
c) This calculation is redone day by day. Depending on how well or poorly we
contain the spread of Covid-19 we will get closer to the countries that handled
the epidemic better or worse, and the projection will reflect that similarity.
d) Modelling decisions are marked in the code with the little eyes: # ◔◔ {...}
They are starting points to discuss the modelling and to propose alternatives.
"""
import datetime
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
sns.set()
# no ipython usar este comando antes de rodar => %matplotlib osx
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
__author__ = "<NAME>" # codigo
__copyright__ = "Copyright 2020"
__license__ = "New BSD License"
__version__ = "1.5.2"
__email__ = "<EMAIL>"
__status__ = "Experimental"
def preparar_dados(p1, uf="SP", cidade=u"São Paulo"):
u"""Busca dados e organiza tabela "data" com os dados de referência para a
modelagem.
Fontes:
. Mundo: https://covid.ourworldindata.org
. Brasil: https://brasil.io
Retorna:
raw <DataFrame> | Série completa do número de mortes/dia por país, sem trans-
posição temporal
inicio <Series> | Referência dos indexes em raw para justapor o início das
curvas dos diferentes países
data <DataFrame> | Série de número de mortes/dia por país trazendo para o
zero (index 0) o primeiro dia em que ocorrem pelo menos p1 mortes
(ver macro parâmetros). Isto reduz a quantidade de países para o grupo
que está à frente ou pareado ao Brazil. A partir do index 0 é possível
comparar a evolução dos casos entre os países.
nbr <int> | Número de dias da série de dados para o Brasil
"""
# ◔◔ {usamos as mortes diárias por parecer ser o dado mais confiável}
raw = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/new_deaths.csv").fillna(0.0)
# ◔◔ {o link abaixo carrega o acumulado de mortes, não usamos pq a soma vai alisando a série}
# raw_soma = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/total_deaths.csv").fillna(0.0)
# tempo = raw['date'] # ◔◔ {não usamos as datas}
raw = raw.drop(columns='date')
raw = raw.drop(columns='World')
# para ver tbem os dados "oficias"
para_oficial = raw['Brazil']
# correcao de subnotificacao Brasil:
sub, hip = estimar_subnotificacao('Brasil')
p4br = ((sub + raw['Brazil'].sum()) / raw['Brazil'].sum())
raw['Brasil'] = raw['Brazil'] * p4br
# dict subs usa mesmas refs como chave => para reportar nos graficos
subs = {"Brasil": str(round(p4br, 1)) + " (" + hip + ")"}
# contruir base para a tabela "data"
inicio = raw.ge(p1).idxmax() # ◔◔ {encontra os index de qdo cada pais alcança p1}
data = pd.DataFrame({'Brasil':raw['Brasil'][inicio['Brasil']:]}).reset_index().drop(columns='index')
nbr = data.shape[0]
oficial = pd.DataFrame({'Brasil':para_oficial[inicio['Brasil']:]}).reset_index().drop(columns='index')
# dados Brasil
estados = [
'AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS',
'MG', 'PA', 'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC',
'SP', 'SE', 'TO',
]
if uf not in estados or type(uf) is not str:
uf = "SP"
print(uf, u": UF inválida, usando 'SP'")
# ◔◔ {já baixamos filtrado para uf, mas pode se usar outros estados}
uf_data = pd.read_csv("https://brasil.io/dataset/covid19/caso?state="+uf+"&format=csv")
# adicionar dados da uf
uf_select = uf_data.loc[lambda df: df['place_type'] == "state", :]
uf_mortes = list(uf_select['deaths'].head(nbr + 1).fillna(0.0))
uf_mortes = [uf_mortes[i] - uf_mortes[i+1] for i in range(len(uf_mortes)-1)]
uf_mortes += [0 for _ in range(nbr-len(uf_mortes))] # corrigir tamanho
uf_mortes.reverse()
oficial[uf] = pd.Series(uf_mortes).values
sub_uf, hip_uf = estimar_subnotificacao(uf)
p4uf = ((sub_uf + pd.Series(uf_mortes).values.sum())/pd.Series(uf_mortes).values.sum())
data[uf] = pd.Series(uf_mortes).values * p4uf
subs[uf] = str(round(p4uf, 1)) + " (" + hip_uf + ")"
# adicionar dados da cidade
cidade_select = uf_data.loc[lambda df: df['city'] == cidade, :]
if cidade_select.shape[0] > 0:
cidade_mortes = list(cidade_select['deaths'].head(nbr + 1).fillna(0.0))
cidade_mortes = [cidade_mortes[i] - cidade_mortes[i+1] for i in range(len(cidade_mortes)-1)]
cidade_mortes += [0 for _ in range(nbr-len(cidade_mortes))] # corrigir tamanho
cidade_mortes.reverse()
if sum(cidade_mortes):
# subnotificacao para cidade => aprox pela do estado
oficial[cidade] = pd.Series(cidade_mortes).values
data[cidade] = pd.Series(cidade_mortes).values * p4uf
subs[cidade] = str(round(p4uf, 1)) + " (" + hip_uf + ")"
else:
subs["n/d"] = ""
print(u"AVISO: a cidade " + cidade + " não possui mortes confirmadas")
else:
subs["n/d"] = ""
print(u"AVISO: a cidade " + cidade + " não consta nos dados para esta UF")
print(u'Utilize uma das cidades disponíveis para o terceiro gráfico:')
for d in set(uf_data['city']):
print(d)
refs = list(subs.keys()) # as referencias validas...
# adicionar dados dos países à frente ou pareados ao Brasil
for k in inicio.keys():
if k == "Brasil": continue
if inicio[k] == 0 or inicio[k] > inicio["Brasil"]: continue
C = raw[k][inicio[k]:inicio[k]+nbr]
data[k] = C.values
return raw, inicio, data, nbr, subs, refs, oficial
def rodar_modelo(raw, inicio, data, nbr, p2, p3, ref, refs):
"""
Usa os dados preparados para gerar dados para visualização e a projeção da
evoluação da epidemia.
Retorna:
correlacionados <list>: Países mais correlacionados, usados para a projeção
calibrados <DataFrame>: Série alisada de mortes por dia com dados de ref e
países correlacionados
projetado <Array>: Série estimada para a evoluação da epidemia em ref
infos <dict>: informações sobre o pico estimado da epidemia
"""
# ◔◔ {Optamos por não alisar dados antes de calcular a correlação. Sabemos
# que a qualidade do report dos dados é variável, mas assumimos que o ruído
# é aleatório e por isso não é preciso alisar para que a correlação seja
# válida. Ao contrário, a correlação "bruta" seria a mais verossível}
# ◔◔ {mas caso você ache que vale a pena alisar antes, use o codigo abaixo}
# alisamento para os casos de morte reportados (média móvel)
# data = data.rolling(5).mean()
try: data = data.drop(columns='Brazil')
except: pass
# calcular a matriz de correlações:
pearson = data.corr()
# ◔◔ {o default do método usa a correlação de Pearson, cf. ref abaixo}
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html
# ◔◔ { não incluir os casos locais para evitar endogeneidade}
out = refs # nao misturar com os demais cortes locais
# selecionar os p2 países que melhor se correlacionam com a ref
correlacionados = [_ for _ in pearson[ref].sort_values(ascending=False).keys() if _ not in out][:p2]
# criar tabela, começa com dados da ref
calibrados = pd.DataFrame({ref:data[ref]})
# preencher com os dados dos países correlacionados
for k in correlacionados:
# ◔◔ {pega os dados em raw pq agora usaremos todos os dados disponíveis para o país}
C = raw[k][inicio[k]:]
additional = pd.DataFrame({k: C.values}) # array
calibrados = pd.concat([calibrados, additional], axis=1)
# ◔◔ {aqui usamos um alisamento p3 de dias para deixar a visualização melhor}
calibrados = calibrados.rolling(p3).mean()
# ◔◔ {a projeção usa os dados alisados}
# ◔◔ {como é feita a projeção:
# 1. cada país correlacionado terá um peso, proporcianal a quanto se correlaciona
# .. soma dos pesos = 1
# .. quanto mais correlacionado, maior o peso }
pesos = [pearson[ref][c] for c in correlacionados] # melhor corr pesa mais
pesos = [pesos[i]/sum(pesos) for i in range(len(pesos))] # pesos normalizados
pesos = dict(zip(correlacionados, pesos)) # num dict para facilitar
# proj <list>: vai ter ao final o tamanho da maior serie em calibrados
proj = [np.nan for _ in range(nbr)] # começa com nan onde já temos os dados da ref
proj[-1] = calibrados[ref][nbr - 1] # primeiro valor coincide com último de ref
# será a partir daí que começa a projeção
# ◔◔ {a projeção segue dia a dia as variações dos países correlacionado}
for d in range(nbr, calibrados.shape[0]):
x = 0 # incremento estimado para o dia
for c in correlacionados:
if not np.isnan(calibrados[c][d]):
# adiciona o incremento % do país ponderado por seu peso
x += (calibrados[c][d]/calibrados[c][d-1]) * pesos[c]
else:
# ◔◔ {qdo acabam os dados de um país ele pára de influenciar a taxa}
x += 1 * pesos[c]
# print(d, c, x)
# a série da projeção é construída aplicando o incremento estimado ao dia anterior
proj.append(proj[-1] * x)
# projetado <Array>
projetado = np.array(proj)
# ◔◔ {informações adicionais}
# pico => valor máximo da série projetada
pico = np.nan_to_num(projetado).max() # float
# mortes valor absoluto
mortes_no_pico = str(int(pico)) # str
ix_do_pico = proj.index(np.nan_to_num(projetado).max()) # int => index
# dia em que acontece o pico [! soma 1 no index pq projetado sobrepoe o primeiro valor]
dia_do_pico = str(datetime.datetime.now() + datetime.timedelta(days=ix_do_pico-nbr+1))[:10] # str
# no caso do pico já ter passado
if calibrados[ref].max() > pico:
pico = calibrados[ref].max()
mortes_no_pico = str(int(pico))
ix_do_pico = list(calibrados[ref]).index(pico)
dia_do_pico = str(datetime.datetime.now() + datetime.timedelta(days=ix_do_pico-nbr))[:10] # str
# mortes totais: hoje mais tres semanas
ix_hoje = list(calibrados[ref]).index(calibrados[ref][nbr - 1])
mortes_totais = {
str(datetime.datetime.now())[:10]: int(calibrados[ref].sum()),
str(datetime.datetime.now() + datetime.timedelta(days=7))[:10]: int(calibrados[ref].sum()+projetado[nbr+1:nbr+1+7].sum()),
str(datetime.datetime.now() + datetime.timedelta(days=14))[:10]: int(calibrados[ref].sum()+projetado[nbr+1:nbr+1+14].sum()),
str(datetime.datetime.now() + datetime.timedelta(days=21))[:10]: int(calibrados[ref].sum()+projetado[nbr+1:nbr+1+21].sum()),
}
# consolidado para output
infos = {
"mortes_no_pico": mortes_no_pico,
"dia_do_pico": dia_do_pico,
"pico": pico,
"index": ix_do_pico,
"mt": mortes_totais,
}
return correlacionados, calibrados, projetado, infos
def gerar_fig_relatorio(p1, p2, p3, uf, cidade):
    """Runs several scenarios and assembles a mosaic of charts plus notes."""
notas = u"""
Sobre o modelo e as estimativas:
As projeções são obtidas a partir da trajetória observada nos três países que melhor se correlacionem com a evolução dos dados do Brasil e localidades.
O desenho da curva projetada (pontilhada) é reflexo do comportamento observado nos países seguidos. Conforme a epidemia avança a referência pode mudar.
Outros parâmetros relevantes:
• os valores são corrigidos por uma estimativa de subnotificação (s) calculado para duas situações:
(a) mortes suspeitas aguardando confirmação e ainda não notificadas
(b) mortes potencialmente devido à Covid-19 notificadas como devidas a outras causas
• as curvas dos diferentes lugares são emparelhadas a partir do dia em que ocorrem N ou mais mortes (eixo x).
• as curvas são alisadas (médias móveis), por isso não iniciam no dia zero. O alisamento permite melhor visualização das curvas mas pode gerar algum
desvio com relação aos número diários absolutos.
• as projeções são recalculadas diariamente e podem sofrer alterações significativas em função das novas informações incorporadas.
Fontes dos dados:
https://covid.ourworldindata.org
https://brasil.io
https://transparencia.registrocivil.org.br
"""
equipe = u' M.Zac | L.Tozi | R.Luciano || https://github.com/Maurozac/covid-br/blob/master/compara.py'
totais = u"""
Mortes estimadas (acumulado)"""
hoje = str(datetime.datetime.now())[:16]
fig, ax = plt.subplots(1, 3, figsize=(12, 6), sharex=True, sharey=True)
fig.suptitle(u"Projeção da epidemia Covid-19" + " | " + hoje, fontsize=12)
fig.subplots_adjust(bottom=0.5)
fig.text(0.33, 0.42, notas, fontsize=7, verticalalignment='top')
fig.text(0.33, 0.02, equipe, family="monospace", fontsize='6', color='#ff003f', horizontalalignment='left')
raw, inicio, data, nbr, subs, refs, oficial = preparar_dados(p1, uf, cidade)
for i in [0, 1, 2]:
if refs[i] == 'n/d':
ax[i].set_title(u"Dados não disponíveis", fontsize=8)
break
correlacionados, calibrados, projetado, infos = rodar_modelo(raw, inicio, data, nbr, p2, p3, refs[i], refs)
ax[i].set_title(refs[i], fontsize=8)
ax[i].set_xlabel(u'Dias desde ' + str(p1) + ' mortes em um dia', fontsize=8)
ax[i].set_xlim(0, calibrados.shape[0]+25)
ax[i].set_ylabel(u'Mortes por dia', fontsize=8)
for c in correlacionados:
ax[i].plot(calibrados[c], linewidth=3, color="#ff7c7a")
lvi = calibrados[c].last_valid_index()
ax[i].text(lvi+1, calibrados[c][lvi], c, fontsize=6, verticalalignment="center")
ax[i].plot(calibrados[refs[i]], linewidth=3, color="#1f78b4")
ax[i].plot(projetado, linewidth=2, linestyle=":", color="#1f78b4")
lvi = pd.Series(projetado).last_valid_index()
ax[i].text(lvi+1, projetado[lvi], refs[i], fontsize=6, verticalalignment="center")
ax[i].plot(infos["index"], infos["pico"], '^', markersize=5.0, color="1", markeredgecolor="#1f78b4")
msg = "PICO ~" + infos["mortes_no_pico"] + " mortes em " + infos["dia_do_pico"] + " s=" + subs[refs[i]]
ax[i].text(infos["index"]-1, infos["pico"]-120, msg, fontsize=7, color="#1f78b4", verticalalignment='top')
ax[i].plot(oficial[refs[i]], linewidth=1, linestyle="--", color="#1f78b4")
ax[i].text(oficial.shape[0]+1, list(oficial[refs[i]])[-1], 'oficial', fontsize=6, verticalalignment="center")
totais += "\n\n " + refs[i] + "\n" + "\n".join([" " + x[0] + ": " + str(x[1]) for x in infos['mt'].items()])
fig.text(0.12, 0.42, totais, fontsize=7, verticalalignment='top', color="#1f78b4")
return fig
######################### Subnotificações ##################################
"""
◔◔ {cada fonte abaixo implica em um valor para o coeficiente p4 de ajuste
pela ordem, infos mais recentes ao final
ref: https://noticias.uol.com.br/saude/ultimas-noticias/redacao/2020/04/09/covid-19-declaracoes-de-obito-apontam-48-mais-mortes-do-que-dado-oficial.htm}
p4 = 1.48
https://saude.estadao.com.br/noticias/geral,em-um-mes-brasil-tem-alta-de-2239-mortes-por-problemas-respiratorios,70003268759
extrapolação => 2239 mortes por covid em março nao contabilizadas, de modo que o total ao final do mês
seria de 201 (covid oficial) + 2239 (potencialmente no pior cenário) = 2440
p4 = 12
=> DEPRECATED: esta situação ocorreu apenas pontualmente durante o mes de março,
mudamos a metodologia para calcular esse parâmetro on-fly (cf. função abaixo)
p4 foi para dentro das funções
"""
def estimar_subnotificacao(ref):
u"""Usa dados do Portal da Transparencia do Registro Civil do Brasil para estimar
a subnotificação de casos de Covid-19.
https://transparencia.registrocivil.org.br/especial-covid
Este portal nos premite ver diferença entre a ocorrência de mortes atribuídas
à insuficiência respiratória e pneumonia em 2019 e 2020.
O PROBLEMA => sabemos que há subnoticações de mortes por COVID por pelo menos duas causas:
a) demora na confirmação de casos, testados ou não
b) casos não são nem testados e são notificados com causa mortis distinta
Para estimar a subnotificação adotamos as seguintes hipóteses:
I) mortes por pneumonia e insuficiencia_respiratoria devereriam ser APROXIMADAMENTE iguais em
2019 e 2020
II) caso a) => por causa da demora na confirmacao a morte não é notificada e os números
de mortes por pneumonia ou insuficiencia_respiratoria para 2020 aparecem menores do que 2019.
Essa diferença seria igual ao número máximo de mortes por covid ainda não confirmadas. Esse
número corresponde ao número de mortes ainda no "limbo", sem causa morte determinada.
III) caso b) => por causa de notificação errada/incompleta + mortes colaterais, o número de 2020 fica maior:
a diferença sendo atribuída ao covid, direta ou indiretamente.
IV) os casos a) e b) seriam estratégias deliberadas e, portanto, não se ocorreriam simultaneamente
V) ok, mortes colaterais podem puxar estimativas para baixo no caso a); mas por enquanto não há
muito o que fazer, são ESTIMATIVAS. Fica a ressalva que s(a) pode estar sendo subestimado.
Como as bases de dados são dinâmicas e os números vão mudando conforme confirmações vão
sendo computadas, inclusive retroativamente. Portanto, o coeficiente de subnotificação (s) precisa ser
recalculado diariamente.
Inputs:
.ref => sigla do estado ou calcula para Brasil
.total => soma das mortes para ref
Retorna: tupla
. sub: número de casos potencialmente subnotificados
. hip: hipóte 'a': casos não notificados; 'b': casos notificados com outra causa
"""
sub, hip = 1, "ø"
estados = [
'AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS',
'MG', 'PA', 'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC',
'SP', 'SE', 'TO',
]
if ref not in estados:
ref = "all"
hoje = str(datetime.datetime.now())[:10]
api = "https://transparencia.registrocivil.org.br/api/covid?"
api += "data_type=data_ocorrido"
api += "&search=death-respiratory"
api += "&state=" + ref
api += "&start_date=2020-03-16"
api += "&end_date=" + hoje
call_1 = api + "&causa=insuficiencia_respiratoria"
call_2 = api + "&causa=pneumonia"
try:
c1 = requests.get(call_1).json()
c2 = requests.get(call_2).json()
m19 = c1['chart']['2019'] + c2['chart']['2019']
m20 = c1['chart']['2020'] + c2['chart']['2020']
if m20 <= m19: # caso a
sub = m19 - m20
hip = "a"
else: # caso b
sub = m20 - m19
hip = "b"
except:
print("[!] FALHA em registrocivil.org.br")
return sub, hip
######################### RELATORIO ########################################
def relatorio_hoje(p1, p2, p3, uf, cidade, my_path):
    """Computes everything and generates a PDF report."""
# gera o dash do dia
dashboard = gerar_fig_relatorio(p1, p2, p3, uf, cidade)
# salva em um arquivo pdf
hoje = str(datetime.datetime.now())[:10]
pp = PdfPages(my_path+"covid_dashboard_"+uf+"_"+cidade+"_"+hoje+".pdf")
dashboard.savefig(pp, format='pdf')
pp.close()
# set the path for your own environment... this one is mine :-)
my_path = "/Users/tapirus/Desktop/"
# model parameters: deaths threshold used to align the series, number of countries compared, smoothing window
p1, p2, p3 = 15, 3, 7
relatorio_hoje(p1, p2, p3, "SP", "São Paulo", my_path)
relatorio_hoje(p1, p2, p3, "AM", "Manaus", my_path)
| StarcoderdataPython |
11176 | <filename>engine/sentiment_analysis.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 17:42:27 2018
@author: zgeorg03
"""
import re
import json # Used for converting json to dictionary
import datetime # Used for date conversions
import matplotlib.pyplot as plt
import numpy as np
from sentiment import Sentiment
import json
class NewsArticle:
def __init__(self,hash,title,author,url,content,date,topics, feed):
self.hash = hash
self.title = title
self.author = author
self.url = url
self.content = content
self.date = datetime.datetime.fromtimestamp(date/1000.0)
self.topics = topics
self.feed = feed
self.sep = re.compile("[.!?]")
def __repr__(self):
return "hash={},title={},author={},date={},topics={}".format(
self.hash, self.title, self.author,
self.date, self.topics, self.feed)
def __str__(self):
return self.__repr__()
def produce_title_scores(self, sentiment):
lines = self.sep.split(self.title)
sentiment.score(lines)
neg,neu,pos,com,count = sentiment.get_avg_scores()
return (float("{0:.2f}".format(neg*100)), float("{0:.2f}".format(neu*100))
, float("{0:.2f}".format(pos*100)), float("{0:.2f}".format(com*100)),count
)
def produce_content_scores(self, sentiment):
lines = self.sep.split(self.content)
sentiment.score(lines)
neg,neu,pos,com,count = sentiment.get_avg_scores()
return (float("{0:.2f}".format(neg*100)), float("{0:.2f}".format(neu*100))
, float("{0:.2f}".format(pos*100)), float("{0:.2f}".format(com*100)),count
)
class Parser:
def __init__(self,file_in,max_articles=None,file_out=None):
        self.file_name = file_in
self.max_articles = max_articles
self.articles = []
self.sentiment = Sentiment()
self.results = []
self.file_out = file_out
def parse(self):
count = 0
with open(self.file_name,"r",encoding="UTF-8") as file:
for line in file:
if line.startswith(','):
continue
self.articles.append(self.parse_news_article(line))
count += 1
if self.max_articles:
if count >= self.max_articles:
break
def write(self):
for i,article in enumerate(self.articles):
if i % 100 == 0:
print('Finished: {} docs'.format(i))
self.write_article(article)
if self.file_out:
with open(self.file_out, 'w') as outfile:
json.dump(self.results, outfile,sort_keys=True,indent=4)
else:
print(json.dumps(self.results,sort_keys=True,indent=4))
def write_article(self,article):
res = {}
res['neg_title'],res['neu_title'],res['pos_title'],res['score_title'], _ = article.produce_title_scores(self.sentiment)
res['neg_content'],res['neu_content'],res['pos_content'],res['score_content'], _ = article.produce_content_scores(self.sentiment)
res['id'] = article.hash
res['title'] = article.title
res['date'] = int(article.date.timestamp())
res['content'] = article.content
res['topics'] = article.topics
res['feed'] = article.feed
res['url'] = article.url
res['author'] = article.author
res['overall_score']= float(res['score_title'])*0.75 + float(res['score_content'])*0.25
overall_score = res['overall_score']
if overall_score <= -50:
res['class']= 'Very Negative'
res['class_code'] = 4
elif overall_score <= 0:
res['class']= 'Negative'
res['class_code'] = 3
elif overall_score <= 50:
res['class']= 'Positive'
res['class_code'] = 2
elif overall_score <= 100:
res['class']= 'Very Positive'
res['class_code'] = 1
self.results.append(res)
def parse_news_article(self, line):
data = json.loads(line)
hash = data['hash']
title = data['title']
author = data['author']
content = data['content']
date = data['date']
topics = list(set(data['topics']))
feed = data['feed']
url = data['link']
return NewsArticle(hash,title,author,url,content,date,topics,feed)
if __name__ == '__main__':
file_name = "./log"
#max_articles = 1000
p = Parser(file_name,file_out='data-26-04.json')
p.parse()
p.write()
print('Finished')
def test(articles, sentiment, max_articles):
    # Plot the per-article title scores.
    x = np.array([a.produce_title_scores(sentiment) for a in articles[:max_articles]])
    plt.figure(figsize=(12,9))
    plt.title('Articles: {}'.format(max_articles))
    plt.plot(x[:,0],'x',label="Negative {0:.2f}".format(np.average(x[:,0])))
    plt.plot(x[:,2],'+',label="Positive {0:.2f}".format(np.average(x[:,2])))
    plt.plot(x[:,1],'.',label="Neutral {0:.2f}".format(np.average(x[:,1])))
    plt.plot(x[:,3],'.',label="Compound {0:.2f}".format(np.average(x[:,3])))
    plt.legend()
    # Plot the per-article content scores.
    x = np.array([a.produce_content_scores(sentiment) for a in articles[:max_articles]])
    print(x[:,0])
    plt.figure(figsize=(12,9))
    plt.title('Articles: {}'.format(max_articles))
    plt.plot(x[:,0],'x',label="Negative {0:.2f}".format(np.average(x[:,0])))
    plt.plot(x[:,2],'+',label="Positive {0:.2f}".format(np.average(x[:,2])))
    plt.plot(x[:,1],'.',label="Neutral {0:.2f}".format(np.average(x[:,1])))
    plt.plot(x[:,3],'.',label="Compound {0:.2f}".format(np.average(x[:,3])))
    plt.legend()
| StarcoderdataPython |
3248565 | <reponame>rishusingh022/My-Journey-of-Data-Structures-and-Algorithms<filename>Project Euler Problems/Problem30.py
def check_self_behaviour(num,pow):
return num == sum([int(elem)**pow for elem in str(num)])
final_ans = 0
for i in range(2,1000000):
if check_self_behaviour(i,5):
final_ans += i
print(final_ans) | StarcoderdataPython |
1735031 | <reponame>ruoshengyuan/louplus-dm
import datetime
import pandas as pd
base = datetime.date(2018, 10, 30)
numdays = 80
# List of the 80 days starting from October 30
date_list = [base + datetime.timedelta(days=x) for x in range(0, numdays)]
# Fetch ticket data from start to dest and insert it into the database
def getTickets(start, dest, driver, date_list, conn):
cursor = conn.cursor()
name_attribute = []
for one_day in tqdm_notebook(date_list):
        # Fetch the data for this day
tmp = get_ticket_info(start, dest, str(one_day), driver)
for x in tmp:
result = cursor.execute(
SELECT_COMMAND, (x[-2], str(one_day), x[1])).fetchall()
x.append(str(one_day))
if len(result) == 0:
                # Insert into the database only if this record has not been scraped yet
cursor.execute(INSERT_COMMAND1, x)
conn.commit()
cursor.close()
# Chengdu to Shanghai and Shanghai to Chengdu
getTickets('CTU', 'SHA', driver, date_list, conn)
getTickets('SHA', 'CTU', driver, date_list, conn)
| StarcoderdataPython |
79347 | <filename>leekspin/server.py
# -*- coding: utf-8 -*-
"""Module for creating ``@type [bridge-]server-descriptor``s.
.. authors:: <NAME> <<EMAIL>> 0xA3ADB67A2CDB8B35
<NAME> <<EMAIL>>
.. licence:: see LICENSE file for licensing details
.. copyright:: (c) 2013-2014 The Tor Project, Inc.
(c) 2013-2014 Isis Lovecruft
(c) 2013-2014 <NAME>
"""
import math
import random
from leekspin import crypto
from leekspin import torversions
def generateServerDescriptor(nick, fingerprint, timestamp,
ipv4, ipv6, port, vers, protocols,
uptime, bandwidth, extraInfoHexDigest,
onionKeyLine, signingKeyLine, publicNTORKey,
bridge=True):
doc = []
# TODO: non-bridge routers need real dirports and socksports
doc.append(b"router %s %s %s 0 0" % (nick, ipv4, port))
doc.append(b"or-address [%s]:%s" % (ipv6, port - 1))
doc.append(b"platform Tor %s on Linux" % vers)
doc.append(b"%s" % protocols)
doc.append(b"published %s" % timestamp)
doc.append(b"%s" % makeFingerprintLine(fingerprint, vers))
doc.append(b"uptime %s" % uptime)
doc.append(b"%s" % bandwidth)
doc.append(b"%s" % makeExtraInfoDigestLine(extraInfoHexDigest, vers))
doc.append(b"%s" % onionKeyLine)
doc.append(b"%s" % signingKeyLine)
if not bridge:
doc.append(b"%s" % makeHSDirLine(vers))
doc.append(b"contact Somebody <<EMAIL>>")
if publicNTORKey is not None:
doc.append(b"ntor-onion-key %s" % publicNTORKey)
doc.append(b"reject *:*")
doc.append(b"router-signature\n")
unsignedDescriptor = b'\n'.join(doc)
return unsignedDescriptor
def makeProtocolsLine(version=None):
"""Generate an appropriate [bridge-]server-descriptor 'protocols' line.
:param str version: One of ``SERVER_VERSIONS``.
:rtype: str
:returns: An '@type [bridge-]server-descriptor' 'protocols' line.
"""
line = b''
if (version is not None) and torversions.shouldHaveOptPrefix(version):
line += b'opt '
line += b'protocols Link 1 2 Circuit 1'
return line
def makeExtraInfoDigestLine(hexdigest, version):
"""Create a line to embed the hex SHA-1 digest of the extrainfo.
:param string hexdigest: Should be the hex-encoded (uppercase) output of
the SHA-1 digest of the generated extrainfo document (this is the
extra-info descriptor, just without the signature at the end). This is
the same exact digest which gets signed by the OR server identity key,
and that signature is appended to the extrainfo document to create the
extra-info descriptor.
:param string version: One of ``SERVER_VERSIONS``.
:rtype: string
:returns: An ``@type [bridge-]server-descriptor`` 'extra-info-digest'
line.
"""
line = b''
if (version is not None) and torversions.shouldHaveOptPrefix(version):
line += b'opt '
line += b'extra-info-digest %s' % hexdigest
return line
def makeFingerprintLine(fingerprint, version=None):
"""Generate an appropriate [bridge-]server-descriptor 'fingerprint' line.
For example, for tor-0.2.3.25 and prior versions, this would look like:
|
| opt fingerprint D4BB C339 2560 1B7F 226E 133B A85F 72AF E734 0B29
|
:param string fingerprint: A public key fingerprint in groups of four,
separated by spaces.
:param string version: One of ``SERVER_VERSIONS``.
:rtype: string
:returns: An '@type [bridge-]server-descriptor' 'published' line.
"""
line = b''
if (version is not None) and torversions.shouldHaveOptPrefix(version):
line += b'opt '
line += b'fingerprint %s' % fingerprint
return line
def makeBandwidthLine(variance=30):
"""Create a random 'bandwidth' line with some plausible burst variance.
From torspec.git/dir-spec.txt, §2.1 "Router descriptors":
| "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed NL
|
| [Exactly once]
|
| Estimated bandwidth for this router, in bytes per second. The
| "average" bandwidth is the volume per second that the OR is willing
| to sustain over long periods; the "burst" bandwidth is the volume
| that the OR is willing to sustain in very short intervals. The
| "observed" value is an estimate of the capacity this relay can
| handle. The relay remembers the max bandwidth sustained output over
| any ten second period in the past day, and another sustained input.
| The "observed" value is the lesser of these two numbers.
The "observed" bandwidth, in this function, is taken as some random value,
bounded between 20KB/s and 2MB/s. For example, say:
>>> import math
>>> variance = 25
>>> observed = 180376
>>> percentage = float(variance) / 100.
>>> percentage
0.25
The ``variance`` in this context is the percentage of the "observed"
bandwidth, which will be added to the "observed" bandwidth, and becomes
the value for the "burst" bandwidth:
>>> burst = observed + math.ceil(observed * percentage)
>>> assert burst > observed
This doesn't do much, since the "burst" bandwidth in a real
[bridge-]server-descriptor is reported by the OR; this function mostly
serves to avoid generating completely-crazy, totally-implausible bandwidth
values. The "average" bandwidth value is then just the mean value of the
other two.
:param integer variance: The percent of the fake "observed" bandwidth to
increase the "burst" bandwidth by.
:rtype: string
:returns: A "bandwidth" line for a [bridge-]server-descriptor.
"""
observed = random.randint(20 * 2**10, 2 * 2**30)
percentage = float(variance) / 100.
burst = int(observed + math.ceil(observed * percentage))
bandwidths = [burst, observed]
nitems = len(bandwidths) if (len(bandwidths) > 0) else float('nan')
avg = int(math.ceil(float(sum(bandwidths)) / nitems))
line = b"bandwidth %s %s %s" % (avg, burst, observed)
return line
def makeHSDirLine(version):
"""This line doesn't do much… all the cool kids are HSDirs these days.
:param string version: One of ``SERVER_VERSIONS``.
:rtype: string
:returns: An ``@type [bridge-]server-descriptor`` 'hidden-service-dir'
line.
"""
line = b''
if (version is not None) and torversions.shouldHaveOptPrefix(version):
line += b'opt '
line += b'hidden-service-dir'
return line
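# --- Example usage (illustrative sketch, not part of the original module) ---
# Only the prefix-free helpers are exercised here, since most of this module relies
# on Python 2 style bytes formatting; with version=None no "opt " prefix is added.
if __name__ == "__main__":
    print(makeProtocolsLine(None))
    print(makeHSDirLine(None))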
| StarcoderdataPython |
179840 | from .context import polarity
def test_pos_polarity_result():
assert polarity.polarity_result(0.8) == 'positive'
def test_neg_polarity_result():
assert polarity.polarity_result(-0.8) == 'negative'
def test_neutral_polarity_result():
assert polarity.polarity_result(0) == 'neutral'
| StarcoderdataPython |
137380 | # Generated by Django 4.0.2 on 2022-03-23 11:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0036_profile_following'),
('base', '0037_reportcomments_reportposts_delete_report'),
]
operations = [
]
| StarcoderdataPython |
3215436 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START cloudoptimization_async_api]
from google.api_core.exceptions import GoogleAPICallError
from google.cloud import optimization_v1
# TODO(developer): Uncomment these variables before running the sample.
# project_id= 'YOUR_PROJECT_ID'
# request_file_name = 'YOUR_REQUEST_FILE_NAME'
# request_model_gcs_path = 'gs://YOUR_PROJECT/YOUR_BUCKET/YOUR_REQUEST_MODEL_PATH'
# model_solution_gcs_path = 'gs://YOUR_PROJECT/YOUR_BUCKET/YOUR_SOLUTION_PATH'
def call_async_api(
project_id: str, request_model_gcs_path: str, model_solution_gcs_path_prefix: str
) -> None:
"""Call the async api for fleet routing."""
# Use the default credentials for the environment to authenticate the client.
fleet_routing_client = optimization_v1.FleetRoutingClient()
request_file_name = "resources/async_request.json"
with open(request_file_name, "r") as f:
fleet_routing_request = optimization_v1.BatchOptimizeToursRequest.from_json(
f.read()
)
fleet_routing_request.parent = f"projects/{project_id}"
for idx, mc in enumerate(fleet_routing_request.model_configs):
mc.input_config.gcs_source.uri = request_model_gcs_path
model_solution_gcs_path = f"{model_solution_gcs_path_prefix}_{idx}"
mc.output_config.gcs_destination.uri = model_solution_gcs_path
# The timeout argument for the gRPC call is independent from the `timeout`
# field in the request's OptimizeToursRequest message(s).
operation = fleet_routing_client.batch_optimize_tours(fleet_routing_request)
print(operation.operation.name)
try:
# Block to wait for the job to finish.
result = operation.result()
print(result)
        # Do your stuff here.
except GoogleAPICallError:
print(operation.operation.error)
# [END cloudoptimization_async_api]
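# --- Example invocation (illustrative sketch, not part of the original sample) ---
# The project id and Cloud Storage paths below are hypothetical placeholders; the
# request model JSON must already exist at the given gs:// path and the local
# resources/async_request.json file must be present.
if __name__ == "__main__":
    call_async_api(
        "example-project",
        "gs://example-bucket/async_request_model.json",
        "gs://example-bucket/async_response",
    )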
| StarcoderdataPython |
139709 | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""Speech activity detection"""
from typing import Optional
from typing import Text
import numpy as np
import torch
import torch.nn as nn
from .base import LabelingTask
from .base import LabelingTaskGenerator
from pyannote.audio.train.task import Task, TaskType, TaskOutput
from ..gradient_reversal import GradientReversal
from pyannote.audio.models.models import RNN
from pyannote.audio.features.wrapper import Wrappable
from pyannote.database import Protocol
from pyannote.database import Subset
from pyannote.audio.train.model import Resolution
from pyannote.audio.train.model import Alignment
class SpeechActivityDetectionGenerator(LabelingTaskGenerator):
"""Batch generator for training speech activity detection
Parameters
----------
task : Task
Task
feature_extraction : Wrappable
Describes how features should be obtained.
See pyannote.audio.features.wrapper.Wrapper documentation for details.
protocol : Protocol
subset : {'train', 'development', 'test'}, optional
Protocol and subset.
resolution : `pyannote.core.SlidingWindow`, optional
Override `feature_extraction.sliding_window`. This is useful for
models that include the feature extraction step (e.g. SincNet) and
therefore output a lower sample rate than that of the input.
Defaults to `feature_extraction.sliding_window`
alignment : {'center', 'loose', 'strict'}, optional
Which mode to use when cropping labels. This is useful for models that
include the feature extraction step (e.g. SincNet) and therefore use a
different cropping mode. Defaults to 'center'.
duration : float, optional
Duration of audio chunks. Defaults to 2s.
batch_size : int, optional
Batch size. Defaults to 32.
per_epoch : float, optional
Force total audio duration per epoch, in days.
Defaults to total duration of protocol subset.
mask : str, optional
When provided, protocol files are expected to contain a key named after
this `mask` variable and providing a `SlidingWindowFeature` instance.
Generated batches will contain an additional "mask" key (on top of
existing "X" and "y" keys) computed as an excerpt of `current_file[mask]`
time-aligned with "y". Defaults to not add any "mask" key.
"""
def __init__(
self,
task: Task,
feature_extraction: Wrappable,
protocol: Protocol,
subset: Subset = "train",
resolution: Optional[Resolution] = None,
alignment: Optional[Alignment] = None,
duration: float = 2.0,
batch_size: int = 32,
per_epoch: float = None,
mask: Text = None,
):
super().__init__(
task,
feature_extraction,
protocol,
subset=subset,
resolution=resolution,
alignment=alignment,
duration=duration,
batch_size=batch_size,
per_epoch=per_epoch,
exhaustive=False,
mask=mask,
local_labels=True,
)
def postprocess_y(self, Y: np.ndarray) -> np.ndarray:
"""Generate labels for speech activity detection
Parameters
----------
Y : (n_samples, n_speakers) numpy.ndarray
Discretized annotation returned by
`pyannote.core.utils.numpy.one_hot_encoding`.
Returns
-------
y : (n_samples, 1) numpy.ndarray
See also
--------
`pyannote.core.utils.numpy.one_hot_encoding`
"""
# number of speakers for each frame
speaker_count = np.sum(Y, axis=1, keepdims=True)
# mark speech regions as such
return np.int64(speaker_count > 0)
@property
def specifications(self):
specs = {
"task": self.task,
"X": {"dimension": self.feature_extraction.dimension},
"y": {"classes": ["non_speech", "speech"]},
}
for key, classes in self.file_labels_.items():
# TODO. add an option to handle this list
# TODO. especially useful for domain-adversarial stuff
if key in ["duration", "audio", "uri"]:
continue
specs[key] = {"classes": classes}
return specs
class SpeechActivityDetection(LabelingTask):
"""Train speech activity (and overlap) detection
Parameters
----------
duration : float, optional
Duration of sub-sequences. Defaults to 3.2s.
batch_size : int, optional
Batch size. Defaults to 32.
per_epoch : float, optional
Total audio duration per epoch, in days.
Defaults to one day (1).
"""
def get_batch_generator(
self,
feature_extraction,
protocol,
subset: Subset = "train",
resolution=None,
alignment=None,
):
"""
resolution : `pyannote.core.SlidingWindow`, optional
Override `feature_extraction.sliding_window`. This is useful for
models that include the feature extraction step (e.g. SincNet) and
therefore output a lower sample rate than that of the input.
alignment : {'center', 'loose', 'strict'}, optional
Which mode to use when cropping labels. This is useful for models
that include the feature extraction step (e.g. SincNet) and
therefore use a different cropping mode. Defaults to 'center'.
"""
return SpeechActivityDetectionGenerator(
self.task,
feature_extraction,
protocol,
subset=subset,
resolution=resolution,
alignment=alignment,
duration=self.duration,
per_epoch=self.per_epoch,
batch_size=self.batch_size,
)
class DomainAwareSpeechActivityDetection(SpeechActivityDetection):
"""Domain-aware speech activity detection
Trains speech activity detection and domain classification jointly.
Parameters
----------
domain : `str`, optional
Batch key to use as domain. Defaults to 'domain'.
Could be 'database' or 'uri' for instance.
attachment : `int`, optional
Intermediate level where to attach the domain classifier.
Defaults to -1. Passed to `return_intermediate` in models supporting it.
rnn: `dict`, optional
Parameters of the RNN used in the domain classifier.
See `pyannote.audio.models.models.RNN` for details.
domain_loss : `str`, optional
Loss function to use. Defaults to 'NLLLoss'.
"""
DOMAIN_PT = "{train_dir}/weights/{epoch:04d}.domain.pt"
def __init__(
self, domain="domain", attachment=-1, rnn=None, domain_loss="NLLLoss", **kwargs
):
super().__init__(**kwargs)
self.domain = domain
self.attachment = attachment
if rnn is None:
rnn = dict()
self.rnn = rnn
self.domain_loss = domain_loss
if self.domain_loss == "NLLLoss":
# Default value
self.domain_loss_ = nn.NLLLoss()
self.activation_ = nn.LogSoftmax(dim=1)
elif self.domain_loss == "MSELoss":
self.domain_loss_ = nn.MSELoss()
self.activation_ = nn.Sigmoid()
else:
msg = f"{domain_loss} has not been implemented yet."
raise NotImplementedError(msg)
def more_parameters(self):
"""Initialize trainable trainer parameters
Yields
------
parameter : nn.Parameter
Trainable trainer parameters
"""
domain_classifier_rnn = RNN(
n_features=self.model.intermediate_dimension(self.attachment), **self.rnn
)
n_classes = len(self.specifications[self.domain]["classes"])
domain_classifier_linear = nn.Linear(
domain_classifier_rnn.dimension, n_classes, bias=True
).to(self.device)
self.domain_classifier_ = nn.Sequential(
domain_classifier_rnn, domain_classifier_linear
).to(self.device)
# TODO: check if we really need to do this .to(self.device) twice
return self.domain_classifier_.parameters()
def load_more(self, model_pt=None) -> bool:
"""Load classifier from disk"""
if model_pt is None:
domain_pt = self.DOMAIN_PT.format(
train_dir=self.train_dir_, epoch=self.epoch_
)
else:
domain_pt = model_pt.with_suffix(".domain.pt")
domain_classifier_state = torch.load(
domain_pt, map_location=lambda storage, loc: storage
)
self.domain_classifier_.load_state_dict(domain_classifier_state)
# FIXME add support for different domains
return True
def save_more(self):
"""Save domain classifier to disk"""
domain_pt = self.DOMAIN_PT.format(train_dir=self.train_dir_, epoch=self.epoch_)
torch.save(self.domain_classifier_.state_dict(), domain_pt)
def batch_loss(self, batch):
"""Compute loss for current `batch`
Parameters
----------
batch : `dict`
['X'] (`numpy.ndarray`)
['y'] (`numpy.ndarray`)
Returns
-------
batch_loss : `dict`
['loss'] (`torch.Tensor`) : Loss
"""
# forward pass
X = torch.tensor(batch["X"], dtype=torch.float32, device=self.device_)
fX, intermediate = self.model_(X, return_intermediate=self.attachment)
# speech activity detection
fX = fX.view((-1, self.n_classes_))
target = (
torch.tensor(batch["y"], dtype=torch.int64, device=self.device_)
.contiguous()
.view((-1,))
)
weight = self.weight
if weight is not None:
weight = weight.to(device=self.device_)
loss = self.loss_func_(fX, target, weight=weight)
# domain classification
domain_target = torch.tensor(
batch[self.domain], dtype=torch.int64, device=self.device_
)
domain_scores = self.activation_(self.domain_classifier_(intermediate))
domain_loss = self.domain_loss_(domain_scores, domain_target)
return {
"loss": loss + domain_loss,
"loss_domain": domain_loss,
"loss_task": loss,
}
class DomainAdversarialSpeechActivityDetection(DomainAwareSpeechActivityDetection):
"""Domain Adversarial speech activity detection
Parameters
----------
domain : `str`, optional
Batch key to use as domain. Defaults to 'domain'.
Could be 'database' or 'uri' for instance.
attachment : `int`, optional
Intermediate level where to attach the domain classifier.
Defaults to -1. Passed to `return_intermediate` in models supporting it.
alpha : `float`, optional
Coefficient multiplied with the domain loss
"""
def __init__(self, domain="domain", attachment=-1, alpha=1.0, **kwargs):
super().__init__(domain=domain, attachment=attachment, **kwargs)
self.alpha = alpha
self.gradient_reversal_ = GradientReversal()
def batch_loss(self, batch):
"""Compute loss for current `batch`
Parameters
----------
batch : `dict`
['X'] (`numpy.ndarray`)
['y'] (`numpy.ndarray`)
Returns
-------
batch_loss : `dict`
['loss'] (`torch.Tensor`) : Loss
"""
# forward pass
X = torch.tensor(batch["X"], dtype=torch.float32, device=self.device_)
fX, intermediate = self.model_(X, return_intermediate=self.attachment)
# speech activity detection
fX = fX.view((-1, self.n_classes_))
target = (
torch.tensor(batch["y"], dtype=torch.int64, device=self.device_)
.contiguous()
.view((-1,))
)
weight = self.weight
if weight is not None:
weight = weight.to(device=self.device_)
loss = self.loss_func_(fX, target, weight=weight)
# domain classification
domain_target = torch.tensor(
batch[self.domain], dtype=torch.int64, device=self.device_
)
domain_scores = self.activation_(
self.domain_classifier_(self.gradient_reversal_(intermediate))
)
if self.domain_loss == "MSELoss":
# One hot encode domain_target for Mean Squared Error Loss
nb_domains = domain_scores.shape[1]
identity_mat = torch.sparse.torch.eye(nb_domains, device=self.device_)
domain_target = identity_mat.index_select(dim=0, index=domain_target)
domain_loss = self.domain_loss_(domain_scores, domain_target)
return {
"loss": loss + self.alpha * domain_loss,
"loss_domain": domain_loss,
"loss_task": loss,
}
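# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes that the
# parent `LabelingTask` accepts the `duration`, `batch_size` and `per_epoch`
# keyword arguments documented above, and that the import paths and protocol
# name below exist in your pyannote installation -- adapt them to your setup.
# ---------------------------------------------------------------------------
# from pyannote.audio.features import RawAudio           # assumed import path
# from pyannote.database import get_protocol
#
# protocol = get_protocol("Debug.SpeakerDiarization.Debug")   # placeholder
# task = SpeechActivityDetection(duration=2.0, batch_size=32)
# batches = task.get_batch_generator(
#     RawAudio(sample_rate=16000), protocol, subset="train"
# )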
| StarcoderdataPython |
117684 | <reponame>anoadragon453/synapse-config-generator
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from os import mkdir
from os.path import abspath, isdir, join
import yaml
from synapse.config import find_config_files
from .config import create_config
from .constants import CONFIG_LOCK, DATA_SUBDIR
from .errors import BaseConfigInUseError
from .util import is_subpath
class Model:
"""
The Model brokers modification of the config file and signing keys in the
config directory.
"""
def __init__(self, config_dir):
self.config_dir = abspath(config_dir)
self.data_dir = abspath(join(self.config_dir, DATA_SUBDIR))
if not isdir(self.config_dir):
mkdir(self.config_dir)
if not isdir(self.data_dir):
mkdir(self.data_dir)
def get_config(self, config_path: str) -> dict:
"""
Retrieves a config from the config directory. Any path can be provided
but it must be a subdirectory of self.config_dir
Args:
config_path: path to the config
Returns:
The yaml parse of the config file
"""
conf_path = abspath(join(self.config_dir, config_path))
if not is_subpath(self.config_dir, conf_path):
raise FileNotFoundError()
with open(conf_path, "r") as f:
return yaml.safe_load(f)
def write_config(self, config: dict):
"""
Given a config generates a templated config from synapse and writes it
out to the config dir. It will raise an exception if the config in
the config directory is in use.
Args:
config (dict): The configuration to template out.
"""
if self.config_in_use():
raise BaseConfigInUseError()
for conf_name, conf in create_config(
self.config_dir, self.data_dir, config
).items():
with open(abspath(join(self.config_dir, conf_name)), "w") as f:
f.write(conf)
def config_in_use(self) -> bool:
"""
Checks if we set whether the config is in use. If it was set up by the
system but synapse wasn't launched yet we will have set this to False.
However if it's not present we assume someone else has set up synapse
before so we assume the config is in use.
"""
config = {}
config_files = find_config_files([self.config_dir])
for config_file in config_files:
with open(config_file) as stream:
config.update(yaml.safe_load(stream))
if not config:
return False
print(config.get(CONFIG_LOCK))
return config.get(CONFIG_LOCK, True)
def generate_secret_key(self, server_name: str) -> str:
"""
Generates and writes a secret key.
Args:
server_name: The name of the homeserver
Returns:
The secret key identifying the server.
"""
if self.config_in_use():
raise BaseConfigInUseError()
signing_key_path = join(self.config_dir, server_name + ".signing.key")
subprocess.run(["generate_signing_key.py", "-o", signing_key_path])
with open(signing_key_path, "r") as f:
return f.read()
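# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). The config directory and the
# exact keys expected by `create_config` are deployment-specific assumptions.
# ---------------------------------------------------------------------------
# model = Model("/path/to/config_dir")
# if not model.config_in_use():
#     key = model.generate_secret_key("example.com")
#     model.write_config({"server_name": "example.com"})   # keys are assumed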
| StarcoderdataPython |
1627491 | <gh_stars>1-10
class Solution(object):
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
cycle_members = {4, 16, 37, 58, 89, 145, 42, 20}
def getn(n):
res = 0
while n:
res += (n % 10)**2
n = n // 10
return res
while n != 1 and n not in cycle_members:
n = getn(n)
return n == 1 | StarcoderdataPython |
3364860 | <reponame>kosarkarbasi/python_course<filename>tutproject/blog/admin.py
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Blog)
admin.site.register(Author)
admin.site.register(Entry)
# admin.site.register(person)
| StarcoderdataPython |
119533 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
sys.path.insert(0, "../../python/")
import mxnet as mx
import numpy as np
import numpy.random as rnd
import time
def check_diff_to_scalar(A, x, rank=None):
""" assert A == x"""
assert(np.sum(np.abs((A - x).asnumpy())) == 0), (rank, A.asnumpy(), x)
# setup
keys = ['3', '5', '7']
rsp_keys = ['9', '11', '13']
rate = 2
shape = (2, 3)
big_shape = (1200, 1200) # bigger than BIGARRAY_BOUND
def init_kv():
kv = mx.kv.create('dist_sync')
# init kv dns keys
kv.init(keys, [mx.nd.ones(shape)] * len(keys))
kv.init('99', mx.nd.ones(big_shape))
# init kv row_sparse keys
kv.init(rsp_keys, [mx.nd.ones(shape).tostype('row_sparse')] * len(rsp_keys))
kv.init('100', mx.nd.ones(big_shape).tostype('row_sparse'))
# worker info
my_rank = kv.rank
nworker = kv.num_workers
# init updater on servers
kv.set_optimizer(mx.optimizer.create('test', rescale_grad=rate))
return kv, my_rank, nworker
def test_sync_push_pull():
kv, my_rank, nworker = init_kv()
def check_default_keys(kv, my_rank, nworker):
nrepeat = 3
for i in range(nrepeat):
kv.push('3', mx.nd.ones(shape)*(my_rank+1))
kv.push('99', mx.nd.ones(big_shape)*(my_rank+1))
num = (nworker + 1) * nworker * rate / 2 * nrepeat + 1
val = mx.nd.zeros(shape)
kv.pull('3', out=val)
check_diff_to_scalar(val, num)
val2 = mx.nd.zeros(big_shape)
kv.pull('99', out=val2)
check_diff_to_scalar(val2, num)
def check_row_sparse_keys(kv, my_rank, nworker):
nrepeat = 3
# prepare gradient
v = mx.nd.zeros(shape)
my_row = my_rank % shape[0]
v[my_row] = my_rank + 1
# push
for i in range(nrepeat):
kv.push('9', v.tostype('row_sparse'))
# select a random subset of rows this worker is interested in
num_rows = shape[0]
row_ids_np = np.random.randint(num_rows, size=num_rows)
row_ids = mx.nd.array(row_ids_np, dtype='int64')
# perform pull
val = mx.nd.zeros(shape, stype='row_sparse')
kv.row_sparse_pull('9', out=val, row_ids=row_ids)
# prepare updated values
updated_val = mx.nd.ones(shape)
for rank in range(nworker):
row = rank % shape[0]
updated_val[row] += (rank + 1) * rate * nrepeat
# verify subset of updated values
expected = mx.nd.zeros(shape)
for row in row_ids_np:
expected[row] = updated_val[row]
check_diff_to_scalar(val, expected)
def check_row_sparse_keys_with_zeros(kv, my_rank, nworker):
nrepeat = 3
# prepare gradient
v = mx.nd.zeros(shape)
big_v = mx.nd.zeros(big_shape)
# push
for i in range(nrepeat):
kv.push('11', v.tostype('row_sparse'))
kv.push('100', big_v.tostype('row_sparse'))
# pull a subset of rows this worker is interested in
all_row_ids = np.arange(shape[0])
val = mx.nd.ones(shape).tostype('row_sparse')
big_val = mx.nd.ones(big_shape).tostype('row_sparse')
kv.row_sparse_pull('11', out=val, row_ids=mx.nd.array(all_row_ids, dtype='int64'))
big_num_rows = shape[0]
big_all_row_ids = np.arange(big_shape[0])
kv.row_sparse_pull('100', out=big_val, row_ids=mx.nd.array(big_all_row_ids, dtype='int64'))
# verify results
check_diff_to_scalar(val, mx.nd.ones(shape))
check_diff_to_scalar(big_val, mx.nd.ones(big_shape))
def check_big_row_sparse_keys(kv, my_rank, nworker):
mx.random.seed(123)
rnd.seed(123)
density = 0.3
nrepeat = 3
# prepare gradient
v = mx.nd.zeros(big_shape)
idx_sample = rnd.rand(big_shape[0])
indices = np.argwhere(idx_sample < density).flatten()
# each worker chooses a subset of the indices to update
update_rows = []
for rank in range(nworker):
rows = []
i = 0
step = (rank + 1) * 2
while i < len(indices):
rows.append(indices[i])
i += step
update_rows.append(np.array(rows))
# rows to update for this worker
for row in update_rows[my_rank]:
v[row] = my_rank + 1
# push
for i in range(nrepeat):
kv.push('100', v.tostype('row_sparse'))
# select a random subset of rows this worker is interested in
mx.random.seed(my_rank)
rnd.seed(my_rank)
num_rows = big_shape[0]
row_ids_np = np.random.randint(num_rows, size=num_rows)
row_ids = mx.nd.array(row_ids_np, dtype='int64')
# perform pull
val = mx.nd.zeros(big_shape, stype='row_sparse')
kv.row_sparse_pull('100', out=val, row_ids=row_ids)
# prepare expected result
updated_val = mx.nd.ones(big_shape)
# apply updates from each worker
for rank in range(nworker):
for row in update_rows[rank]:
updated_val[row] += (rank + 1) * rate * nrepeat
expected = mx.nd.zeros(big_shape)
for row in row_ids_np:
expected[row] = updated_val[row]
check_diff_to_scalar(val, expected, rank=my_rank)
check_default_keys(kv, my_rank, nworker)
check_row_sparse_keys(kv, my_rank, nworker)
check_row_sparse_keys_with_zeros(kv, my_rank, nworker)
check_big_row_sparse_keys(kv, my_rank, nworker)
print('worker ' + str(my_rank) + ' is done')
if __name__ == "__main__":
test_sync_push_pull()
| StarcoderdataPython |
1628161 | <filename>app/domain/repository/musical_weather_repository.py
from datetime import datetime
from domain.model.city import CityInDb, City
from domain.repository.abstract_repository import AbstractRepository
from domain.model.track import TrackInDb
class MusicalWeatherRepository(AbstractRepository):
def get_all(self):
return self._db.query(CityInDb).all()
def create(self, city: City) -> CityInDb:
city_db = CityInDb(name=city.name, temperature=city.temperature, date=datetime.now())
city_db.track_list = [TrackInDb(name=track.name, artist=track.artist) for track in city.track_list]
self._db.add(city_db)
self._db.commit()
self._db.refresh(city_db)
return city_db
| StarcoderdataPython |
3300218 | """Module for running function evaluations in separate processes and measuring
time
Classes:
CodeBenchmark
"""
from multiprocessing import Process, Queue
from signal import signal, alarm, SIGALRM
from time import time
from benchmike import exceptions as err
from benchmike.customlogger import CustomLogger, LOGGER_NAME
class CodeBenchmark:
"""Class for measuring time of execution of function evaluation"""
logger = CustomLogger(LOGGER_NAME)
def __init__(self, path, timeout):
self.measurements = []
self.timeout = timeout
self.queue = Queue()
with open(path) as file:
self.code = compile(file.read(), path, 'exec')
self.logger.log(
"Started with path {}, timeout {}".format(path, timeout))
def run_benchmark(self, step, start, count):
"""Runs benchmark, saves data points to self.measurements, returns
measurements"""
size = start
passes_to_make = count
pass_count = 0
time_left = time_elapsed = 0.0
while time_elapsed < self.timeout and pass_count < passes_to_make:
try:
time_left = int(self.timeout - time_elapsed)
if time_left == 0:
break
data_point = self.make_measurement(size, time_left)
time_elapsed += data_point[2]
self.measurements.append((data_point[0], data_point[1]))
size += step
pass_count += 1
except err.FunTimeoutError:
print("Finished benchmarking")
self.logger.log(
"Benchmark timeouted at {} passes".format(pass_count))
break
except err.FunctionsNotFoundError as ex:
raise err.BenchmarkRuntimeError(ex.message)
except RuntimeError:
raise err.BenchmarkRuntimeError(
"Caught other type of runtime error")
except Exception as ex:
raise err.BenchmarkRuntimeError(repr(ex))
self.logger.log(
"Finished benchmarking with {} passes and {} s left".format(
pass_count, time_left))
return self.measurements
@staticmethod
def signal_handler(signum, frame):
"""Signal handler for run_code"""
raise err.FunTimeoutError("Timeout error: process was too slow")
def run_code(self, size, timeout):
"""This runs code in separate thread to measure time, requires
set_up(size) and run(size) methods in code to be executed"""
signal(SIGALRM, CodeBenchmark.signal_handler)
exec(self.code, globals())
alarm(timeout)
whole_start_time = time()
try:
exec('set_up(size)')
run_start_time = time()
exec('run(size)')
whole_end_time = run_end_time = time()
self.queue.put((size, run_end_time - run_start_time,
whole_end_time - whole_start_time))
except err.FunTimeoutError as ex:
self.queue.put((size, ex, time() - whole_start_time))
except TypeError:
self.queue.put(
(size, err.FunctionsNotFoundError(
"Could not find set_up() or run() methods in input file"),
time() - whole_start_time))
self.logger.log("File doesn't contain required methods")
except Exception as ex:
self.queue.put((size, RuntimeError(ex), time() - whole_start_time))
def make_measurement(self, size, timeout):
"""This will return tuple (size, run_time, full_time) or rethrow
exception"""
@self.logger.log_fun
def run_process():
p = Process(target=self.run_code,
args=(size, timeout))
p.start()
run_result = self.queue.get()
p.join()
return run_result
result = run_process()
if isinstance(result[1], err.FunTimeoutError):
raise result[1]
elif isinstance(result[1], err.FunctionsNotFoundError):
raise result[1]
elif isinstance(result[1], RuntimeError):
raise result[1]
elif result:
return result
else:
raise Exception("Queue returned empty value")
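if __name__ == "__main__":
    # Example run (not part of the original module). The benchmarked file is a
    # placeholder path; it must define `set_up(size)` and `run(size)` at module
    # level, as required by `run_code` above.
    benchmark = CodeBenchmark("example_target.py", timeout=30)
    points = benchmark.run_benchmark(step=100, start=100, count=10)
    for size, seconds in points:
        print("n={}: {:.6f} s".format(size, seconds))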
| StarcoderdataPython |
126424 | def do(i):
return i + 2
| StarcoderdataPython |
1632146 |
# Library imports
import random
# Project imports
from hiddil.crypt import PublicKey, b64_encode, b64_decode
from storage import Storage
from storage.uid import UID
from hiddil.exceptions import *
class Block:
BLOCK_NUM_MAX = 999999999999999
BLOCK_NUM_MIN = 1
def __init__(self, block_number: int, storage: Storage, public_key: PublicKey):
"""
Wrapper block object for storing data bytes in storage class, and managing expiration etc
:param block_number: Block number to access
:param storage: Reference to the storage instance the block is stored in
:param public_key: Public key the block is stored with
"""
        # If the block number is out of bounds, raise an exception
        if not (self.BLOCK_NUM_MIN <= block_number <= self.BLOCK_NUM_MAX):
raise BlockNumOutofBounds("value:{}, max:{}, min{}".format(block_number, self.BLOCK_NUM_MAX,
self.BLOCK_NUM_MIN))
# Store args in class vars
self._storage = storage
self._pubkey = public_key
self._uid = UID(public_key.key_id)
self._block_number = block_number
self._key = str(self.block_number)
@property
def public_key(self) -> PublicKey:
return self._pubkey
@property
def block_number(self) -> int:
return self._block_number
@property
def data(self) -> bytes:
return self._storage.get(self._uid, self._key)
@data.setter
def data(self, new_data: bytes):
self._storage.put(self._uid, self._key, new_data)
def as_b64(self) -> str:
return b64_encode(self.data)
def from_b64(self, b64_data: str):
self.data = b64_decode(b64_data)
def erase(self):
self._storage.delete(self._uid, self._key)
def _key(self) -> str:
return str(self.block_number)
def __iter__(self):
        raise NotImplementedError
def __len__(self):
return len(self._storage.get(self._uid, self._key))
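# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). How `Storage` and
# `PublicKey` are constructed is project-specific and assumed here.
# ---------------------------------------------------------------------------
# storage = Storage()                        # assumption: default constructor
# pubkey = PublicKey(...)                    # must expose a `key_id` attribute
# block = Block(block_number=1, storage=storage, public_key=pubkey)
# block.data = b"hello hiddil"               # stored under UID(pubkey.key_id)
# print(block.as_b64())                      # base64 view of the stored bytes
# block.erase()                              # remove the block from storage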
| StarcoderdataPython |
4822909 | <filename>parameters.py<gh_stars>1-10
import csv
import numpy as np
import sys
import time
import random
from typing import List , Tuple , Optional , Dict , Callable , Any
import copy
from matrix_implementations import *
from measurement import measure, generate_input
### For benchmark functions
OptTuple3i = Optional[Tuple[int ,int ,int]]
FunType = Callable [[List[int]], OptTuple3i]
### Parameters enabling runing on different computers
sleep = False
warm_up = True
file_name_prefix = "GG" # enter your prefix
relative_path = "/horse_race/results/"
## parameters for all the experiment. Modify here to run experiments with different parameters.
n_list = [2,4,8,16,32,64,128,256] ### n list for experiment with multiple values of m and n for strassen & write_through
m_list = [0,2,4,8,16,32,64,128] ### m list to find the optimal m parameter for n=256 for strassen & write_through
s_list = [2,4,8,16,32,64,128] ### s list for tiled experiment - finding optimal value of s
n = 256
N = 3
s = 32
m_strassen = 8
m_write_trhough = 16
column_titles_s = ['s', 'time(s)', 'stdv']
column_titles_m = ['m', 'time(s)', 'stdv']
column_titles_n = ['n', 'time(s)', 'stdv']
def write_csv(n_list: list, res: np.ndarray, filename: str, column_titles: list = None):
"""write_csv
Args:
n_list (list): list of n (the matrix side length) that the the experiment is run with
res (np.ndarray): results from the experiment
filename (str): the filename that you desire
column_titles (lst): takes a list with the columns title for the csv file. The titles should be given comma seperated words and no spaces
"""
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        if column_titles is not None:
            writer.writerow(column_titles)
        for i in range(len(n_list)):
            writer.writerow([n_list[i]] + res[i, :].tolist()) | StarcoderdataPython |
3397302 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import datetime
import os
import pyrax
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
clb = pyrax.cloud_loadbalancers
# Get load balancer usage
usage = clb.get_usage()
print("Usage for Account:", usage["accountId"])
print()
print("Account Usage Records")
print("-" * 30)
au_recs = usage["accountUsage"]
for rec_key in list(au_recs.keys())[:5]:
recs = au_recs[rec_key]
if len(recs) > 5:
print("(only the first 5 records...)")
print(recs[:5])
print()
print("Load Balancer Usage Records")
print("-" * 30)
lb_recs = usage["loadBalancerUsages"]
if len(lb_recs) > 5:
print("(only the first 5 records...)")
for rec in lb_recs[:5]:
print(rec)
print()
| StarcoderdataPython |
1693671 | # Generated by Django 2.0.13 on 2019-09-26 10:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracking', '0002_auto_20180918_2014'),
]
operations = [
migrations.AddField(
model_name='visitor',
name='medium',
field=models.CharField(blank=True, editable=False, max_length=255, null=True),
),
migrations.AddField(
model_name='visitor',
name='source',
field=models.CharField(blank=True, editable=False, max_length=255, null=True),
),
]
| StarcoderdataPython |
3283639 | import pyfastnoisesimd.extension as ext
import concurrent.futures as cf
import numpy as np
from enum import Enum
_MIN_CHUNK_SIZE = 8192
def empty_aligned(shape, dtype=np.float32, n_byte=ext.SIMD_ALIGNMENT):
"""
    Provides a memory-aligned array for use with SIMD accelerated instructions.
    Should be used to build arrays that are compatible with the SIMD noise generators.
Adapted from: https://github.com/hgomersall/pyFFTW/blob/master/pyfftw/utils.pxi
Args:
shape: a sequence (typically a tuple) of array axes.
dtype: NumPy data type of the underlying array. Note FastNoiseSIMD supports
only `np.float32`. Seg faults may occur if this is changed.
n_byte: byte alignment. Should always use the `pyfastnoisesimd.extension.SIMD_ALIGNMENT`
value or seg faults may occur.
"""
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
if not isinstance(shape, (int, np.integer)):
array_length = 1
for each_dimension in shape:
array_length *= each_dimension
else:
array_length = shape
# Allocate a new array that will contain the aligned data
buffer = np.empty(array_length * itemsize + n_byte, dtype='int8')
offset = (n_byte - buffer.ctypes.data) % n_byte
aligned = buffer[offset:offset-n_byte].view(dtype).reshape(shape)
return aligned
def full_aligned(shape, fill, dtype=np.float32, n_byte=ext.SIMD_ALIGNMENT):
"""
As per `empty_aligned`, but returns an array initialized to a constant value.
Args:
shape: a sequence (typically a tuple) of array axes.
fill: the value to fill each array element with.
dtype: NumPy data type of the underlying array. Note FastNoiseSIMD supports
only `np.float32`. Seg faults may occur if this is changed.
n_byte: byte alignment. Should always use the `pyfastnoisesimd.extension.SIMD_ALIGNMENT`
value or seg faults may occur.
"""
aligned = empty_aligned(shape, dtype=dtype, n_byte=n_byte)
aligned.fill(fill)
return aligned
def empty_coords(length, dtype=np.float32, n_byte=ext.SIMD_ALIGNMENT):
"""
"""
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
# We need to expand length to be a multiple of the vector size
vect_len = max(ext.SIMD_ALIGNMENT // itemsize, 1)
aligned_len = int(vect_len*np.ceil(length/vect_len))
shape = (3, aligned_len)
coords = empty_aligned(shape)
# Lots of trouble with passing sliced views out, with over-running the
# array and seg-faulting on an invalid write.
# return coords[:,:length]
return coords
def check_alignment(array):
"""
Verifies that an array is aligned correctly for the supported SIMD level.
Args:
array: numpy.ndarray
Returns:
truth: bool
"""
return ((ext.SIMD_ALIGNMENT - array.ctypes.data) % ext.SIMD_ALIGNMENT) == 0
def check_coords(array):
"""
Verifies that an array is aligned correctly for the supported SIMD level.
Args:
array: numpy.ndarray
Returns:
truth: bool
"""
# print(' Array shape: ', array.shape)
# if array.base is not None:
# print(' Base shape: ', array.base.shape)
base = array if array.base is None else array.base
# print(' Check alignment: ', check_alignment(array))
# print(' Base alignment error: ', (array.shape[1]*array.dtype.itemsize)%ext.SIMD_ALIGNMENT)
return check_alignment(array) \
        and (array.shape[1] * array.dtype.itemsize) % ext.SIMD_ALIGNMENT == 0
def aligned_chunks(array, n_chunks, axis=0):
"""
    A generator that divides an array into chunks that have memory
addresses compatible with SIMD vector length.
Args:
array: numpy.ndarray
the array to chunk
n_chunks: int
the desired number of chunks, the returned number _may_ be less.
axis: int
            the axis to chunk on, following the usual `numpy` ``axis`` convention.
Returns
chunk: numpy.ndarray
start: Tuple[int]
the start indices of the chunk, in the `array`.
"""
block_size = 1
if array.ndim > axis + 1:
block_size = np.product(array.shape[axis:])
# print(f'Got blocksize of {block_size}')
vect_len = max(ext.SIMD_ALIGNMENT // array.dtype.itemsize, 1)
if block_size % vect_len == 0:
# Iterate at-will, the underlying blocks have the correct shape
slice_size = int(np.ceil(array.shape[axis] / n_chunks))
else:
# Round slice_size up to nearest vect_len
slice_size = int(vect_len*np.ceil(array.shape[axis] / n_chunks /vect_len))
# print('Slice size: ', slice_size)
offset = 0
chunk_index = 0
while(chunk_index < n_chunks):
# Dynamic slicing is pretty clumsy, unfortunately:
# https://stackoverflow.com/questions/24398708/slicing-a-numpy-array-along-a-dynamically-specified-axis#47859801
# so just use some nested conditions.
if axis == 0:
if array.ndim == 1:
chunk = array[offset:offset+slice_size]
else:
chunk = array[offset:offset+slice_size, ...]
elif axis == 1:
if array.ndim == 2:
chunk = array[:, offset:offset+slice_size]
else:
chunk = array[:, offset:offset+slice_size, ...]
elif axis == 2:
chunk = array[:, :, offset:offset+slice_size]
# print(f'Chunk has memory addr: {chunk.ctypes.data}, remain: {chunk.ctypes.data%ext.SIMD_ALIGNMENT}')
yield chunk, offset
offset += slice_size
chunk_index += 1
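# Example (not part of the original module): iterate over SIMD-aligned slabs of
# a noise volume along axis 0; `consumed` is the start index of each chunk.
#
#     vol = empty_aligned((64, 256, 256))
#     for chunk, consumed in aligned_chunks(vol, n_chunks=4, axis=0):
#         chunk[...] = consumed      # fill each slab with its start index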
def num_virtual_cores():
"""
Detects the number of virtual cores on a system without importing
``multiprocessing``. Borrowed from NumExpr 2.6.
"""
import os, subprocess
# Linux, Unix and MacOS
if hasattr(os, 'sysconf'):
if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf('SC_NPROCESSORS_ONLN')
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(subprocess.check_output(['sysctl', '-n', 'hw.ncpu']))
# Windows
if 'NUMBER_OF_PROCESSORS' in os.environ:
ncpus = int(os.environ['NUMBER_OF_PROCESSORS'])
if ncpus > 0:
return ncpus
else:
return 1
# TODO: need method for ARM7/8
return 1 # Default
class NoiseType(Enum):
"""
The class of noise generated.
Enums: ``{Value, ValueFractal, Perlin, PerlinFractal, Simplex, SimplexFractal, WhiteNoise, Cellular, Cubic, CubicFractal}``
"""
Value = 0
ValueFractal = 1
Perlin = 2
PerlinFractal = 3
Simplex = 4
SimplexFractal = 5
WhiteNoise = 6
Cellular = 7
Cubic = 8
CubicFractal = 9
class FractalType(Enum):
"""
Enum: Fractal noise types also have an additional fractal type.
Values: ``{FBM, Billow, RigidMulti}``"""
FBM = 0
Billow = 1
RigidMulti = 2
class PerturbType(Enum):
"""
Enum: The enumerator for the class of Perturbation.
Values: ``{NoPeturb, Gradient, GradientFractal, Normalise, Gradient_Normalise, GradientFractal_Normalise}``
"""
NoPerturb = 0
Gradient = 1
GradientFractal = 2
Normalise = 3
Gradient_Normalise = 4
GradientFractal_Normalise = 5
class CellularDistanceFunction(Enum):
"""
Enum: The distance function for cellular noise.
Values: ``{Euclidean, Manhattan, Natural}``"""
Euclidean = 0
Manhattan = 1
Natural = 2
class CellularReturnType(Enum):
"""
Enum: The functional filter to apply to the distance function to generate the
return from cellular noise.
Values: ``{CellValue, Distance, Distance2, Distance2Add, Distance2Sub, Distance2Mul, Distance2Div, NoiseLookup, Distance2Cave}``
"""
CellValue = 0
Distance = 1
Distance2 = 2
Distance2Add = 3
Distance2Sub = 4
Distance2Mul = 5
Distance2Div = 6
NoiseLookup = 7
Distance2Cave = 8
class FractalClass(object):
"""
Holds properties related to noise types that include fractal octaves.
Do not instantiate this class separately from `Noise`.
"""
def __init__(self, fns):
self._fns = fns
self._octaves = 3
self._lacunarity = 2.0
self._gain = 0.5
self._fractalType = FractalType.FBM
@property
def fractalType(self) -> FractalType:
"""
The type of fractal for fractal NoiseTypes.
Default: ``FractalType.FBM``"""
return self._fractalType
@fractalType.setter
def fractalType(self, new):
if isinstance(new, FractalType):
pass
elif isinstance(new, int):
            new = FractalType(new)
elif isinstance(new, str):
new = FractalType[new]
else:
raise TypeError('Unparsable type for fractalType: {}'.format(type(new)))
self._fractalType = new
self._fns.SetFractalType(new.value)
@property
def octaves(self) -> int:
"""
Octave count for all fractal noise types, i.e. the number of
log-scaled frequency levels of noise to apply. Generally ``3`` is
sufficient for small textures/sprites (256x256 pixels), use larger
values for larger textures/sprites.
Default: ``3``
"""
return self._octaves
@octaves.setter
def octaves(self, new):
self._octaves = int(new)
self._fns.SetFractalOctaves(int(new))
@property
def lacunarity(self) -> float:
"""
Octave lacunarity for all fractal noise types.
Default: ``2.0``
"""
return self._lacunarity
@lacunarity.setter
def lacunarity(self, new):
self._lacunarity = float(new)
self._fns.SetFractalLacunarity(float(new))
@property
def gain(self) -> float:
"""
Octave gain for all fractal noise types. Reflects the ratio
of the underlying noise to that of the fractal. Values > 0.5 up-weight
the fractal.
Default: ``0.5``
"""
return self._gain
@gain.setter
def gain(self, new):
self._gain = float(new)
self._fns.SetFractalGain(float(new))
class CellularClass(object):
"""
Holds properties related to `NoiseType.Cellular`.
Do not instantiate this class separately from ``Noise``.
"""
def __init__(self, fns):
self._fns = fns
self._returnType = CellularReturnType.Distance
self._distanceFunc = CellularDistanceFunction.Euclidean
self._noiseLookupType = NoiseType.Simplex
self._lookupFrequency = 0.2
self._jitter = 0.45
self._distanceIndices = (0.0, 1.0)
@property
def returnType(self):
"""
The return type for cellular (cubic Voronoi) noise.
Default: ``CellularReturnType.Distance``
"""
return self._returnType
@returnType.setter
def returnType(self, new):
if isinstance(new, CellularReturnType):
pass
elif isinstance(new, int):
            new = CellularReturnType(new)
elif isinstance(new, str):
new = CellularReturnType[new]
else:
raise TypeError('Unparsable type for returnType: {}'.format(type(new)))
self._returnType = new
self._fns.SetCellularReturnType(new.value)
@property
def distanceFunc(self):
return self._distanceFunc
@distanceFunc.setter
def distanceFunc(self, new):
"""
The distance function for cellular (cubic Voronoi) noise.
Default: ``CellularDistanceFunction.Euclidean``
"""
if isinstance(new, CellularDistanceFunction):
pass
elif isinstance(new, int):
            new = CellularDistanceFunction(new)
elif isinstance(new, str):
new = CellularDistanceFunction[new]
else:
raise TypeError('Unparsable type for distanceFunc: {}'.format(type(new)))
self._distanceFunc = new
self._fns.SetCellularDistanceFunction(new.value)
@property
def noiseLookupType(self) -> NoiseType:
"""
Sets the type of noise used if cellular return type.
Default: `NoiseType.Simplex`
"""
return self._noiseLookupType
@noiseLookupType.setter
def noiseLookupType(self, new):
if isinstance(new, NoiseType):
pass
elif isinstance(new, int):
            new = NoiseType(new)
elif isinstance(new, str):
new = NoiseType[new]
else:
raise TypeError('Unparsable type for noiseLookupType: {}'.format(type(new)))
self._noiseLookupType = new
self._fns.SetCellularNoiseLookupType(new.value)
@property
def lookupFrequency(self):
"""
Relative frequency on the cellular noise lookup return type.
Default: ``0.2``
"""
return self._lookupFrequency
@lookupFrequency.setter
def lookupFrequency(self, new):
self._lookupFrequency = float(new)
self._fns.SetCellularNoiseLookupFrequency(float(new))
@property
def jitter(self):
"""
The maximum distance a cellular point can move from it's grid
position. The value is relative to the cubic cell spacing of ``1.0``.
Setting ``jitter > 0.5`` can generate wrapping artifacts.
Default: ``0.45``
"""
return self._jitter
@jitter.setter
def jitter(self, new):
self._jitter = float(new)
self._fns.SetCellularJitter(float(new))
@property
def distanceIndices(self) -> tuple:
"""
Sets the two distance indices used for ``distance2X`` return types
Default: ``(0, 1)``
.. note: * index0 should be lower than index1
* Both indices must be ``>= 0``
* index1 must be ``< 4``
"""
return self._distanceIndices
@distanceIndices.setter
def distanceIndices(self, new):
if not hasattr(new, '__len__') or len(new) != 2:
raise ValueError( 'distanceIndices must be a length 2 array/list/tuple' )
new = list(new)
if new[0] < 0:
new[0] = 0
if new[1] < 0:
new[0] = 0
if new[1] >= 4:
new[1] = 3
if new[0] >= new[1]:
new[0] = new[1]-1
self._distanceIndices = new
return self._fns.SetCellularDistance2Indices(*new)
class PerturbClass(object):
"""
Holds properties related to the perturbation applied to noise.
Do not instantiate this class separately from ``Noise``.
"""
def __init__(self, fns):
self._fns = fns
self._perturbType = PerturbType.NoPerturb
self._amp = 1.0
self._frequency = 0.5
self._octaves = 3
self._lacunarity = 2.0
self._gain = 2.0
self._normaliseLength = 1.0
@property
def perturbType(self) -> PerturbType:
"""
The class of perturbation.
Default: ``PerturbType.NoPeturb``
"""
return self._perturbType
@perturbType.setter
def perturbType(self, new):
if isinstance(new, PerturbType):
pass
elif isinstance(new, int):
            new = PerturbType(new)
elif isinstance(new, str):
new = PerturbType[new]
else:
raise TypeError('Unparsable type for perturbType: {}'.format(type(new)))
self._perturbType = new
return self._fns.SetPerturbType(new.value)
@property
def amp(self) -> float:
"""
The maximum distance the input position can be perturbed. The
reasonable values of ``amp`` before artifacts are apparent increase with
decreased ``frequency``. The default value of ``1.0`` is quite high.
Default: ``1.0``
"""
return self._amp
@amp.setter
def amp(self, new):
self._amp = float(new)
return self._fns.SetPerturbAmp(float(new))
@property
def frequency(self) -> float:
"""
The relative frequency for the perturbation gradient.
Default: ``0.5``
"""
return self._frequency
@frequency.setter
def frequency(self, new):
self._frequency = float(new)
return self._fns.SetPerturbFrequency(float(new))
@property
def octaves(self) -> int:
"""
The octave count for fractal perturbation types, i.e. the number of
log-scaled frequency levels of noise to apply. Generally ``3`` is
sufficient for small textures/sprites (256x256 pixels), use larger values for
larger textures/sprites.
Default: ``3``
"""
return self._octaves
@octaves.setter
def octaves(self, new):
self._octaves = int(new)
return self._fns.SetPerturbFractalOctaves(int(new))
@property
def lacunarity(self) -> float:
"""
The octave lacunarity (gap-fill) for fractal perturbation types.
Lacunarity increases the fineness of fractals. The appearance of
graininess in fractal noise occurs when lacunarity is too high for
the given frequency.
Default: ``2.0``
"""
return self._lacunarity
@lacunarity.setter
def lacunarity(self, new):
self._lacunarity = float(new)
return self._fns.SetPerturbFractalLacunarity(float(new))
@property
def gain(self) -> float:
"""
The octave gain for fractal perturbation types. Reflects the ratio
of the underlying noise to that of the fractal. Values > 0.5 up-weight
the fractal.
Default: ``0.5``
"""
return self._gain
@gain.setter
def gain(self, new):
self._gain = float(new)
return self._fns.SetPerturbFractalGain(float(new))
@property
def normaliseLength(self) -> float:
"""
The length for vectors after perturb normalising
Default: ``1.0``
"""
return self._normaliseLength
@normaliseLength.setter
def normaliseLength(self, new):
self._normaliseLength = float(new)
return self._fns.SetPerturbNormaliseLength(float(new))
def _chunk_noise_grid(fns, chunk, chunkStart, chunkAxis, start=[0,0,0]):
"""
For use by ``concurrent.futures`` to multi-thread ``Noise.genAsGrid()`` calls.
"""
dataPtr = chunk.__array_interface__['data'][0]
# print( 'pointer: {:X}, start: {}, shape: {}'.format(dataPtr, chunkStart, chunk.shape) )
if chunkAxis == 0:
fns.FillNoiseSet(chunk, chunkStart+start[0], start[1], start[2], *chunk.shape)
elif chunkAxis == 1:
fns.FillNoiseSet(chunk, start[0], chunkStart+start[1], start[2], *chunk.shape)
else:
fns.FillNoiseSet(chunk, start[0], start[1], chunkStart+start[2], *chunk.shape)
class Noise(object):
"""
``Noise`` encapsulates the C++ SIMD class ``FNSObject`` and enables get/set
of all relative properties via Python properties.
Args:
seed: The random number (int32) that seeds the random-number generator
If ``seed == None`` a random integer is generated as the seed.
numWorkers: The number of threads used for parallel noise generation.
If ``numWorkers == None``, the default applied by
`concurrent.futures.ThreadPoolExecutor` is used.
"""
def __init__(self, seed: int=None, numWorkers: int=None):
self._fns = ext.FNS()
if numWorkers is not None:
self._num_workers = int(numWorkers)
else:
self._num_workers = num_virtual_cores()
self._asyncExecutor = cf.ThreadPoolExecutor(max_workers = self._num_workers)
# Sub-classed object handles
self.fractal = FractalClass(self._fns)
self.cell = CellularClass(self._fns)
self.perturb = PerturbClass(self._fns)
if seed is not None:
self.seed = seed # calls setter
else:
self.seed = np.random.randint(-2147483648, 2147483647)
# Syncronizers for property getters should use the default values as
# stated in `FastNoiseSIMD.h`
self._noiseType = NoiseType.Simplex
self._frequency = 0.01
self._axesScales = (1.0, 1.0, 1.0)
@property
def numWorkers(self) -> int:
"""
Sets the maximum number of thread workers that will be used for
generating noise. Generally should be the number of physical CPU cores
on the machine.
Default: Number of virtual cores on machine.
"""
return self._num_workers
@numWorkers.setter
def numWorkers(self, N_workers) -> int:
N_workers = int(N_workers)
if N_workers <= 0:
raise ValueError('numWorkers must be greater than 0')
self._num_workers = N_workers
self._asyncExecutor = cf.ThreadPoolExecutor(max_workers = N_workers)
@property
def seed(self) -> int:
"""
The random-number seed used for generation of noise.
Default: ``numpy.random.randint()``
"""
return self._fns.GetSeed()
@seed.setter
def seed(self, new):
return self._fns.SetSeed(int(np.int32(new)))
@property
def frequency(self) -> float:
"""
The frequency of the noise, lower values result in larger noise features.
Default: ``0.01``
"""
return self._frequency
@frequency.setter
def frequency(self, new):
self._frequency = float(new)
return self._fns.SetFrequency(float(new))
@property
def noiseType(self) -> NoiseType:
"""
The class of noise.
Default: ``NoiseType.Simplex``
"""
return self._noiseType
@noiseType.setter
def noiseType(self, new):
if isinstance(new, NoiseType):
pass
elif isinstance(new, int):
            new = NoiseType(new)
elif isinstance(new, str):
new = NoiseType[new]
else:
raise TypeError('Unparsable type for noiseType: {}'.format(type(new)))
self._noiseType = new
return self._fns.SetNoiseType(new.value)
@property
def axesScales(self) -> tuple:
"""
Sets the FastNoiseSIMD axes scales, which allows for non-square
voxels. Indirectly affects `frequency` by changing the voxel pitch.
Default: ``(1.0, 1.0, 1.0)``
"""
return self._axesScales
@axesScales.setter
def axesScales(self, new: tuple):
if not hasattr(new, '__len__') or len(new) != 3:
raise ValueError( 'axesScales must be a length 3 array/list/tuple' )
self._axesScales = new
return self._fns.SetAxesScales(*new)
def genAsGrid(self, shape=[1,1024,1024], start=[0,0,0]) -> np.ndarray:
"""
Generates noise according to the set properties along a rectilinear
(evenly-spaced) grid.
Args:
shape: Tuple[int]
the shape of the output noise volume.
start: Tuple[int]
the starting coordinates for generation of the grid.
I.e. the coordinates are essentially `start: start + shape`
Example::
import numpy as np
import pyfastnoisesimd as fns
noise = fns.Noise()
            result = noise.genAsGrid(shape=[256,256,256], start=[0,0,0])
            nextResult = noise.genAsGrid(shape=[256,256,256], start=[256,0,0])
"""
if isinstance(shape, (int, np.integer)):
shape = (shape,)
# There is a minimum array size before we bother to turn on futures.
size = np.product(shape)
        # size needs to be evenly divisible by ext.SIMD_ALIGNMENT
if np.remainder(size, ext.SIMD_ALIGNMENT/np.dtype(np.float32).itemsize) != 0.0:
raise ValueError('The size of the array (in bytes) must be evenly divisible by the SIMD vector length')
result = empty_aligned(shape)
# Shape could be 1 or 2D, so we need to expand it with singleton
# dimensions for the FillNoiseSet call
if len(start) == 1:
start = [start[0], 0, 0]
elif len(start) == 2:
start = [start[0], start[1], 1]
else:
start = list(start)
start_zero = start[0]
if self._num_workers <= 1 or size < _MIN_CHUNK_SIZE:
# print('Grid single-threaded')
if len(shape) == 1:
shape = (*shape, 1, 1)
elif len(shape) == 2:
shape = (*shape, 1)
else:
shape = shape
self._fns.FillNoiseSet(result, *start, *shape)
return result
# else run in threaded mode.
n_chunks = np.minimum(self._num_workers, shape[0])
# print(f'genAsGrid using {n_chunks} chunks')
# print('Grid multi-threaded')
workers = []
for I, (chunk, consumed) in enumerate(aligned_chunks(result, n_chunks, axis=0)):
# print(f'{I}: Got chunk of shape {chunk.shape} with {consumed} consumed')
if len(chunk.shape) == 1:
chunk_shape = (*chunk.shape, 1, 1)
elif len(chunk.shape) == 2:
chunk_shape = (*chunk.shape, 1)
else:
chunk_shape = chunk.shape
start[0] = start_zero + consumed
# print('len start: ', len(start), ', len shape: ', len(chunk_shape))
peon = self._asyncExecutor.submit(self._fns.FillNoiseSet,
chunk, *start, *chunk_shape)
workers.append(peon)
for peon in workers:
peon.result()
# For memory management we have to tell NumPy it's ok to free the memory
# region when it is dereferenced.
# self._fns._OwnSplitArray(noise)
return result
def genFromCoords(self, coords: np.ndarray) -> np.ndarray:
"""
Generate noise from supplied coordinates, rather than a rectilinear grid.
Useful for complicated shapes, such as tesselated surfaces.
Args:
coords: 3-D coords as generated by ``fns.empty_coords()``
and filled with relevant values by the user.
Returns:
noise: a shape (N,) array of the generated noise values.
Example::
import numpy as np
import pyfastnoisesimd as fns
numCoords = 256
            coords = fns.empty_coords(numCoords)
            # Set the coordinate values; it is a (3, numCoords) array
coords[0,:] = np.linspace(-np.pi, np.pi, numCoords)
coords[1,:] = np.linspace(-1.0, 1.0, numCoords)
coords[2,:] = np.zeros(numCoords)
noise = fns.Noise()
result = noise.genFromCoords(coords)
"""
if not isinstance(coords, np.ndarray):
raise TypeError('`coords` must be of type `np.ndarray`, not type: ', type(coords))
if coords.ndim != 2:
raise ValueError('`coords` must be a 2D array')
shape = coords.shape
if shape[0] != 3:
raise ValueError('`coords.shape[0]` must equal 3')
if not check_alignment(coords):
raise ValueError('Memory alignment of `coords` is not valid')
if coords.dtype != np.float32:
raise ValueError('`coords` must be of dtype `np.float32`')
if np.remainder(coords.shape[1], ext.SIMD_ALIGNMENT/np.dtype(np.float32).itemsize) != 0.0:
raise ValueError('The number of coordinates must be evenly divisible by the SIMD vector length')
itemsize = coords.dtype.itemsize
result = empty_aligned(shape[1])
if self._num_workers <= 1 or shape[1] < _MIN_CHUNK_SIZE:
self._fns.NoiseFromCoords(result,
coords[0,:], coords[1,:], coords[2,:], shape[1], 0)
return result
n_chunks = np.minimum(self._num_workers,
shape[1] * itemsize / ext.SIMD_ALIGNMENT)
workers = []
# for I, ((result_chunk, r_offset), (coord_chunk, offset)) in enumerate(zip(
# aligned_chunks(result, self._num_workers, axis=0),
# aligned_chunks(coords, self._num_workers, axis=1))):
for I, (result_chunk, offset) in enumerate(
aligned_chunks(result, self._num_workers, axis=0)):
# aligned_size = int(vect_len*np.ceil(result_chunk.size/vect_len))
# print(f' {I}: Got chunk of length {result_chunk.size}, AlignedSize would be: {aligned_size}')
# print(' Offset: ', offset, ', offset error: ', offset % 8)
# zPtr = (coords[0,:].ctypes.data + offset) % 8
# yPtr = (coords[1,:].ctypes.data + offset) % 8
# xPtr = (coords[2,:].ctypes.data + offset) % 8
# print(f' Pointer alignment: {zPtr, yPtr, xPtr}')
# peon = self._asyncExecutor.submit(self._fns.NoiseFromCoords, result,
# coords[0,:], coords[1,:], coords[2,:], aligned_size, offset)
peon = self._asyncExecutor.submit(self._fns.NoiseFromCoords, result,
coords[0,:], coords[1,:], coords[2,:], result_chunk.size, offset)
workers.append(peon)
for peon in workers:
peon.result()
return result
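if __name__ == '__main__':
    # Example usage (not part of the original module): configure a fractal
    # Simplex generator and fill a 3-D grid using the threaded path. The shape
    # below is an arbitrary placeholder; any SIMD-compatible shape works.
    noise = Noise(seed=42, numWorkers=4)
    noise.noiseType = NoiseType.SimplexFractal
    noise.frequency = 0.02
    noise.fractal.octaves = 4
    noise.fractal.gain = 0.45
    volume = noise.genAsGrid(shape=[4, 256, 256], start=[0, 0, 0])
    print(volume.shape, volume.dtype, volume.mean())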
| StarcoderdataPython |