ext (string, 9 distinct values) | sha (string, length 40) | content (string, 3 – 1.04M chars)
---|---|---|
py
|
1a582ead54f304207a87505d8997446e264cf0d7
|
import _plotly_utils.basevalidators
class YperiodalignmentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="yperiodalignment", parent_name="funnel", **kwargs):
super(YperiodalignmentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["start", "middle", "end"]),
**kwargs
)
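# Illustrative note (not part of the generated file): plotly uses this validator to
# check the funnel trace's "yperiodalignment" property, e.g.
# YperiodalignmentValidator().validate_coerce("middle") accepts the value, while
# anything outside {"start", "middle", "end"} is rejected as invalid.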
|
py
|
1a582f017d4deb1dc417964aff337049acdfcd62
|
def evalRec(env, rec):
"""Has Damaging Predictions"""
if (rec.Severity > 2):
return True
# 2.a. Present in ClinVar Path, Likely Path, VUS (worst annotation).
clinvar_clinically_significant = (rec.Clinvar_Benign == False) \
and (rec.Clinvar_Trusted_Benign in {False, "No data"})
if (clinvar_clinically_significant):
return True
# Include Splice Altering variants
if (rec.splice_ai_dsmax > 0.2):
return True
if len(rec.Polyphen &
{"possibly_damaging", "probably_damaging"}) > 0:
return True
if (len(rec.Polyphen_2_HVAR) > 0 and
len(rec.Polyphen_2_HVAR - {"P", "D"}) == 0):
return True
if (len(rec.Polyphen_2_HDIV) > 0 and
len(rec.Polyphen_2_HDIV - {"P", "D"}) == 0):
return True
return len(rec.SIFT &
{"deleterious", "tolerated_low_confidence"}) > 0
|
py
|
1a582f46ab02dae9802b1a20d0d6b5c26b6791d2
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.storage.v1", manifest={"AvroSchema", "AvroRows",},
)
class AvroSchema(proto.Message):
r"""Avro schema.
Attributes:
schema (str):
Json serialized schema, as described at
https://avro.apache.org/docs/1.8.1/spec.html.
"""
schema = proto.Field(proto.STRING, number=1,)
class AvroRows(proto.Message):
r"""Avro rows.
Attributes:
serialized_binary_rows (bytes):
Binary serialized rows in a block.
row_count (int):
The count of rows in the returning block.
"""
serialized_binary_rows = proto.Field(proto.BYTES, number=1,)
row_count = proto.Field(proto.INT64, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
py
|
1a582f50553344cd344c92b87265938fe47aec68
|
import numpy as np
def laplace_numpy(image):
"""Laplace operator in NumPy for 2D images."""
laplacian = (
image[:-2, 1:-1]
+ image[2:, 1:-1]
+ image[1:-1, :-2]
+ image[1:-1, 2:]
- 4 * image[1:-1, 1:-1]
)
thresh = np.abs(laplacian) > 0.05
return thresh
def laplace_loops(image):
"""Laplace operator for 2D images."""
h = image.shape[0]
w = image.shape[1]
laplacian = np.empty((h - 2, w - 2), np.uint8)
for i in range(1, h - 1):
for j in range(1, w - 1):
laplacian[i - 1, j - 1] = (
np.abs(
image[i - 1, j]
+ image[i + 1, j]
+ image[i, j - 1]
+ image[i, j + 1]
- 4 * image[i, j]
)
> 0.05
)
return laplacian
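# Quick self-check sketch (added illustration, not in the original file): the two
# implementations should agree on a random float image once the dtypes are aligned.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    img = rng.random((64, 64))
    vectorized = laplace_numpy(img)                # boolean edge mask
    looped = laplace_loops(img).astype(bool)       # uint8 mask -> bool for comparison
    assert np.array_equal(vectorized, looped)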
|
py
|
1a582f5ae68aca8c2e962b65734137c06a3a06ba
|
#
# PySNMP MIB module PAIRGAIN-DSLAM-ALARM-SEVERITY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/PAIRGAIN-DSLAM-ALARM-SEVERITY-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:36:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
pgDSLAMAlarmSeverity, pgDSLAMAlarm = mibBuilder.importSymbols("PAIRGAIN-COMMON-HD-MIB", "pgDSLAMAlarmSeverity", "pgDSLAMAlarm")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, NotificationType, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Gauge32, Integer32, Counter32, iso, IpAddress, ObjectIdentity, TimeTicks, ModuleIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "NotificationType", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Gauge32", "Integer32", "Counter32", "iso", "IpAddress", "ObjectIdentity", "TimeTicks", "ModuleIdentity", "MibIdentifier")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
pgdsalsvMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1))
if mibBuilder.loadTexts: pgdsalsvMIB.setLastUpdated('9901141600Z')
if mibBuilder.loadTexts: pgdsalsvMIB.setOrganization('PairGain Technologies, INC.')
if mibBuilder.loadTexts: pgdsalsvMIB.setContactInfo(' Ken Huang Tel: +1 714-481-4543 Fax: +1 714-481-2114 E-mail: [email protected] ')
if mibBuilder.loadTexts: pgdsalsvMIB.setDescription('The MIB module defining objects for the alarm severity configuration and status management of a central DSLAM (Digital Subscriber Line Access Multiplexer), including from chassis power supply, fan status, to each channel/port related alarms in each HDSL/ADSL card inside the chassis. For HDSL alarm management: Please refer to Spec#157-1759-01 by Ken Huang for detail architecture model. For ADSL alarm management: Please refer to AdslLineMib(TR006) from adslForum for details architecture model. Naming Conventions: Atuc -- (ATU-C) ADSL modem at near (Central) end of line Atur -- (ATU-R) ADSL modem at Remote end of line ES -- Errored Second. Lof -- Loss of Frame Los -- Loss of Signal Lpr -- Loss of Power LOSW -- Loss of Sync Word UAS -- Unavailable Second ')
class PgDSLAMAlarmSeverity(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("disable", 1), ("minor", 2), ("major", 3), ("critical", 4))
class PgDSLAMAlarmStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("noalarm", 1), ("minor", 2), ("major", 3), ("critical", 4), ("alarm", 5))
pgDSLAMChassisAlarmSeverity = MibIdentifier((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 2))
pgDSLAMChassisPsAlarmSeverity = MibScalar((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 2, 1), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMChassisPsAlarmSeverity.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMChassisPsAlarmSeverity.setDescription('The Chassis Power failure Alarm Severity Setting.')
pgDSLAMChassisFanAlarmSeverity = MibScalar((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 2, 2), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMChassisFanAlarmSeverity.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMChassisFanAlarmSeverity.setDescription('The Chassis Fan failure Alarm Severity Setting.')
pgDSLAMChassisConfigChangeAlarmSeverity = MibScalar((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 2, 3), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMChassisConfigChangeAlarmSeverity.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMChassisConfigChangeAlarmSeverity.setDescription('The Chassis Config change Alarm Severity Setting.')
pgDSLAMChassisTempAlarmSeverity = MibScalar((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 2, 4), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMChassisTempAlarmSeverity.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMChassisTempAlarmSeverity.setDescription('The Chassis Temperature exceed threshold Alarm Severity Setting.')
pgDSLAMHDSLSpanAlarmThresholdConfProfileIndexNext = MibScalar((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileIndexNext.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileIndexNext.setDescription("This object contains an appropriate value to be used for pgDSLAMHDSLSpanAlarmThresholdConfProfileIndex when creating entries in the alarmThresholdConfTable. The value '0' indicates that no unassigned entries are available. To obtain the pgDSLAMHDSLSpanAlarmThresholdConfProfileIndexNext value for a new entry, the manager issues a management protocol retrieval operation to obtain the current value of this object. After each retrieval, the agent should modify the value to the next unassigned index.")
pgDSLAMHDSLSpanAlarmThresholdConfProfileTable = MibTable((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 8), )
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileTable.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileTable.setDescription('The DSLAM HDSL Span Alarm Threshold Configuration Profile table.')
pgDSLAMHDSLSpanAlarmThresholdConfProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 8, 1), ).setIndexNames((0, "PAIRGAIN-DSLAM-ALARM-SEVERITY-MIB", "pgDSLAMHDSLSpanAlarmThresholdConfProfileIndex"))
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileEntry.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileEntry.setDescription('Entry in the DSLAM HDSL Span Alarm Threshold Configuration Profile table.')
pgDSLAMHDSLSpanAlarmThresholdConfProfileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)))
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileIndex.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileIndex.setDescription('This object is used by the line alarm Threshold configuration table in order to identify a row of this table')
pgDSLAMHDSLSpanMarginThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 8, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanMarginThreshold.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanMarginThreshold.setDescription('Sets the HDSL Margin threshold value.')
pgDSLAMHDSLSpanESThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 8, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanESThreshold.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanESThreshold.setDescription('Sets the HDSL Errored Seconds threshold value.')
pgDSLAMHDSLSpanUASThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 8, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanUASThreshold.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanUASThreshold.setDescription('Sets the HDSL Unavailable Seconds threshold value.')
pgDSLAMHDSLSpanAlarmThresholdConfProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 8, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileRowStatus.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmThresholdConfProfileRowStatus.setDescription('This object is used to create a new row or modify or delete an existing row in this table.')
pgDSLAMHDSLSpanAlarmSeverityConfProfileIndexNext = MibScalar((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileIndexNext.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileIndexNext.setDescription("This object contains an appropriate value to be used for pgDSLAMHDSLSpanAlarmSeverityConfProfileIndex when creating entries in the alarmSeverityConfTable. The value '0' indicates that no unassigned entries are available. To obtain the pgDSLAMHDSLSpanAlarmSeverityConfProfileIndexNext value for a new entry, the manager issues a management protocol retrieval operation to obtain the current value of this object. After each retrieval, the agent should modify the value to the next unassigned index.")
pgDSLAMHDSLSpanAlarmSeverityConfProfileTable = MibTable((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10), )
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileTable.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileTable.setDescription('The DSLAM HDSL Span Alarm Severity Configuration Profile table.')
pgDSLAMHDSLSpanAlarmSeverityConfProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10, 1), ).setIndexNames((0, "PAIRGAIN-DSLAM-ALARM-SEVERITY-MIB", "pgDSLAMHDSLSpanAlarmSeverityConfProfileIndex"))
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileEntry.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileEntry.setDescription('Entry in the DSLAM HDSL Span Alarm Severity Configuration Profile table.')
pgDSLAMHDSLSpanAlarmSeverityConfProfileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)))
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileIndex.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileIndex.setDescription('This object is used by the line alarm severity configuration table in order to identify a row of this table')
pgDSLAMHDSLSpanLOSWAlarmSetting = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10, 1, 2), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanLOSWAlarmSetting.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanLOSWAlarmSetting.setDescription('Sets the severity for Loss of Sync Word alarm.')
pgDSLAMHDSLSpanMarginAlarmSetting = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10, 1, 3), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanMarginAlarmSetting.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanMarginAlarmSetting.setDescription('Sets the severity for Margin threshold exceeded alarm.')
pgDSLAMHDSLSpanESAlarmSetting = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10, 1, 4), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanESAlarmSetting.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanESAlarmSetting.setDescription('Sets the severity for Errored Seconds threshold exceeded alarm.')
pgDSLAMHDSLSpanUASAlarmSetting = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10, 1, 5), PgDSLAMAlarmSeverity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanUASAlarmSetting.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanUASAlarmSetting.setDescription('Sets the severity for UAS threshold exceeded alarm.')
pgDSLAMHDSLSpanAlarmSeverityConfProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 3, 1, 10, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileRowStatus.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMHDSLSpanAlarmSeverityConfProfileRowStatus.setDescription('This object is used to create a new row or modify or delete an existing row in this table.')
pgDSLAMADSLAtucAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1), )
if mibBuilder.loadTexts: pgDSLAMADSLAtucAlarmTable.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucAlarmTable.setDescription('The DSLAM ADSL ATU-C Alarm indication table.')
pgDSLAMADSLAtucAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: pgDSLAMADSLAtucAlarmEntry.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucAlarmEntry.setDescription('Entry in the DSLAM ADSL ATU-C Alarm indication table.')
pgDSLAMADSLAtucLofAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1, 1, 1), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAtucLofAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucLofAlarm.setDescription('ADSL loss of framing alarm on ATU-C ')
pgDSLAMADSLAtucLosAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1, 1, 2), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAtucLosAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucLosAlarm.setDescription('ADSL loss of signal alarm on ATU-C ')
pgDSLAMADSLAtucLprAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1, 1, 3), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAtucLprAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucLprAlarm.setDescription('ADSL loss of power alarm on ATU-C ')
pgDSLAMADSLAtucESAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1, 1, 4), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAtucESAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucESAlarm.setDescription('ADSL Errored Second threshold exceeded alarm on ATU-C ')
pgDSLAMADSLAtucRateChangeAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1, 1, 5), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAtucRateChangeAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucRateChangeAlarm.setDescription('ADSL Rate Changed alarm on ATU-C ')
pgDSLAMADSLAtucInitFailureAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 1, 1, 6), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAtucInitFailureAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAtucInitFailureAlarm.setDescription('ADSL initialization failed alarm on ATU-C ')
pgDSLAMADSLAturAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2), )
if mibBuilder.loadTexts: pgDSLAMADSLAturAlarmTable.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturAlarmTable.setDescription('The DSLAM ADSL ATU-R Alarm indication table.')
pgDSLAMADSLAturAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: pgDSLAMADSLAturAlarmEntry.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturAlarmEntry.setDescription('Entry in the DSLAM ADSL ATU-R Alarm indication table.')
pgDSLAMADSLAturLofAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2, 1, 1), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAturLofAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturLofAlarm.setDescription('ADSL loss of framing alarm on ATU-R ')
pgDSLAMADSLAturLosAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2, 1, 2), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAturLosAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturLosAlarm.setDescription('ADSL loss of signal alarm on ATU-R ')
pgDSLAMADSLAturLprAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2, 1, 3), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAturLprAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturLprAlarm.setDescription('ADSL loss of power alarm on ATU-R ')
pgDSLAMADSLAturESAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2, 1, 4), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAturESAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturESAlarm.setDescription('ADSL Errored Second threshold exceeded alarm on ATU-R ')
pgDSLAMADSLAturRateChangeAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2, 1, 5), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAturRateChangeAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturRateChangeAlarm.setDescription('ADSL Rate Changed alarm on ATU-R ')
pgDSLAMADSLAturInitFailureAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 2, 1, 6), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMADSLAturInitFailureAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMADSLAturInitFailureAlarm.setDescription('ADSL initialization failed alarm on ATU-R ')
pgDSLAMChassisAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3), )
if mibBuilder.loadTexts: pgDSLAMChassisAlarmTable.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMChassisAlarmTable.setDescription('The DSLAM Chassis Alarm indication table.')
pgDSLAMChassisAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: pgDSLAMChassisAlarmEntry.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMChassisAlarmEntry.setDescription('Entry in the DSLAM Chassis Alarm indication table.')
pgDSLAMPSFailureAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3, 1, 1), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMPSFailureAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMPSFailureAlarm.setDescription('chassis power supply failure alarm ')
pgDSLAMFanFailureAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3, 1, 2), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMFanFailureAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMFanFailureAlarm.setDescription('chassis fan failure alarm ')
pgDSLAMConfigChangeAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3, 1, 3), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMConfigChangeAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMConfigChangeAlarm.setDescription('chassis config changed alarm ')
pgDSLAMTempExceedThreshAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3, 1, 4), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMTempExceedThreshAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMTempExceedThreshAlarm.setDescription('chassis temperature exceeded threshold ')
pgDSLAMLineCardDownAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3, 1, 5), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMLineCardDownAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMLineCardDownAlarm.setDescription('the line card in the chassis is down ')
pgDSLAMCellBusDownAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 927, 1, 9, 4, 3, 1, 6), PgDSLAMAlarmStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pgDSLAMCellBusDownAlarm.setStatus('current')
if mibBuilder.loadTexts: pgDSLAMCellBusDownAlarm.setDescription('the cell bus in the chassis is down ')
mibBuilder.exportSymbols("PAIRGAIN-DSLAM-ALARM-SEVERITY-MIB", pgDSLAMADSLAtucLprAlarm=pgDSLAMADSLAtucLprAlarm, pgDSLAMHDSLSpanAlarmThresholdConfProfileEntry=pgDSLAMHDSLSpanAlarmThresholdConfProfileEntry, pgDSLAMHDSLSpanAlarmThresholdConfProfileIndexNext=pgDSLAMHDSLSpanAlarmThresholdConfProfileIndexNext, pgDSLAMADSLAtucAlarmTable=pgDSLAMADSLAtucAlarmTable, pgDSLAMChassisFanAlarmSeverity=pgDSLAMChassisFanAlarmSeverity, pgDSLAMADSLAturAlarmEntry=pgDSLAMADSLAturAlarmEntry, PgDSLAMAlarmSeverity=PgDSLAMAlarmSeverity, PgDSLAMAlarmStatus=PgDSLAMAlarmStatus, pgDSLAMHDSLSpanAlarmSeverityConfProfileIndexNext=pgDSLAMHDSLSpanAlarmSeverityConfProfileIndexNext, pgDSLAMHDSLSpanAlarmSeverityConfProfileTable=pgDSLAMHDSLSpanAlarmSeverityConfProfileTable, pgDSLAMADSLAtucAlarmEntry=pgDSLAMADSLAtucAlarmEntry, pgDSLAMHDSLSpanESAlarmSetting=pgDSLAMHDSLSpanESAlarmSetting, pgDSLAMHDSLSpanAlarmSeverityConfProfileEntry=pgDSLAMHDSLSpanAlarmSeverityConfProfileEntry, pgDSLAMADSLAturLofAlarm=pgDSLAMADSLAturLofAlarm, pgDSLAMADSLAturLprAlarm=pgDSLAMADSLAturLprAlarm, pgDSLAMADSLAtucESAlarm=pgDSLAMADSLAtucESAlarm, pgDSLAMADSLAtucRateChangeAlarm=pgDSLAMADSLAtucRateChangeAlarm, pgDSLAMConfigChangeAlarm=pgDSLAMConfigChangeAlarm, pgDSLAMFanFailureAlarm=pgDSLAMFanFailureAlarm, pgDSLAMHDSLSpanAlarmThresholdConfProfileTable=pgDSLAMHDSLSpanAlarmThresholdConfProfileTable, pgDSLAMHDSLSpanAlarmSeverityConfProfileRowStatus=pgDSLAMHDSLSpanAlarmSeverityConfProfileRowStatus, pgdsalsvMIB=pgdsalsvMIB, pgDSLAMChassisAlarmSeverity=pgDSLAMChassisAlarmSeverity, pgDSLAMHDSLSpanAlarmThresholdConfProfileIndex=pgDSLAMHDSLSpanAlarmThresholdConfProfileIndex, pgDSLAMHDSLSpanUASAlarmSetting=pgDSLAMHDSLSpanUASAlarmSetting, pgDSLAMPSFailureAlarm=pgDSLAMPSFailureAlarm, pgDSLAMADSLAtucLofAlarm=pgDSLAMADSLAtucLofAlarm, pgDSLAMTempExceedThreshAlarm=pgDSLAMTempExceedThreshAlarm, pgDSLAMHDSLSpanAlarmSeverityConfProfileIndex=pgDSLAMHDSLSpanAlarmSeverityConfProfileIndex, pgDSLAMHDSLSpanLOSWAlarmSetting=pgDSLAMHDSLSpanLOSWAlarmSetting, pgDSLAMHDSLSpanMarginThreshold=pgDSLAMHDSLSpanMarginThreshold, pgDSLAMADSLAtucInitFailureAlarm=pgDSLAMADSLAtucInitFailureAlarm, pgDSLAMChassisAlarmTable=pgDSLAMChassisAlarmTable, pgDSLAMHDSLSpanESThreshold=pgDSLAMHDSLSpanESThreshold, pgDSLAMADSLAtucLosAlarm=pgDSLAMADSLAtucLosAlarm, pgDSLAMADSLAturRateChangeAlarm=pgDSLAMADSLAturRateChangeAlarm, pgDSLAMChassisAlarmEntry=pgDSLAMChassisAlarmEntry, pgDSLAMLineCardDownAlarm=pgDSLAMLineCardDownAlarm, pgDSLAMHDSLSpanMarginAlarmSetting=pgDSLAMHDSLSpanMarginAlarmSetting, pgDSLAMADSLAturInitFailureAlarm=pgDSLAMADSLAturInitFailureAlarm, pgDSLAMADSLAturLosAlarm=pgDSLAMADSLAturLosAlarm, pgDSLAMCellBusDownAlarm=pgDSLAMCellBusDownAlarm, pgDSLAMADSLAturESAlarm=pgDSLAMADSLAturESAlarm, pgDSLAMChassisPsAlarmSeverity=pgDSLAMChassisPsAlarmSeverity, pgDSLAMChassisTempAlarmSeverity=pgDSLAMChassisTempAlarmSeverity, pgDSLAMHDSLSpanAlarmThresholdConfProfileRowStatus=pgDSLAMHDSLSpanAlarmThresholdConfProfileRowStatus, pgDSLAMHDSLSpanUASThreshold=pgDSLAMHDSLSpanUASThreshold, pgDSLAMADSLAturAlarmTable=pgDSLAMADSLAturAlarmTable, pgDSLAMChassisConfigChangeAlarmSeverity=pgDSLAMChassisConfigChangeAlarmSeverity, PYSNMP_MODULE_ID=pgdsalsvMIB)
|
py
|
1a582fa2f0454cc25db00310e67ac3a9b4e1ddb9
|
# ------------------------------
# 561. Array Partition I
#
# Description:
# Given an array of 2n integers, your task is to group these integers into n pairs of integers, say (a1, b1), (a2, b2), ..., (an, bn), which makes the sum of min(ai, bi) for all i from 1 to n as large as possible.
# Example 1:
# Input: [1,4,3,2]
# Output: 4
# Explanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).
#
# Note:
# n is a positive integer, which is in the range of [1, 10000].
# All the integers in the array will be in the range of [-10000, 10000].
#
# Version: 1.0
# 07/16/18 by Jianfa
# ------------------------------
class Solution(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum(sorted(nums)[::2])
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# It is essentially a math/greedy problem: sort first, then pick the smaller one of every consecutive pair.
|
py
|
1a58311b8083db713008288fa707376bad72a70f
|
""" Implements a the task queue worker and routing. This is just
a template and not the actual script which is run. Actual scripts
can be found in /etc/appscale/celery/scripts.
Find and replace the following:
APP_ID: Set this to the current application ID.
CELERY_CONFIGURATION: The name of the celery configuration file.
"""
import datetime
import httplib
import os
import sys
import yaml
def setup_environment():
ENVIRONMENT_FILE = "/etc/appscale/environment.yaml"
FILE = open(ENVIRONMENT_FILE)
env = yaml.load(FILE.read())
APPSCALE_HOME = env["APPSCALE_HOME"]
sys.path.append(APPSCALE_HOME + "/AppTaskQueue")
sys.path.append(APPSCALE_HOME + "/AppServer")
sys.path.append(APPSCALE_HOME + "/lib")
setup_environment()
from celery import Celery
from celery.utils.log import get_task_logger
from urlparse import urlparse
from tq_config import TaskQueueConfig
from tq_lib import TASK_STATES
from distributed_tq import TaskName
import appscale_info
import constants
from google.appengine.runtime import apiproxy_errors
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_distributed
from google.appengine.api import datastore
from google.appengine.ext import db
sys.path.append(TaskQueueConfig.CELERY_CONFIG_DIR)
sys.path.append(TaskQueueConfig.CELERY_WORKER_DIR)
app_id = 'APP_ID'
config = TaskQueueConfig(TaskQueueConfig.RABBITMQ, app_id)
module_name = TaskQueueConfig.get_celery_worker_module_name(app_id)
celery = Celery(module_name, broker=config.get_broker_string(),
backend='amqp://')
celery.config_from_object('CELERY_CONFIGURATION')
logger = get_task_logger(__name__)
master_db_ip = appscale_info.get_db_master_ip()
connection_str = master_db_ip + ":" + str(constants.DB_SERVER_PORT)
ds_distrib = datastore_distributed.DatastoreDistributed(
"appscaledashboard", connection_str, require_indexes=False)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', ds_distrib)
os.environ['APPLICATION_ID'] = "appscaledashboard"
# This template header and tasks can be found in appscale/AppTaskQueue/templates
|
py
|
1a5832496f9280911728af6d975dbbc41ba24278
|
import os.path
import numpy as np
import itertools
import Tools
import statsmodels.tsa.stattools
# These patterns are used for tests and benchmarks.
# For tests, saturation test cases still need to be added.
def cartesian(*somelists):
r=[]
for element in itertools.product(*somelists):
r.append(element)
return(r)
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[result.size//2:]
def writeTests(config,format):
config.setOverwrite(False)
NBSAMPLES=128
inputsA=np.random.randn(NBSAMPLES)
inputsB=np.random.randn(NBSAMPLES)
inputsA = Tools.normalize(inputsA)
inputsB = Tools.normalize(inputsB)
if format==31:
# To avoid overflow. There is no saturation in CMSIS code for Q31 conv/corr
inputsA = inputsA / 16
inputsB = inputsB / 16
config.writeInput(1, inputsA,"InputsA")
config.writeInput(1, inputsB,"InputsB")
a = [1,2,3,Tools.loopnb(format,Tools.TAILONLY),
Tools.loopnb(format,Tools.BODYONLY),
Tools.loopnb(format,Tools.BODYANDTAIL)
]
a = list(np.unique(np.array(a)))
if format == 15:
nbs = [(14, 15), (14, 16), (14, 17), (14, 18), (14, 33), (15, 15),
(15, 16), (15, 17), (15, 18), (15, 33), (16, 15), (16, 16),
(16, 17), (16, 18), (16, 33), (17, 15), (17, 16), (17, 17),
(17, 18), (17, 33), (32, 15), (32, 16), (32, 17), (32, 18), (32, 33)]
elif format == 7 :
nbs = [(30, 31), (30, 32), (30, 33), (30, 34), (30, 49), (31, 31),
(31,32), (31, 33), (31, 34), (31, 49), (32, 31), (32, 32),
(32, 33), (32,34), (32, 49), (33, 31), (33, 32), (33, 33), (33, 34),
(33, 49), (48,31), (48, 32), (48, 33), (48, 34), (48, 49)]
else:
nbs = [(4, 1), (4, 2), (4, 3), (4, 8), (4, 11), (5, 1), (5, 2), (5, 3), (5, 8), (5, 11), (6, 1), (6, 2), (6, 3), (6, 8), (6, 11), (9, 1), (9, 2),
(9, 3), (9, 8), (9, 11), (10, 1), (10, 2), (10, 3), (10, 8), (10, 11), (11, 1), (11, 2), (11, 3), (11, 8), (11, 11), (12, 1), (12, 2),
(12, 3), (12, 8), (12, 11), (13, 1), (13, 2), (13, 3), (13, 8), (13, 11)]
nbTest = 1
for (na,nb) in nbs:
#print(na,nb)
ref = np.correlate(inputsA[0:na],inputsB[0:nb],"full")
if na > nb:
padding = na - nb
z = np.zeros(padding)
ref = np.concatenate((z,ref))
else:
padding = nb - na
z = np.zeros(padding)
ref = np.concatenate((ref,z))
config.writeReference(nbTest, ref)
nbTest = nbTest + 1
for (na,nb) in nbs:
#print(na,nb)
ref = np.convolve(inputsA[0:na],inputsB[0:nb],"full")
config.writeReference(nbTest, ref)
nbTest = nbTest + 1
# Levinson durbin tests
config.setOverwrite(True)
a = [Tools.loopnb(format,Tools.TAILONLY),
Tools.loopnb(format,Tools.BODYONLY),
Tools.loopnb(format,Tools.BODYANDTAIL),
]
a = list(np.unique(np.array(a)))
#a = [3]
# Errors of each levinson durbin test
err=[]
errTestID = nbTest
for na in a:
s = np.random.randn(na+1)
s = Tools.normalize(s)
phi = autocorr(s)
phi = Tools.normalize(phi)
config.writeInput(nbTest, phi,"InputPhi")
sigmav,arcoef,pacf,sigma,phi=statsmodels.tsa.stattools.levinson_durbin(phi,nlags=na,isacov=True)
err.append(sigmav)
config.writeReference(nbTest, arcoef)
nbTest = nbTest + 1
config.writeReference(errTestID, err,"LDErrors")
def generatePatterns():
PATTERNDIR = os.path.join("Patterns","DSP","Filtering","MISC","MISC")
PARAMDIR = os.path.join("Parameters","DSP","Filtering","MISC","MISC")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configf16=Tools.Config(PATTERNDIR,PARAMDIR,"f16")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
configq7=Tools.Config(PATTERNDIR,PARAMDIR,"q7")
writeTests(configf32,0)
writeTests(configf16,16)
writeTests(configq31,31)
writeTests(configq15,15)
writeTests(configq7,7)
if __name__ == '__main__':
generatePatterns()
|
py
|
1a5833cb9b16f8e5732ac5f8d9e2b67dc28286ff
|
#!/usr/bin/env python3
"""
Input: collaboration bipartite graph X-Y and weights on X.
Output: X' = a downsampled set of nodes of X (from bipartite graph X-Y) such that each node connects to at most 10 nodes in Y
(e.g. the paper has at most 10 authors) and its weight is at least 5 (e.g. the number of citations is at least 5).
To ensure that the resulting bipartite graph X'-Y' is connected, we downsample X (with the above restrictions) by performing random walks on the X-graph
(e.g. performing random walks on the papers graph, restricted to papers that have at least 5 citations and at most 10 authors,
where two papers are connected if they have at least one author in common).
"""
import numpy as np
from scipy import sparse
import pandas as pd
import networkx as nx
from networkx.algorithms import bipartite as nxb
import scipy
from scipy import sparse
from scipy.sparse import coo_matrix
from random import shuffle
import time
def starting_node_random_walk(bipartite,weights_x, min_weight=100, max_dim=10 ):
"""
Sample random node in X (from bipartite graph X-Y) with the restriction that it does not connect to more
than "max_dim" nodes in Y and that its weight is more than "min_weight"
Parameters
----------
bipartite : scipy sparse matrix
bipartite collaboration graph X-Y
weights_x : ndarray
Array of size bipartite.shape[0], containing the weights on the node of X
min_weight : float
minimum weight of the sampled node
max_dim : int
maximum number of adjacent nodes in Y
Returns
-------
start : starting node of the random walk
"""
Al=bipartite.tolil()
rows=Al.rows
seeds_papers=[]
for j, el in enumerate(rows[np.where(weights_x>min_weight)]):
if len(el)<max_dim:
#print('Paper {} has {} authors and {} citations'.format(np.where(weights_x>min_weight)[0][j],len(el),weights_x[np.where(weights_x>min_weight)][j]))
seeds_papers.append(np.where(weights_x>min_weight)[0][j])
copy_seed=np.copy(seeds_papers)
shuffle(copy_seed)
start=copy_seed[0]
return int(start)
def subsample_node_x(adjaceny_graph_x,bipartite,weights_x, min_weight=5, max_dim=10,length_walk=80):
""""
Downsample set of nodes X' of X (from bipartite graph X-Y) such that each node connects to at most 10 nodes in Y
(eg the paper has at most 10 authors) and its weights are at least 5 (eg the number of citation is at least 5).
To ensure that the resulting bipartite graph X'-Y' is connected we downsampled X (with the above restrictions) by performing random walks on the X-graph.
(eg performing random walks on the papers graph -restricted to papers that have at least 5 citations and at most 10 authors-
where two papers are connected if they have at least one author in common)
Parameters
----------
adjaceny_graph_x : scipy sparse matrix
adjacency matrix of X (from the bipartite graph X-Y)
bipartite : scipy sparse matrix
bipartite collaboration graph X-Y
weights_x : ndarray
Array of size bipartite.shape[0], containing the weights on the node of X
min_weight : float
minimum weight of the sampled node, default 5
max_dim : int
maximum number of adjacent nodes in Y, default 10
length_walk : int
length of random walk with the above restrictions
Returns
-------
p: array of the downsampled nodes in X = X'
"""
start= starting_node_random_walk(bipartite,weights_x, min_weight=min_weight, max_dim=max_dim )
Al=bipartite.tolil()
rows=Al.rows
G = nx.from_scipy_sparse_matrix(adjaceny_graph_x)
new_start=start
H=nx.algorithms.traversal.breadth_first_search.bfs_edges(G, new_start, reverse=False, depth_limit=1)
e=list(H)
B=nx.Graph()
B.add_edges_from(e)
nodes=np.array(B.nodes())
down_cit=weights_x[nodes]
p=nodes[np.where(down_cit>=min_weight)]
list_seeds=[new_start]
for iterations in range(0,length_walk):
seed_papers=[]
for j, el in enumerate(rows[nodes]):
if len(el)<max_dim and weights_x[nodes[j]]>=min_weight:
seed_papers.append(nodes[j])
c=list(set(seed_papers).difference(list_seeds))
if len(c)<=1:
break
new_start=c[np.argsort(weights_x[c])[-2]]
H1=nx.algorithms.traversal.breadth_first_search.bfs_edges(G, new_start, reverse=False, depth_limit=1)
e1=list(H1)
B=nx.Graph()
B.add_edges_from(e1)
nodes=np.array(B.nodes())
down_cit=weights_x[nodes]
p1=nodes[np.where(down_cit>=min_weight)]
final=np.concatenate((p,p1))
p=np.unique(final)
list_seeds.append(new_start)
return p
if __name__ == '__main__':
start = time.time()
def timeit(name):
print('wall time ({}): {:.0f}s'.format(name, time.time() - start))
adjacency_papers = sparse.load_npz('s2_2_bipartite_graph/papers_adjacency.npz')
adjacency = scipy.sparse.load_npz('s2_2_bipartite_graph/paper_author_biadjacency.npz')
papers = pd.read_csv('s2_2_bipartite_graph/papers.csv', index_col=0)
citations=np.array(papers['citations_2019'])
starting_node=starting_node_random_walk(adjacency,weights_x=citations, min_weight=100, max_dim=10 )
print("The starting node of the random walk has ID {}".format(starting_node))
downsample= subsample_node_x(adjacency_papers,adjacency,weights_x=citations, min_weight=5, max_dim=10,length_walk=80)
timeit('process')
np.save(f's2_3_collaboration_complex/{starting_node}_downsampled.npy', downsample)
timeit('total')
|
py
|
1a5833da4172d446ad26ffbbbd80cbb9174da674
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_host_dump_transfer_details import UpdateHostDumpTransferDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateCurlTransferDetails(UpdateHostDumpTransferDetails):
"""
Optional properties for Curl-based dump transfer in source or target host.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateCurlTransferDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.database_migration.models.UpdateCurlTransferDetails.kind` attribute
of this class is ``CURL`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param kind:
The value to assign to the kind property of this UpdateCurlTransferDetails.
Allowed values for this property are: "CURL", "OCI_CLI"
:type kind: str
"""
self.swagger_types = {
'kind': 'str'
}
self.attribute_map = {
'kind': 'kind'
}
self._kind = None
self._kind = 'CURL'
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py
|
1a5833e1c6e8d3d305928a6849a7131b49ee5f2d
|
from compiler import *
####################################################################################################################
# Each faction record contains the following fields:
# 1) Faction id: used for referencing factions in other files.
# The prefix fac_ is automatically added before each faction id.
# 2) Faction name.
# 3) Faction flags. See header_factions.py for a list of available flags
# 4) Faction coherence. Relation between members of this faction.
# 5) Relations. This is a list of relation records.
# Each relation record is a tuple that contains the following fields:
# 5.1) Faction. Which other faction this relation is referring to
# 5.2) Value: Relation value between the two factions.
# Values range between -1 and 1.
# 6) Ranks
# 7) Faction color (default is gray)
####################################################################################################################
default_kingdom_relations = [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.05),("mountain_bandits", -0.02),("forest_bandits", -0.02)]
factions = [
("no_faction","No Faction",0, 0.9, [], []),
("commoners","Commoners",0, 0.1,[("player_faction",0.1)], []),
("outlaws","Outlaws", max_player_rating(-30), 0.5,[("commoners",-0.6),("player_faction",-0.15)], [], 0x888888),
# Factions before this point are hardwired into the game and their order should not be changed.
("neutral","Neutral",0, 0.1,[("player_faction",0.0)], [],0xFFFFFF),
("innocents","Innocents", ff_always_hide_label, 0.5,[("outlaws",-0.05)], []),
("merchants","Merchants", ff_always_hide_label, 0.5,[("outlaws",-0.5),], []),
("dark_knights","{!}Dark Knights", 0, 0.5,[("innocents",-0.9),("player_faction",-0.4)], []),
("culture_1", "{!}culture_1", 0, 0.9, [], []),
("culture_2", "{!}culture_2", 0, 0.9, [], []),
("culture_3", "{!}culture_3", 0, 0.9, [], []),
("culture_4", "{!}culture_4", 0, 0.9, [], []),
("culture_5", "{!}culture_5", 0, 0.9, [], []),
("culture_6", "{!}culture_6", 0, 0.9, [], []),
# ("swadian_caravans","Swadian Caravans", 0, 0.5,[("outlaws",-0.8), ("dark_knights",-0.2)], []),
# ("vaegir_caravans","Vaegir Caravans", 0, 0.5,[("outlaws",-0.8), ("dark_knights",-0.2)], []),
("player_faction","Player Faction",0, 0.9, [], []),
("player_supporters_faction","Player's Supporters",0, 0.9, [("player_faction",1.00),("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xFF4433), #changed name so that can tell difference if shows up on map
("kingdom_1", "Kingdom of Swadia", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xEE7744),
("kingdom_2", "Kingdom of Vaegirs", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCCBB99),
("kingdom_3", "Khergit Khanate", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC99FF),
("kingdom_4", "Kingdom of Nords", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0x33DDDD),
("kingdom_5", "Kingdom of Rhodoks", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0x33DD33),
("kingdom_6", "Sarranid Sultanate", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xDDDD33),
## ("kingdom_1_rebels", "Swadian rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_2_rebels", "Vaegir rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_3_rebels", "Khergit rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_4_rebels", "Nord rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
## ("kingdom_5_rebels", "Rhodok rebels", 0, 0.9, [("outlaws",-0.05),("peasant_rebels", -0.1),("deserters", -0.02),("mountain_bandits", -0.05),("forest_bandits", -0.05)], [], 0xCC2211),
("kingdoms_end","{!}kingdoms_end", 0, 0,[], []),
("robber_knights", "{!}robber_knights", 0, 0.1, [], []),
("khergits","{!}Khergits", 0, 0.5,[("player_faction",0.0)], []),
("black_khergits","{!}Black Khergits", 0, 0.5,[("player_faction",-0.3),("kingdom_1",-0.02),("kingdom_2",-0.02)], []),
## ("rebel_peasants","Rebel Peasants", 0, 0.5,[("vaegirs",-0.5),("player_faction",0.0)], []),
("manhunters","Manhunters", 0, 0.5,[("outlaws",-0.6),("player_faction",0.1)], []),
("deserters","Deserters", 0, 0.5,[("manhunters",-0.6),("merchants",-0.5),("player_faction",-0.1)], [], 0x888888),
("mountain_bandits","Mountain Bandits", 0, 0.5,[("commoners",-0.2),("merchants",-0.5),("manhunters",-0.6),("player_faction",-0.15)], [], 0x888888),
("forest_bandits","Forest Bandits", 0, 0.5,[("commoners",-0.2),("merchants",-0.5),("manhunters",-0.6),("player_faction",-0.15)], [], 0x888888),
("undeads","{!}Undeads", max_player_rating(-30), 0.5,[("commoners",-0.7),("player_faction",-0.5)], []),
("slavers","{!}Slavers", 0, 0.1, [], []),
("peasant_rebels","{!}Peasant Rebels", 0, 1.0,[("noble_refugees",-1.0),("player_faction",-0.4)], []),
("noble_refugees","{!}Noble Refugees", 0, 0.5,[], []),
]
|
py
|
1a5834f18be0924d66dab064ba5e2e22ea5ae2f1
|
import os
import shutil
import sys
import argparse
ext_music = [".mp3", ".flac", ".aac", ".wav", ".wma", ".ape", ".alac", ".m4a", ".m4b", ".m4p", ".ogg", ".aiff", ".aif"]
ext_artwork = [".jpg", ".png", ".bmp", ".gif", ".jpeg"]
ext_extras = [".m3u", ".m3u8", ".wpl", ".pls", ".asx", ".smi", ".sami", ".xspf", ".txt", ".cue", ".log"]
ext_both = ext_music + ext_artwork
lists = [ext_music, ext_artwork, ext_both]
def file_copy(destination_location, path, file, source_location):
# manual path joining is used because os.path.join returns the second argument when two absolute paths are joined
# e.g. /foo and /bar are joined as /bar instead of /foo/bar
# whereas this method creates /foo//bar which is then normalized by normpath to /foo/bar
target_path = os.path.normpath(destination_location + os.path.sep + path[len(source_location):])
target_file = os.path.join(target_path, file)
if not os.path.exists(target_path):
try:
os.makedirs(target_path)
except OSError:
print("Unable to create the appropriate folder for", file)
return 3
if not os.path.isfile(target_file):
try:
shutil.copy2(os.path.join(path, file), target_path)
return 1 # Returned when the file was not found and the copy was successful.
except shutil.Error:
print("Copy failed.")
return 2 # Returned when the file was not found, but the copy failed.
return 0 # Returned when the file already exists in destination_location.
def create_log(log_file, source_location, destination_location, operation, extras, sync):
operation_list = ["Music only.", "Artwork only.", "Music and Artwork."]
if not os.path.exists("Logs"):
try:
os.makedirs("Logs")
except OSError:
print("ERROR: Unable to create log folder.")
return
try:
with open(os.path.join("Logs", log_file), "w+", encoding="UTF-8") as log:
log.write("Source Location: " + source_location + "\n")
log.write("Destination Location: " + destination_location + "\n")
log.write("Chosen Operation: " + operation_list[operation] + "\n")
log.write("Extra files: " + str(extras) + "\n")
log.write("Folder-Sync: " + ("True" if sync == 2 else "False") + "\n\n")
except OSError:
print("ERROR: Unable to create log-folder or logfile.")
return
def update_log(file, backup_result, log_file):
with open(os.path.join("Logs", log_file), 'a', encoding="UTF-8") as log:
if backup_result == 1:
log.write(file + "\n")
elif backup_result == 2:
log.write("Unable to copy " + file + " because an error occurred.\n")
elif backup_result == 3:
log.write("Unable to create the appropriate folder for " + file + "\n")
def scan_and_backup(source_location, destination_location, operation, extras, sync, log, log_file=None):
if log:
if sync == 1:
with open(os.path.join("Logs", log_file), 'a', encoding="UTF-8") as log:
log.write("\nReverse way\n\n")
else:
from datetime import datetime
current_date = str(datetime.now())[:19]
log_file = "AutoBackup_" + current_date[:10] + '_' + current_date[11:] + ".log"
create_log(log_file, source_location, destination_location, operation, extras, sync)
for path, _, files in os.walk(source_location):
for file in files:
name, ext = os.path.splitext(file)
ext = str.lower(ext)
if ext in lists[int(operation)] or extras and ext in ext_extras:
if ext not in ext_artwork or name[:8] != "AlbumArt" and name != "Thumbnail" and name != "Folder":
backup_result = file_copy(destination_location, path, file, source_location)
if log and backup_result > 0:
update_log(file, backup_result, log_file)
if sync == 2:
scan_and_backup(destination_location, source_location, operation, extras, 1, log, log_file)
def argument_validity():
parser = argparse.ArgumentParser()
parser.add_argument("source", help="location containing the files to be backed up")
parser.add_argument("destination", help="location where the files will be stored")
parser.add_argument("action", metavar="action", choices=["m", "music", "a", "artwork", "c", "complete"],
help="m or music: audio files only, a or artwork: image files only, c or complete: both audio"
" and image files")
parser.add_argument("-e", "--extras", action="store_true", dest="extras_flag", help="back-up extra files")
parser.add_argument("-s", "--sync", action="store_const", const=2, default=0, dest="sync_flag",
help="synchronize source and destination folder")
parser.add_argument("-l", "--log", action="store_true", dest="log_flag", help="save a log file")
parser.add_argument("-v", "--version", action="version", version="%(prog)s 1.3.0")
args = parser.parse_args()
source_location = os.path.abspath(args.source)
if not os.path.exists(source_location):
print(source_location, "does not exist.")
sys.exit()
destination_location = os.path.abspath(args.destination)
if args.action == "m" or args.action == "music":
action = 0
elif args.action == "a" or args.action == "artwork":
action = 1
else:
action = 2
return source_location, destination_location, action, args.extras_flag, args.sync_flag, args.log_flag
if __name__ == "__main__":
source, destination, selection, extras_flag, sync_flag, log_flag = argument_validity()
scan_and_backup(source, destination, selection, extras_flag, sync_flag, log_flag)
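# Example invocation (illustrative paths; assumes this script is saved as backup.py):
#   python backup.py ~/Music /mnt/backup complete --extras --log
# which backs up audio and artwork plus playlist/cue/log extras and writes a logfile.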
|
py
|
1a5835257ab212a9efcf3d2dacfac41f7111b19a
|
import asyncio
import copy
import importlib
import math
import os
import pprint
import random
import secrets
import signal
import struct
import time
import uuid
from collections import Counter
from dataclasses import dataclass
from datetime import datetime
from functools import wraps
from importlib.metadata import version as pkg_version
from pathlib import Path
from time import perf_counter_ns as clock_ns
from typing import Awaitable
from typing import Callable
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import TYPE_CHECKING
from typing import TypedDict
from typing import TypeVar
from typing import Union
import cmyui.utils
import psutil
import timeago
from cmyui.osu.oppai_ng import OppaiWrapper
from peace_performance_python.objects import Beatmap as PeaceMap
from peace_performance_python.objects import Calculator as PeaceCalculator
import app.packets
import app.state
import app.utils
import settings
from app.constants import regexes
from app.constants.gamemodes import GameMode
from app.constants.mods import Mods
from app.constants.mods import SPEED_CHANGING_MODS
from app.constants.privileges import ClanPrivileges
from app.constants.privileges import Privileges
from app.objects.beatmap import Beatmap
from app.objects.beatmap import ensure_local_osu_file
from app.objects.beatmap import RankedStatus
from app.objects.clan import Clan
from app.objects.match import MapPool
from app.objects.match import Match
from app.objects.match import MatchTeams
from app.objects.match import MatchTeamTypes
from app.objects.match import MatchWinConditions
from app.objects.match import SlotStatus
from app.objects.player import Player
from app.objects.score import SubmissionStatus
from app.utils import seconds_readable
if TYPE_CHECKING:
from app.objects.channel import Channel
R = TypeVar("R")
BEATMAPS_PATH = Path.cwd() / ".data/osu"
@dataclass
class Context:
player: Player
trigger: str
args: Sequence[str]
recipient: Union["Channel", Player]
Callback = Callable[[Context], Awaitable[Optional[str]]]
class Command(NamedTuple):
triggers: list[str]
callback: Callback
priv: Privileges
hidden: bool
doc: Optional[str]
class CommandSet:
__slots__ = ("trigger", "doc", "commands")
def __init__(self, trigger: str, doc: str) -> None:
self.trigger = trigger
self.doc = doc
self.commands: list[Command] = []
def add(
self,
priv: Privileges,
aliases: list[str] = [],
hidden: bool = False,
) -> Callable[[Callback], Callback]:
def wrapper(f: Callback) -> Callback:
self.commands.append(
Command(
# NOTE: this method assumes that functions without any
# triggers will be named like '{self.trigger}_{trigger}'.
triggers=(
[f.__name__.removeprefix(f"{self.trigger}_").strip()] + aliases
),
callback=f,
priv=priv,
hidden=hidden,
doc=f.__doc__,
),
)
return f
return wrapper
# TODO: refactor help commands into some base ver
# since they're all the same anyways lol.
# not sure if this should be in glob or not,
# trying to think of some use cases lol..
regular_commands = []
command_sets = [
mp_commands := CommandSet("mp", "Multiplayer commands."),
pool_commands := CommandSet("pool", "Mappool commands."),
clan_commands := CommandSet("clan", "Clan commands."),
]
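# Hypothetical example (not part of the original file): given the naming convention
# noted in CommandSet.add, a handler named "mp_example" registered on the "mp" set
# picks up the trigger "example" automatically (so it runs as "!mp example").
@mp_commands.add(Privileges.NORMAL, hidden=True)
async def mp_example(ctx: Context) -> Optional[str]:
    """Illustrative stub only; not one of the module's real commands."""
    return "documentation sketch"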
def command(
priv: Privileges,
aliases: list[str] = [],
hidden: bool = False,
) -> Callable[[Callback], Callback]:
def wrapper(f: Callback) -> Callback:
regular_commands.append(
Command(
callback=f,
priv=priv,
hidden=hidden,
triggers=[f.__name__.strip("_")] + aliases,
doc=f.__doc__,
),
)
return f
return wrapper
""" User commands
# The commands below are not considered dangerous,
# and are granted to any unbanned players.
"""
@command(Privileges.NORMAL, aliases=["", "h"], hidden=True)
async def _help(ctx: Context) -> Optional[str]:
"""Show all documented commands the player can access."""
prefix = settings.COMMAND_PREFIX
l = ["Individual commands", "-----------"]
for cmd in regular_commands:
if not cmd.doc or ctx.player.priv & cmd.priv != cmd.priv:
# no doc, or insufficient permissions.
continue
l.append(f"{prefix}{cmd.triggers[0]}: {cmd.doc}")
l.append("") # newline
l.extend(["Command sets", "-----------"])
for cmd_set in command_sets:
l.append(f"{prefix}{cmd_set.trigger}: {cmd_set.doc}")
return "\n".join(l)
@command(Privileges.NORMAL)
async def roll(ctx: Context) -> Optional[str]:
"""Roll an n-sided die where n is the number you write (100 default)."""
if ctx.args and ctx.args[0].isdecimal():
max_roll = min(int(ctx.args[0]), 0x7FFF)
else:
max_roll = 100
if max_roll == 0:
return "Roll what?"
points = random.randrange(0, max_roll)
return f"{ctx.player.name} rolls {points} points!"
@command(Privileges.NORMAL, hidden=True)
async def block(ctx: Context) -> Optional[str]:
"""Block another user from communicating with you."""
target = await app.state.sessions.players.from_cache_or_sql(name=" ".join(ctx.args))
if not target:
return "User not found."
if target is app.state.sessions.bot or target is ctx.player:
return "What?"
if target.id in ctx.player.blocks:
return f"{target.name} already blocked!"
if target.id in ctx.player.friends:
ctx.player.friends.remove(target.id)
await ctx.player.add_block(target)
return f"Added {target.name} to blocked users."
@command(Privileges.NORMAL, hidden=True)
async def unblock(ctx: Context) -> Optional[str]:
"""Unblock another user from communicating with you."""
target = await app.state.sessions.players.from_cache_or_sql(name=" ".join(ctx.args))
if not target:
return "User not found."
if target is app.state.sessions.bot or target is ctx.player:
return "What?"
if target.id not in ctx.player.blocks:
return f"{target.name} not blocked!"
await ctx.player.remove_block(target)
return f"Removed {target.name} from blocked users."
@command(Privileges.NORMAL)
async def reconnect(ctx: Context) -> Optional[str]:
"""Disconnect and reconnect a given player (or self) to the server."""
if ctx.args:
# !reconnect <player>
if not ctx.player.priv & Privileges.ADMINISTRATOR:
return # requires admin
target = app.state.sessions.players.get(name=" ".join(ctx.args))
if not target:
return "Player not found"
else:
# !reconnect
target = ctx.player
target.logout()
@command(Privileges.DONATOR)
async def changename(ctx: Context) -> Optional[str]:
"""Change your username."""
name = " ".join(ctx.args).strip()
if not regexes.USERNAME.match(name):
return "Must be 2-15 characters in length."
if "_" in name and " " in name:
return 'May contain "_" and " ", but not both.'
if name in settings.DISALLOWED_NAMES:
return "Disallowed username; pick another."
if await app.state.services.database.fetch_one(
"SELECT 1 FROM users WHERE name = :name",
{"name": name},
):
return "Username already taken by another player."
# all checks passed, update their name
safe_name = name.lower().replace(" ", "_")
await app.state.services.database.execute(
"UPDATE users SET name = :name, safe_name = :safe_name WHERE id = :user_id",
{"name": name, "safe_name": safe_name, "user_id": ctx.player.id},
)
ctx.player.enqueue(
app.packets.notification(f"Your username has been changed to {name}!"),
)
ctx.player.logout()
@command(Privileges.NORMAL, aliases=["bloodcat", "beatconnect", "chimu", "q"])
async def maplink(ctx: Context) -> Optional[str]:
"""Return a download link to the user's current map (situation dependant)."""
bmap = None
# priority: multiplayer -> spectator -> last np
match = ctx.player.match
spectating = ctx.player.spectating
if match and match.map_id:
bmap = await Beatmap.from_md5(match.map_md5)
elif spectating and spectating.status.map_id:
bmap = await Beatmap.from_md5(spectating.status.map_md5)
elif time.time() < ctx.player.last_np["timeout"]:
bmap = ctx.player.last_np["bmap"]
if bmap is None:
return "No map found!"
# gatari.pw & nerina.pw are pretty much the only reliable mirrors
# we know of; perhaps beatconnect could be added as an alternative.
return f"[https://osu.gatari.pw/d/{bmap.set_id} {bmap.full}]"
@command(Privileges.NORMAL, aliases=["last", "r"])
async def recent(ctx: Context) -> Optional[str]:
"""Show information about a player's most recent score."""
if ctx.args:
if not (target := app.state.sessions.players.get(name=" ".join(ctx.args))):
return "Player not found."
else:
target = ctx.player
if not (s := target.recent_score):
return "No scores found :o (only saves per play session)"
l = [f"[{s.mode!r}] {s.bmap.embed}", f"{s.acc:.2f}%"]
if s.mods:
l.insert(1, f"+{s.mods!r}")
l = [" ".join(l)]
if s.passed:
rank = s.rank if s.status == SubmissionStatus.BEST else "NA"
l.append(f"PASS {{{s.pp:.2f}pp #{rank}}}")
else:
# XXX: prior to v3.2.0, gulag didn't parse total_length from
# the osu!api, so maps cached before then may have a total_length
# of 0, which would cause a ZeroDivisionError here. this guard can
# probably be removed in the future, or better yet replaced with a
# system that repairs the affected maps.
if s.bmap.total_length != 0:
completion = s.time_elapsed / (s.bmap.total_length * 1000)
l.append(f"FAIL {{{completion * 100:.2f}% complete}})")
else:
l.append("FAIL")
return " | ".join(l)
GAMEMODE_STRINGS = (
"osu!vn",
"taiko!vn",
"catch!vn",
"mania!vn",
"osu!rx",
"taiko!rx",
"catch!rx",
"osu!ap",
)
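# NOTE: the strings above are parsed by !top below: the part before the
# "!" maps to the vanilla mode id (osu/taiko/catch/mania -> 0-3), and the
# suffix (vn/rx/ap) selects which scores_{vn,rx,ap} table to query.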
TOP_SCORE_FMTSTR = (
"{idx}. ({pp:.2f}pp) [https://osu.{domain}/beatmaps/{bmapid} "
"{artist} - {title} [{version}]]"
)
@command(Privileges.NORMAL, hidden=True)
async def top(ctx: Context) -> Optional[str]:
"""Show information about a player's top 10 scores."""
# !top <mode> (player)
if (args_len := len(ctx.args)) not in (1, 2):
return "Invalid syntax: !top <mode> (player)"
if ctx.args[0] not in GAMEMODE_STRINGS:
return f'Valid gamemodes: {", ".join(GAMEMODE_STRINGS)}.'
if args_len == 2:
if not regexes.USERNAME.match(ctx.args[1]):
return "Invalid username."
# specific player provided
if not (
p := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[1])
):
return "Player not found."
else:
# no player provided, use self
p = ctx.player
mode_str, _, special_mode_str = ctx.args[0].partition("!")
mode = ["osu", "taiko", "catch", "mania"].index(mode_str)
table = f"scores_{special_mode_str}"
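# e.g. "catch!rx" -> mode_str "catch" (mode 2), table "scores_rx"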
scores = await app.state.services.database.fetch_all(
"SELECT s.pp, b.artist, b.title, b.version, b.id AS bmapid "
f"FROM {table} s "
"LEFT JOIN maps b ON b.md5 = s.map_md5 "
"WHERE s.userid = :user_id "
"AND s.mode = :mode "
"AND s.status = 2 "
"AND b.status in (2, 3) "
"ORDER BY s.pp DESC LIMIT 10",
{"user_id": p.id, "mode": mode},
)
if not scores:
return "No scores"
return "\n".join(
[f"Top 10 scores for {p.embed} ({ctx.args[0]})."]
+ [
TOP_SCORE_FMTSTR.format(idx=idx + 1, domain=settings.DOMAIN, **s)
for idx, s in enumerate(scores)
],
)
# TODO: !compare (compare to previous !last/!top post's map)
@command(Privileges.NORMAL, aliases=["w"], hidden=True)
async def _with(ctx: Context) -> Optional[str]:
"""Specify custom accuracy & mod combinations with `/np`."""
if ctx.recipient is not app.state.sessions.bot:
return "This command can only be used in DM with bot."
if time.time() >= ctx.player.last_np["timeout"]:
return "Please /np a map first!"
bmap: Beatmap = ctx.player.last_np["bmap"]
osu_file_path = BEATMAPS_PATH / f"{bmap.id}.osu"
if not await ensure_local_osu_file(osu_file_path, bmap.id, bmap.md5):
return "Mapfile could not be found; this incident has been reported."
mode_vn = ctx.player.last_np["mode_vn"]
if mode_vn in (0, 1, 2): # osu, taiko, catch
if not ctx.args or len(ctx.args) > 4:
return "Invalid syntax: !with <acc/nmiss/combo/mods ...>"
# !with 95% 1m 429x hddt
acc = mods = combo = nmiss = None
# parse acc, misses, combo and mods from arguments.
# tried to balance complexity vs correctness here
for arg in map(str.lower, ctx.args):
# mandatory suffix, combo & nmiss
if combo is None and arg.endswith("x") and arg[:-1].isdecimal():
combo = int(arg[:-1])
if combo > bmap.max_combo:
return "Invalid combo."
elif nmiss is None and arg.endswith("m") and arg[:-1].isdecimal():
nmiss = int(arg[:-1])
# TODO: store nobjects?
if nmiss > bmap.max_combo:
return "Invalid misscount."
else:
# optional prefix/suffix, mods & accuracy
arg_stripped = arg.removeprefix("+").removesuffix("%")
if (
mods is None
and arg_stripped.isalpha()
and len(arg_stripped) % 2 == 0
):
mods = Mods.from_modstr(arg_stripped)
mods = mods.filter_invalid_combos(mode_vn)
elif acc is None and arg_stripped.replace(".", "", 1).isdecimal():
acc = float(arg_stripped)
if not 0 <= acc <= 100:
return "Invalid accuracy."
else:
return f"Unknown argument: {arg}"
msg = []
if mode_vn == 0:
with OppaiWrapper("oppai-ng/liboppai.so") as ezpp:
if mods is not None:
ezpp.set_mods(int(mods))
msg.append(f"{mods!r}")
if nmiss is not None:
ezpp.set_nmiss(nmiss)
msg.append(f"{nmiss}m")
if combo is not None:
ezpp.set_combo(combo)
msg.append(f"{combo}x")
if acc is not None:
ezpp.set_accuracy_percent(acc)
msg.append(f"{acc:.2f}%")
ezpp.calculate(osu_file_path)
pp, sr = ezpp.get_pp(), ezpp.get_sr()
return f"{' '.join(msg)}: {pp:.2f}pp ({sr:.2f}*)"
else:
beatmap = PeaceMap(osu_file_path)
peace = PeaceCalculator()
if mods is not None:
peace.set_mods(int(mods))
msg.append(f"{mods!r}")
if nmiss is not None:
peace.set_miss(nmiss)
msg.append(f"{nmiss}m")
if combo is not None:
peace.set_combo(combo)
msg.append(f"{combo}x")
if acc is not None:
peace.set_acc(acc)
msg.append(f"{acc:.2f}%")
if mode_vn:
peace.set_mode(mode_vn)
calculated = peace.calculate(beatmap)
if math.isnan(calculated.pp) or math.isinf(calculated.pp):
# TODO: report to logserver
return f"{' '.join(msg)}: 0pp (0*)"
return f"{' '.join(msg)}: {calculated.pp:.2f}pp ({calculated.stars:.2f}*)"
else: # mania
if not ctx.args or len(ctx.args) > 2:
return "Invalid syntax: !with <score/mods ...>"
score = 1000
mods = Mods.NOMOD
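# a rough sketch of the accepted arguments: a bare number is the score
# in thousands (e.g. 950 -> 950k), and an even-length mod string
# (optionally written as "+dt" etc.) sets the mods, e.g. `!with 950 dt`.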
for param in (p.strip("+k") for p in ctx.args):
if param.isdecimal(): # score (in thousands)
if not 0 <= (score := int(param)) <= 1000:
return "Invalid score."
if score <= 500:
return "<=500k score is always 0pp."
elif len(param) % 2 == 0:
mods = Mods.from_modstr(param)
mods = mods.filter_invalid_combos(mode_vn)
else:
return "Invalid syntax: !with <score/mods ...>"
beatmap = PeaceMap(osu_file_path)
peace = PeaceCalculator()
if mods != Mods.NOMOD:
peace.set_mods(int(mods))
if mode_vn:
peace.set_mode(mode_vn)
peace.set_score(score * 1000)
calc = peace.calculate(beatmap)
return f"{score}k {mods!r}: {calc.pp:.2f}pp ({calc.stars:.2f}*)"
@command(Privileges.NORMAL, aliases=["req"])
async def request(ctx: Context) -> Optional[str]:
"""Request a beatmap for nomination."""
if ctx.args:
return "Invalid syntax: !request"
if time.time() >= ctx.player.last_np["timeout"]:
return "Please /np a map first!"
bmap = ctx.player.last_np["bmap"]
if bmap.status != RankedStatus.Pending:
return "Only pending maps may be requested for status change."
await app.state.services.database.execute(
"INSERT INTO map_requests "
"(map_id, player_id, datetime, active) "
"VALUES (:map_id, :user_id, NOW(), 1)",
{"map_id": bmap.id, "user_id": ctx.player.id},
)
return "Request submitted."
@command(Privileges.NORMAL)
async def get_apikey(ctx: Context) -> Optional[str]:
"""Generate a new api key & assign it to the player."""
if ctx.recipient is not app.state.sessions.bot:
return f"Command only available in DMs with {app.state.sessions.bot.name}."
# remove old token
if ctx.player.api_key:
app.state.sessions.api_keys.pop(ctx.player.api_key)
# generate new token
ctx.player.api_key = str(uuid.uuid4())
await app.state.services.database.execute(
"UPDATE users SET api_key = :api_key WHERE id = :user_id",
{"api_key": ctx.player.api_key, "user_id": ctx.player.id},
)
app.state.sessions.api_keys[ctx.player.api_key] = ctx.player.id
ctx.player.enqueue(
app.packets.notification(
"Type /savelog and click the popup for an easy way to copy this.",
),
)
return f"Your API key is now: {ctx.player.api_key}"
""" Nominator commands
# The commands below allow users to
# manage the server's state of beatmaps.
"""
@command(Privileges.NOMINATOR, aliases=["reqs"], hidden=True)
async def requests(ctx: Context) -> Optional[str]:
"""Check the nomination request queue."""
if ctx.args:
return "Invalid syntax: !requests"
rows = await app.state.services.database.fetch_all(
"SELECT map_id, player_id, datetime FROM map_requests WHERE active = 1",
)
if not rows:
return "The queue is clean! (0 map request(s))"
l = [f"Total requests: {len(rows)}"]
for (map_id, player_id, dt) in rows:
# find player & map for each row, and add to output.
if not (p := await app.state.sessions.players.from_cache_or_sql(id=player_id)):
l.append(f"Failed to find requesting player ({player_id})?")
continue
if not (bmap := await Beatmap.from_bid(map_id)):
l.append(f"Failed to find requested map ({map_id})?")
continue
l.append(f"[{p.embed} @ {dt:%b %d %I:%M%p}] {bmap.embed}.")
return "\n".join(l)
_status_str_to_int_map = {"unrank": 0, "rank": 2, "love": 5}
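# maps !map's status argument onto RankedStatus values
# (0 = pending/unranked, 2 = ranked, 5 = loved).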
def status_to_id(s: str) -> int:
return _status_str_to_int_map[s]
@command(Privileges.NOMINATOR)
async def _map(ctx: Context) -> Optional[str]:
"""Changes the ranked status of the most recently /np'ed map."""
if (
len(ctx.args) != 2
or ctx.args[0] not in ("rank", "unrank", "love")
or ctx.args[1] not in ("set", "map")
):
return "Invalid syntax: !map <rank/unrank/love> <map/set>"
if time.time() >= ctx.player.last_np["timeout"]:
return "Please /np a map first!"
bmap = ctx.player.last_np["bmap"]
new_status = RankedStatus(status_to_id(ctx.args[0]))
if bmap.status == new_status:
return f"{bmap.embed} is already {new_status!s}!"
# update sql & cache based on scope
# XXX: not sure if getting md5s from sql
# for updating cache would be faster?
# surely this will not scale as well..
async with app.state.services.database.connection() as db_conn:
if ctx.args[1] == "set":
# update whole set
await db_conn.execute(
"UPDATE maps SET status = :status, frozen = 1 WHERE set_id = :set_id",
{"status": new_status, "set_id": bmap.set_id},
)
# select all map ids for clearing map requests.
map_ids = [
row[0]
for row in await db_conn.fetch_all(
"SELECT id FROM maps WHERE set_id = :set_id",
{"set_id": bmap.set_id},
)
]
for bmap in app.state.cache.beatmapset[bmap.set_id].maps:
bmap.status = new_status
else:
# update only map
await db_conn.execute(
"UPDATE maps SET status = :status, frozen = 1 WHERE id = :map_id",
{"status": new_status, "map_id": bmap.id},
)
map_ids = [bmap.id]
if bmap.md5 in app.state.cache.beatmap:
app.state.cache.beatmap[bmap.md5].status = new_status
# deactivate rank requests for all ids
await db_conn.execute(
"UPDATE map_requests SET active = 0 WHERE map_id IN :map_ids",
{"map_ids": map_ids},
)
return f"{bmap.embed} updated to {new_status!s}."
""" Mod commands
# The commands below are somewhat dangerous,
# and are generally for managing players.
"""
ACTION_STRINGS = {
"restrict": "Restricted for",
"unrestrict": "Unrestricted for",
"silence": "Silenced for",
"unsilence": "Unsilenced for",
"note": "Note added:",
}
@command(Privileges.MODERATOR, hidden=True)
async def notes(ctx: Context) -> Optional[str]:
"""Retrieve the logs of a specified player by name."""
if len(ctx.args) != 2 or not ctx.args[1].isdecimal():
return "Invalid syntax: !notes <name> <days_back>"
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return f'"{ctx.args[0]}" not found.'
days = int(ctx.args[1])
if days > 365:
return "Please contact a developer to fetch >365 day old information."
elif days <= 0:
return "Invalid syntax: !notes <name> <days_back>"
res = await app.state.services.database.fetch_all(
"SELECT `action`, `msg`, `time`, `from` "
"FROM `logs` WHERE `to` = :to "
"AND UNIX_TIMESTAMP(`time`) >= UNIX_TIMESTAMP(NOW()) - :seconds "
"ORDER BY `time` ASC",
{"to": t.id, "seconds": days * 86400},
)
if not res:
return f"No notes found on {t} in the past {days} days."
notes = []
for row in res:
logger = await app.state.sessions.players.from_cache_or_sql(id=row["from"])
if not logger:
continue
action_str = ACTION_STRINGS.get(row["action"], "Unknown action:")
time_str = row["time"]
note = row["msg"]
notes.append(f"[{time_str}] {action_str} {note} by {logger.name}")
return "\n".join(notes)
@command(Privileges.MODERATOR, hidden=True)
async def addnote(ctx: Context) -> Optional[str]:
"""Add a note to a specified player by name."""
if len(ctx.args) < 2:
return "Invalid syntax: !addnote <name> <note ...>"
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return f'"{ctx.args[0]}" not found.'
await app.state.services.database.execute(
"INSERT INTO logs "
"(`from`, `to`, `action`, `msg`, `time`) "
"VALUES (:from, :to, :action, :msg, NOW())",
{
"from": ctx.player.id,
"to": t.id,
"action": "note",
"msg": " ".join(ctx.args[1:]),
},
)
return f"Added note to {t}."
# some shorthands that can be used as
# reasons in many moderation commands.
SHORTHAND_REASONS = {
"aa": "having their appeal accepted",
"cc": "using a modified osu! client",
"3p": "using 3rd party programs",
"rx": "using 3rd party programs (relax)",
"tw": "using 3rd party programs (timewarp)",
"au": "using 3rd party programs (auto play)",
}
DURATION_MULTIPLIERS = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
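# e.g. with the SCALED_DURATION regex used below, a duration argument of
# "3d" parses to 3 * 86400 = 259200 seconds.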
@command(Privileges.MODERATOR, hidden=True)
async def silence(ctx: Context) -> Optional[str]:
"""Silence a specified player with a specified duration & reason."""
if len(ctx.args) < 3:
return "Invalid syntax: !silence <name> <duration> <reason>"
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return f'"{ctx.args[0]}" not found.'
if t.priv & Privileges.STAFF and not ctx.player.priv & Privileges.DEVELOPER:
return "Only developers can manage staff members."
if not (r_match := regexes.SCALED_DURATION.match(ctx.args[1])):
return "Invalid syntax: !silence <name> <duration> <reason>"
multiplier = DURATION_MULTIPLIERS[r_match["scale"]]
duration = int(r_match["duration"]) * multiplier
reason = " ".join(ctx.args[2:])
if reason in SHORTHAND_REASONS:
reason = SHORTHAND_REASONS[reason]
await t.silence(ctx.player, duration, reason)
return f"{t} was silenced."
@command(Privileges.MODERATOR, hidden=True)
async def unsilence(ctx: Context) -> Optional[str]:
"""Unsilence a specified player."""
if len(ctx.args) != 1:
return "Invalid syntax: !unsilence <name>"
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return f'"{ctx.args[0]}" not found.'
if not t.silenced:
return f"{t} is not silenced."
if t.priv & Privileges.STAFF and not ctx.player.priv & Privileges.DEVELOPER:
return "Only developers can manage staff members."
await t.unsilence(ctx.player)
return f"{t} was unsilenced."
""" Admin commands
# The commands below are relatively dangerous,
# and are generally for managing players.
"""
@command(Privileges.ADMINISTRATOR, aliases=["u"], hidden=True)
async def user(ctx: Context) -> Optional[str]:
"""Return general information about a given user."""
if not ctx.args:
# no username specified, use ctx.player
p = ctx.player
else:
# username given, fetch the player
p = await app.state.sessions.players.from_cache_or_sql(name=" ".join(ctx.args))
if not p:
return "Player not found."
priv_list = [
priv.name for priv in Privileges if p.priv & priv and bin(priv).count("1") == 1
][::-1]
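# single-bit privileges the player holds, listed highest-first;
# bin(priv).count("1") == 1 filters out compound masks (e.g. STAFF).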
if time.time() < p.last_np["timeout"]:
last_np = p.last_np["bmap"].embed
else:
last_np = None
return "\n".join(
(
f'[{"Bot" if p.bot_client else "Player"}] {p.full_name} ({p.id})',
f"Privileges: {priv_list}",
f"Channels: {[p._name for p in p.channels]}",
f"Logged in: {timeago.format(p.login_time)}",
f"Last server interaction: {timeago.format(p.last_recv_time)}",
f"osu! build: {p.osu_ver} | Tourney: {p.tourney_client}",
f"Silenced: {p.silenced} | Spectating: {p.spectating}",
f"Last /np: {last_np}",
f"Recent score: {p.recent_score}",
f"Match: {p.match}",
f"Spectators: {p.spectators}",
),
)
@command(Privileges.ADMINISTRATOR, hidden=True)
async def restrict(ctx: Context) -> Optional[str]:
"""Restrict a specified player's account, with a reason."""
if len(ctx.args) < 2:
return "Invalid syntax: !restrict <name> <reason>"
# find any user matching (including offline).
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return f'"{ctx.args[0]}" not found.'
if t.priv & Privileges.STAFF and not ctx.player.priv & Privileges.DEVELOPER:
return "Only developers can manage staff members."
if t.restricted:
return f"{t} is already restricted!"
reason = " ".join(ctx.args[1:])
if reason in SHORTHAND_REASONS:
reason = SHORTHAND_REASONS[reason]
await t.restrict(admin=ctx.player, reason=reason)
return f"{t} was restricted."
@command(Privileges.ADMINISTRATOR, hidden=True)
async def unrestrict(ctx: Context) -> Optional[str]:
"""Unrestrict a specified player's account, with a reason."""
if len(ctx.args) < 2:
return "Invalid syntax: !unrestrict <name> <reason>"
# find any user matching (including offline).
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return f'"{ctx.args[0]}" not found.'
if t.priv & Privileges.STAFF and not ctx.player.priv & Privileges.DEVELOPER:
return "Only developers can manage staff members."
if not t.restricted:
return f"{t} is not restricted!"
reason = " ".join(ctx.args[1:])
if reason in SHORTHAND_REASONS:
reason = SHORTHAND_REASONS[reason]
await t.unrestrict(ctx.player, reason)
return f"{t} was unrestricted."
@command(Privileges.ADMINISTRATOR, hidden=True)
async def alert(ctx: Context) -> Optional[str]:
"""Send a notification to all players."""
if len(ctx.args) < 1:
return "Invalid syntax: !alert <msg>"
notif_txt = " ".join(ctx.args)
app.state.sessions.players.enqueue(app.packets.notification(notif_txt))
return "Alert sent."
@command(Privileges.ADMINISTRATOR, aliases=["alertu"], hidden=True)
async def alertuser(ctx: Context) -> Optional[str]:
"""Send a notification to a specified player by name."""
if len(ctx.args) < 2:
return "Invalid syntax: !alertu <name> <msg>"
if not (t := app.state.sessions.players.get(name=ctx.args[0])):
return "Could not find a user by that name."
notif_txt = " ".join(ctx.args[1:])
t.enqueue(app.packets.notification(notif_txt))
return "Alert sent."
# NOTE: this is pretty useless since it doesn't switch anything other
# than the c[e4-6].ppy.sh domains; it exists on bancho as a tournament
# server switch mechanism, perhaps we could leverage this in the future.
@command(Privileges.ADMINISTRATOR, hidden=True)
async def switchserv(ctx: Context) -> Optional[str]:
"""Switch your client's internal endpoints to a specified IP address."""
if len(ctx.args) != 1:
return "Invalid syntax: !switch <endpoint>"
new_bancho_ip = ctx.args[0]
ctx.player.enqueue(app.packets.switch_tournament_server(new_bancho_ip))
return "Have a nice journey.."
@command(Privileges.ADMINISTRATOR, aliases=["restart"])
async def shutdown(ctx: Context) -> Optional[str]:
"""Gracefully shutdown the server."""
if ctx.trigger == "restart":
_signal = signal.SIGUSR1
else:
_signal = signal.SIGTERM
if ctx.args: # shutdown after a delay
if not (r_match := regexes.SCALED_DURATION.match(ctx.args[0])):
return f"Invalid syntax: !{ctx.trigger} <delay> <msg ...>"
multiplier = DURATION_MULTIPLIERS[r_match["scale"]]
delay = int(r_match["duration"]) * multiplier
if delay < 15:
return "Minimum delay is 15 seconds."
if len(ctx.args) > 1:
# alert all online players of the reboot.
alert_msg = (
f"The server will {ctx.trigger} in {ctx.args[0]}.\n\n"
f'Reason: {" ".join(ctx.args[1:])}'
)
app.state.sessions.players.enqueue(app.packets.notification(alert_msg))
app.state.loop.call_later(delay, os.kill, os.getpid(), _signal)
return f"Enqueued {ctx.trigger}."
else: # shutdown immediately
os.kill(os.getpid(), _signal)
return ":D"
""" Developer commands
# The commands below are either dangerous or
# simply not useful for any other roles.
"""
_fake_users = []
@command(Privileges.DEVELOPER, aliases=["fu"])
async def fakeusers(ctx: Context) -> Optional[str]:
"""Add fake users to the online player list (for testing)."""
# NOTE: this is mostly just for speedtesting things
# regarding presences/stats. its implementation is
# indeed quite cursed, but rather efficient.
if (
len(ctx.args) != 2
or ctx.args[0] not in ("add", "rm")
or not ctx.args[1].isdecimal()
):
return "Invalid syntax: !fakeusers <add/rm> <amount>"
action = ctx.args[0]
amount = int(ctx.args[1])
if not 0 < amount <= 100_000:
return "Amount must be in range 0-100k."
# we start halfway through
# the i32 space for fake user ids.
FAKE_ID_START = 0x7FFFFFFF >> 1
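# 0x7FFFFFFF >> 1 == 0x3FFFFFFF (~1.07 billion), leaving the lower
# half of the positive i32 range for real user ids.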
# data to send to clients (all new user info)
# we'll send all the packets together at end (more efficient)
data = bytearray()
if action == "add":
const_uinfo = { # non important stuff
"utc_offset": 0,
"osu_ver": "dn",
"pm_private": False,
"clan": None,
"clan_priv": None,
"priv": Privileges.NORMAL | Privileges.VERIFIED,
"silence_end": 0,
"login_time": 0x7FFFFFFF, # never auto-dc
}
_stats = app.packets.user_stats(ctx.player)
if _fake_users:
current_fakes = max([x.id for x in _fake_users]) - (FAKE_ID_START - 1)
else:
current_fakes = 0
start_id = FAKE_ID_START + current_fakes
end_id = start_id + amount
vn_std = GameMode.VANILLA_OSU
base_player = Player(id=0, name="", **const_uinfo)
base_player.stats[vn_std] = copy.copy(ctx.player.stats[vn_std])
new_fakes = []
# static part of the presence packet,
# no need to redo this every iteration.
static_presence = struct.pack(
"<BBBffi",
19, # -5 (EST) + 24
38, # country (canada)
0b11111, # all in-game privs
0.0,
0.0, # lat, lon
1, # rank #1
)
for i in range(start_id, end_id):
# create new fake player from base
name = f"fake #{i - (FAKE_ID_START - 1)}"
fake = copy.copy(base_player)
fake.id = i
fake.name = name
# append userpresence packet
data += struct.pack(
"<HxIi",
83,
21 + len(name),
i, # packetid # packet len # userid
)
data += f"\x0b{chr(len(name))}{name}".encode()
data += static_presence
data += _stats
new_fakes.append(fake)
# extend all added fakes to the real list
_fake_users.extend(new_fakes)
app.state.sessions.players.extend(new_fakes)
del new_fakes
msg = "Added."
else: # remove
len_fake_users = len(_fake_users)
if amount > len_fake_users:
return f"Too many! only {len_fake_users} remaining."
to_remove = _fake_users[len_fake_users - amount :]
logout_packet_header = b"\x0c\x00\x00\x05\x00\x00\x00"
for fake in to_remove:
if not fake.online:
# already auto-dced
_fake_users.remove(fake)
continue
data += logout_packet_header
data += fake.id.to_bytes(4, "little") # 4 bytes pid
data += b"\x00" # 1 byte 0
app.state.sessions.players.remove(fake)
_fake_users.remove(fake)
msg = "Removed."
data = bytes(data) # bytearray -> bytes
# only enqueue data to real users.
for o in [x for x in app.state.sessions.players if x.id < FAKE_ID_START]:
o.enqueue(data)
return msg
@command(Privileges.DEVELOPER)
async def stealth(ctx: Context) -> Optional[str]:
"""Toggle the developer's stealth, allowing them to be hidden."""
# NOTE: this command is a large work in progress and currently
# half works; eventually it will be moved to the Admin level.
ctx.player.stealth = not ctx.player.stealth
return f'Stealth {"enabled" if ctx.player.stealth else "disabled"}.'
@command(Privileges.DEVELOPER)
async def recalc(ctx: Context) -> Optional[str]:
"""Recalculate pp for a given map, or all maps."""
# NOTE: at the moment this command isn't very optimal and re-parses
# the beatmap file each iteration; this will be heavily improved.
if len(ctx.args) != 1 or ctx.args[0] not in ("map", "all"):
return "Invalid syntax: !recalc <map/all>"
if ctx.args[0] == "map":
# by specific map, use their last /np
if time.time() >= ctx.player.last_np["timeout"]:
return "Please /np a map first!"
bmap: Beatmap = ctx.player.last_np["bmap"]
osu_file_path = BEATMAPS_PATH / f"{bmap.id}.osu"
if not await ensure_local_osu_file(osu_file_path, bmap.id, bmap.md5):
return "Mapfile could not be found; this incident has been reported."
async with (
app.state.services.database.connection() as score_select_conn,
app.state.services.database.connection() as update_conn,
):
with OppaiWrapper("oppai-ng/liboppai.so") as ezpp:
ezpp.set_mode(0) # TODO: other modes
for table in ("scores_vn", "scores_rx", "scores_ap"):
for (
row
) in await score_select_conn.fetch_all( # TODO: should be aiter
"SELECT id, acc, mods, max_combo, nmiss "
f"FROM {table} "
"WHERE map_md5 = :map_md5 AND mode = 0",
{"map_md5": bmap.md5},
):
ezpp.set_mods(row["mods"])
ezpp.set_nmiss(row["nmiss"]) # clobbers acc
ezpp.set_combo(row["max_combo"])
ezpp.set_accuracy_percent(row["acc"])
ezpp.calculate(osu_file_path)
pp = ezpp.get_pp()
if math.isinf(pp) or math.isnan(pp):
continue
await update_conn.execute(
f"UPDATE {table} SET pp = :pp WHERE id = :score_id",
{"pp": pp, "score_id": row["id"]},
)
return "Map recalculated."
else:
# recalc all plays on the server, on all maps
staff_chan = app.state.sessions.channels["#staff"] # log any errs here
async def recalc_all() -> None:
staff_chan.send_bot(f"{ctx.player} started a full recalculation.")
st = time.time()
async with (
app.state.services.database.connection() as bmap_select_conn,
app.state.services.database.connection() as score_select_conn,
app.state.services.database.connection() as update_conn,
):
for (
bmap_row
) in await bmap_select_conn.fetch_all( # TODO: should be aiter
"SELECT id, md5 FROM maps WHERE passes > 0",
):
bmap_id = bmap_row["id"]
bmap_md5 = bmap_row["md5"]
osu_file_path = BEATMAPS_PATH / f"{bmap_id}.osu"
if not await ensure_local_osu_file(
osu_file_path,
bmap_id,
bmap_md5,
):
staff_chan.send_bot(
f"[Recalc] Couldn't find {bmap_id} / {bmap_md5}",
)
continue
with OppaiWrapper("oppai-ng/liboppai.so") as ezpp:
ezpp.set_mode(0) # TODO: other modes
for table in ("scores_vn", "scores_rx", "scores_ap"):
# TODO: this should probably also be an aiter
for row in await score_select_conn.fetch_all(
"SELECT id, acc, mods, max_combo, nmiss "
f"FROM {table} "
"WHERE map_md5 = :map_md5 AND mode = 0",
{"map_md5": bmap_md5},
):
ezpp.set_mods(row["mods"])
ezpp.set_nmiss(row["nmiss"]) # clobbers acc
ezpp.set_combo(row["max_combo"])
ezpp.set_accuracy_percent(row["acc"])
ezpp.calculate(osu_file_path)
pp = ezpp.get_pp()
if math.isinf(pp) or math.isnan(pp):
continue
await update_conn.execute(
f"UPDATE {table} SET pp = :pp WHERE id = :score_id",
{"pp": pp, "score_id": row["id"]},
)
# leave at least 1/100th of
# a second for handling conns.
await asyncio.sleep(0.01)
elapsed = app.utils.seconds_readable(int(time.time() - st))
staff_chan.send_bot(f"Recalculation complete. | Elapsed: {elapsed}")
app.state.loop.create_task(recalc_all())
return "Starting a full recalculation."
@command(Privileges.DEVELOPER, hidden=True)
async def debug(ctx: Context) -> Optional[str]:
"""Toggle the console's debug setting."""
settings.DEBUG = not settings.DEBUG
return f"Toggled {'on' if settings.DEBUG else 'off'}."
# NOTE: these commands will likely be removed
# with the addition of a good frontend.
str_priv_dict = {
"normal": Privileges.NORMAL,
"verified": Privileges.VERIFIED,
"whitelisted": Privileges.WHITELISTED,
"supporter": Privileges.SUPPORTER,
"premium": Privileges.PREMIUM,
"alumni": Privileges.ALUMNI,
"tournament": Privileges.TOURNAMENT,
"nominator": Privileges.NOMINATOR,
"mod": Privileges.MODERATOR,
"admin": Privileges.ADMINISTRATOR,
"dangerous": Privileges.DEVELOPER,
}
@command(Privileges.DEVELOPER, hidden=True)
async def addpriv(ctx: Context) -> Optional[str]:
"""Set privileges for a specified player (by name)."""
if len(ctx.args) < 2:
return "Invalid syntax: !addpriv <name> <role1 role2 role3 ...>"
bits = Privileges(0)
for m in [m.lower() for m in ctx.args[1:]]:
if m not in str_priv_dict:
return f"Not found: {m}."
bits |= str_priv_dict[m]
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return "Could not find user."
await t.add_privs(bits)
return f"Updated {t}'s privileges."
@command(Privileges.DEVELOPER, hidden=True)
async def rmpriv(ctx: Context) -> Optional[str]:
"""Set privileges for a specified player (by name)."""
if len(ctx.args) < 2:
return "Invalid syntax: !rmpriv <name> <role1 role2 role3 ...>"
bits = Privileges(0)
for m in [m.lower() for m in ctx.args[1:]]:
if m not in str_priv_dict:
return f"Not found: {m}."
bits |= str_priv_dict[m]
if not (t := await app.state.sessions.players.from_cache_or_sql(name=ctx.args[0])):
return "Could not find user."
await t.remove_privs(bits)
return f"Updated {t}'s privileges."
@command(Privileges.DEVELOPER)
async def wipemap(ctx: Context) -> Optional[str]:
# (intentionally no docstring)
if ctx.args:
return "Invalid syntax: !wipemap"
if time.time() >= ctx.player.last_np["timeout"]:
return "Please /np a map first!"
map_md5 = ctx.player.last_np["bmap"].md5
# delete scores from all tables
async with app.state.services.database.connection() as db_conn:
for t in ("vn", "rx", "ap"):
await db_conn.execute(
f"DELETE FROM scores_{t} WHERE map_md5 = :map_md5",
{"map_md5": map_md5},
)
return "Scores wiped."
@command(Privileges.DEVELOPER, hidden=True)
async def menu(ctx: Context) -> Optional[str]:
"""Temporary command to illustrate the menu option idea."""
ctx.player.send_current_menu()
@command(Privileges.DEVELOPER, aliases=["re"])
async def reload(ctx: Context) -> Optional[str]:
"""Reload a python module."""
if len(ctx.args) != 1:
return "Invalid syntax: !reload <module>"
parent, *children = ctx.args[0].split(".")
try:
mod = __import__(parent)
except ModuleNotFoundError:
return "Module not found."
try:
for child in children:
mod = getattr(mod, child)
except AttributeError:
return f"Failed at {child}." # type: ignore
try:
mod = importlib.reload(mod)
except TypeError as exc:
return f"{exc.args[0]}."
return f"Reloaded {mod.__name__}"
@command(Privileges.NORMAL)
async def server(ctx: Context) -> Optional[str]:
"""Retrieve performance data about the server."""
build_str = f"gulag v{settings.VERSION!r} ({settings.DOMAIN})"
# get info about this process
proc = psutil.Process(os.getpid())
uptime = int(time.time() - proc.create_time())
# get info about our cpu
with open("/proc/cpuinfo") as f:
header = "model name\t: "
trailer = "\n"
model_names = Counter(
line[len(header) : -len(trailer)]
for line in f.readlines()
if line.startswith("model name")
)
# list of all cpus installed with thread count
cpus_info = " | ".join([f"{v}x {k}" for k, v in model_names.most_common()])
# get system-wide ram usage
sys_ram = psutil.virtual_memory()
# output ram usage as `{gulag_used}MB / {sys_used}MB / {sys_total}MB`
gulag_ram = proc.memory_info()[0]
ram_values = (gulag_ram, sys_ram.used, sys_ram.total)
ram_info = " / ".join([f"{v // 1024 ** 2}MB" for v in ram_values])
# divide up pkg versions, 3 displayed per line, e.g.
# aiohttp v3.6.3 | aiomysql v0.0.21 | bcrypt v3.2.0
# cmyui v1.7.3 | datadog v0.40.1 | geoip2 v4.1.0
# maniera v1.0.0 | mysql-connector-python v8.0.23 | orjson v3.5.1
# psutil v5.8.0 | py3rijndael v0.3.3 | uvloop v0.15.2
reqs = (Path.cwd() / "requirements.txt").read_text().splitlines()
pkg_sections = [reqs[i : i + 3] for i in range(0, len(reqs), 3)]
mirror_url = settings.MIRROR_URL
using_osuapi = settings.OSU_API_KEY != ""
advanced_mode = settings.DEVELOPER_MODE
auto_logging = settings.AUTOMATICALLY_REPORT_PROBLEMS
return "\n".join(
[
f"{build_str} | uptime: {seconds_readable(uptime)}",
f"cpu(s): {cpus_info}",
f"ram: {ram_info}",
f"mirror: {mirror_url} | osu!api connection: {using_osuapi}",
f"advanced mode: {advanced_mode} | auto logging: {auto_logging}",
"",
"requirements",
"\n".join(
[
" | ".join([f"{pkg} v{pkg_version(pkg)}" for pkg in section])
for section in pkg_sections
],
),
],
)
if settings.DEVELOPER_MODE:
"""Advanced (& potentially dangerous) commands"""
# NOTE: some of these commands are potentially dangerous, and only
# really intended for advanced users looking for access to lower level
# utilities. Some may give direct access to utilities that could perform
# harmful tasks to the underlying machine, so use at your own risk.
from sys import modules as installed_mods
__py_namespace = globals() | {
mod: __import__(mod)
for mod in (
"asyncio",
"dis",
"os",
"sys",
"struct",
"discord",
"cmyui",
"datetime",
"time",
"inspect",
"math",
"importlib",
)
if mod in installed_mods
}
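# `globals() | {...}` (dict union, python 3.9+) merges this module's
# globals with whichever of the optional modules above are installed,
# so !py snippets can reference them directly by name.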
@command(Privileges.DEVELOPER)
async def py(ctx: Context) -> Optional[str]:
"""Allow for (async) access to the python interpreter."""
# This can be very good for getting used to gulag's API; just look
# around the codebase and find things to play with in your server.
# Ex: !py return (await app.state.sessions.players.get(name='cmyui')).status.action
if not ctx.args:
return "owo"
# turn our input args into a coroutine definition string.
definition = "\n ".join(["async def __py(ctx):", " ".join(ctx.args)])
try: # def __py(ctx)
exec(definition, __py_namespace) # add to namespace
ret = await __py_namespace["__py"](ctx) # await it's return
except Exception as exc: # return exception in osu! chat
ret = f"{exc.__class__}: {exc}"
if "__py" in __py_namespace:
del __py_namespace["__py"]
if ret is None:
return "Success"
# TODO: perhaps size checks?
if not isinstance(ret, str):
ret = pprint.pformat(ret, compact=True)
return ret
""" Multiplayer commands
# The commands below are for multiplayer match management.
# Most commands are open to player usage.
"""
def ensure_match(
f: Callable[[Context, "Match"], Awaitable[Optional[R]]],
) -> Callable[[Context], Awaitable[Optional[R]]]:
@wraps(f)
async def wrapper(ctx: Context) -> Optional[R]:
match = ctx.player.match
# multi set is a bit of a special case,
# as we do some additional checks.
if match is None:
# player not in a match
return
if ctx.recipient is not match.chat:
# message not in match channel
return
if f is not mp_help and (
ctx.player not in match.refs and not ctx.player.priv & Privileges.TOURNAMENT
):
# doesn't have privs to use !mp commands (allow help).
return
return await f(ctx, match)
return wrapper
@mp_commands.add(Privileges.NORMAL, aliases=["h"])
@ensure_match
async def mp_help(ctx: Context, match: "Match") -> Optional[str]:
"""Show all documented multiplayer commands the player can access."""
prefix = settings.COMMAND_PREFIX
cmds = []
for cmd in mp_commands.commands:
if not cmd.doc or ctx.player.priv & cmd.priv != cmd.priv:
# no doc, or insufficient permissions.
continue
cmds.append(f"{prefix}mp {cmd.triggers[0]}: {cmd.doc}")
return "\n".join(cmds)
@mp_commands.add(Privileges.NORMAL, aliases=["st"])
@ensure_match
async def mp_start(ctx: Context, match: "Match") -> Optional[str]:
"""Start the current multiplayer match, with any players ready."""
if len(ctx.args) > 1:
return "Invalid syntax: !mp start <force/seconds>"
# this command can be used in a few different ways;
# !mp start: start the match now (make sure all players are ready)
# !mp start force: start the match now (don't check for ready)
# !mp start N: start the match in N seconds (don't check for ready)
# !mp start cancel: cancel the current match start timer
if not ctx.args:
# !mp start
if match.starting["start"] is not None:
time_remaining = int(match.starting["time"] - time.time())
return f"Match starting in {time_remaining} seconds."
if any([s.status == SlotStatus.not_ready for s in match.slots]):
return "Not all players are ready (`!mp start force` to override)."
else:
if ctx.args[0].isdecimal():
# !mp start N
if match.starting["start"] is not None:
time_remaining = int(match.starting["time"] - time.time())
return f"Match starting in {time_remaining} seconds."
# !mp start <seconds>
duration = int(ctx.args[0])
if not 0 < duration <= 300:
return "Timer range is 1-300 seconds."
def _start() -> None:
"""Remove any pending timers & start the match."""
# remove start & alert timers
match.starting["start"] = None
match.starting["alerts"] = None
match.starting["time"] = None
# make sure the player didn't leave the
# match since queueing this start.
if ctx.player not in match:
match.chat.send_bot("Player left match? (cancelled)")
return
match.start()
match.chat.send_bot("Starting match.")
def _alert_start(t: int) -> None:
"""Alert the match of the impending start."""
match.chat.send_bot(f"Match starting in {t} seconds.")
# add timers to our match object,
# so we can cancel them if needed.
match.starting["start"] = app.state.loop.call_later(duration, _start)
match.starting["alerts"] = [
app.state.loop.call_later(duration - t, lambda t=t: _alert_start(t))
for t in (60, 30, 10, 5, 4, 3, 2, 1)
if t < duration
]
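# NOTE: `t=t` above binds the loop variable at lambda definition time;
# without the default argument, every alert would fire with the final
# value of t from the generator.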
match.starting["time"] = time.time() + duration
return f"Match will start in {duration} seconds."
elif ctx.args[0] in ("cancel", "c"):
# !mp start cancel
if match.starting["start"] is None:
return "Match timer not active!"
match.starting["start"].cancel()
for alert in match.starting["alerts"]:
alert.cancel()
match.starting["start"] = None
match.starting["alerts"] = None
match.starting["time"] = None
return "Match timer cancelled."
elif ctx.args[0] not in ("force", "f"):
return "Invalid syntax: !mp start <force/seconds>"
# !mp start force simply passes through
match.start()
return "Good luck!"
@mp_commands.add(Privileges.NORMAL, aliases=["a"])
@ensure_match
async def mp_abort(ctx: Context, match: "Match") -> Optional[str]:
"""Abort the current in-progress multiplayer match."""
if not match.in_progress:
return "Abort what?"
match.unready_players(expected=SlotStatus.playing)
match.in_progress = False
match.enqueue(app.packets.match_abort())
match.enqueue_state()
return "Match aborted."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_map(ctx: Context, match: "Match") -> Optional[str]:
"""Set the current match's current map by id."""
if len(ctx.args) != 1 or not ctx.args[0].isdecimal():
return "Invalid syntax: !mp map <beatmapid>"
map_id = int(ctx.args[0])
if map_id == match.map_id:
return "Map already selected."
if not (bmap := await Beatmap.from_bid(map_id)):
return "Beatmap not found."
match.map_id = bmap.id
match.map_md5 = bmap.md5
match.map_name = bmap.full
match.mode = bmap.mode
match.enqueue_state()
return f"Selected: {bmap.embed}."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_mods(ctx: Context, match: "Match") -> Optional[str]:
"""Set the current match's mods, from string form."""
if len(ctx.args) != 1 or len(ctx.args[0]) % 2 != 0:
return "Invalid syntax: !mp mods <mods>"
mods = Mods.from_modstr(ctx.args[0])
mods = mods.filter_invalid_combos(match.mode.as_vanilla)
if match.freemods:
if ctx.player is match.host:
# allow host to set speed-changing mods.
match.mods = mods & SPEED_CHANGING_MODS
# set slot mods
match.get_slot(ctx.player).mods = mods & ~SPEED_CHANGING_MODS
else:
# not freemods, set match mods.
match.mods = mods
match.enqueue_state()
return "Match mods updated."
@mp_commands.add(Privileges.NORMAL, aliases=["fm", "fmods"])
@ensure_match
async def mp_freemods(ctx: Context, match: "Match") -> Optional[str]:
"""Toggle freemods status for the match."""
if len(ctx.args) != 1 or ctx.args[0] not in ("on", "off"):
return "Invalid syntax: !mp freemods <on/off>"
if ctx.args[0] == "on":
# central mods -> all players mods.
match.freemods = True
for s in match.slots:
if s.status & SlotStatus.has_player:
# the slot takes any non-speed
# changing mods from the match.
s.mods = match.mods & ~SPEED_CHANGING_MODS
match.mods &= SPEED_CHANGING_MODS
else:
# host mods -> central mods.
match.freemods = False
host = match.get_host_slot() # should always exist
# the match keeps any speed-changing mods,
# and also takes any mods the host has enabled.
match.mods &= SPEED_CHANGING_MODS
match.mods |= host.mods
for s in match.slots:
if s.status & SlotStatus.has_player:
s.mods = Mods.NOMOD
match.enqueue_state()
return "Match freemod status updated."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_host(ctx: Context, match: "Match") -> Optional[str]:
"""Set the current match's current host by id."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp host <name>"
if not (t := app.state.sessions.players.get(name=ctx.args[0])):
return "Could not find a user by that name."
if t is match.host:
return "They're already host, silly!"
if t not in match:
return "Found no such player in the match."
match.host_id = t.id
match.host.enqueue(app.packets.match_transfer_host())
match.enqueue_state(lobby=True)
return "Match host updated."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_randpw(ctx: Context, match: "Match") -> Optional[str]:
"""Randomize the current match's password."""
match.passwd = secrets.token_hex(8)
return "Match password randomized."
@mp_commands.add(Privileges.NORMAL, aliases=["inv"])
@ensure_match
async def mp_invite(ctx: Context, match: "Match") -> Optional[str]:
"""Invite a player to the current match by name."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp invite <name>"
if not (t := app.state.sessions.players.get(name=ctx.args[0])):
return "Could not find a user by that name."
if t is app.state.sessions.bot:
return "I'm too busy!"
if t is ctx.player:
return "You can't invite yourself!"
t.enqueue(app.packets.match_invite(ctx.player, t.name))
return f"Invited {t} to the match."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_addref(ctx: Context, match: "Match") -> Optional[str]:
"""Add a referee to the current match by name."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp addref <name>"
if not (t := app.state.sessions.players.get(name=ctx.args[0])):
return "Could not find a user by that name."
if t not in match:
return "User must be in the current match!"
if t in match.refs:
return f"{t} is already a match referee!"
match._refs.add(t)
return f"{t.name} added to match referees."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_rmref(ctx: Context, match: "Match") -> Optional[str]:
"""Remove a referee from the current match by name."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp addref <name>"
if not (t := app.state.sessions.players.get(name=ctx.args[0])):
return "Could not find a user by that name."
if t not in match.refs:
return f"{t} is not a match referee!"
if t is match.host:
return "The host is always a referee!"
match._refs.remove(t)
return f"{t.name} removed from match referees."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_listref(ctx: Context, match: "Match") -> Optional[str]:
"""List all referees from the current match."""
return ", ".join(map(str, match.refs)) + "."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_lock(ctx: Context, match: "Match") -> Optional[str]:
"""Lock all unused slots in the current match."""
for slot in match.slots:
if slot.status == SlotStatus.open:
slot.status = SlotStatus.locked
match.enqueue_state()
return "All unused slots locked."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_unlock(ctx: Context, match: "Match") -> Optional[str]:
"""Unlock locked slots in the current match."""
for slot in match.slots:
if slot.status == SlotStatus.locked:
slot.status = SlotStatus.open
match.enqueue_state()
return "All locked slots unlocked."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_teams(ctx: Context, match: "Match") -> Optional[str]:
"""Change the team type for the current match."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp teams <type>"
team_type = ctx.args[0]
if team_type in ("ffa", "freeforall", "head-to-head"):
match.team_type = MatchTeamTypes.head_to_head
elif team_type in ("tag", "coop", "co-op", "tag-coop"):
match.team_type = MatchTeamTypes.tag_coop
elif team_type in ("teams", "team-vs", "teams-vs"):
match.team_type = MatchTeamTypes.team_vs
elif team_type in ("tag-teams", "tag-team-vs", "tag-teams-vs"):
match.team_type = MatchTeamTypes.tag_team_vs
else:
return "Unknown team type. (ffa, tag, teams, tag-teams)"
# find the new appropriate default team.
# defaults are (ffa: neutral, teams: red).
if match.team_type in (MatchTeamTypes.head_to_head, MatchTeamTypes.tag_coop):
new_t = MatchTeams.neutral
else:
new_t = MatchTeams.red
# change each active slot's team to
# fit the corresponding team type.
for s in match.slots:
if s.status & SlotStatus.has_player:
s.team = new_t
if match.is_scrimming:
# reset score if scrimming.
match.reset_scrim()
match.enqueue_state()
return "Match team type updated."
@mp_commands.add(Privileges.NORMAL, aliases=["cond"])
@ensure_match
async def mp_condition(ctx: Context, match: "Match") -> Optional[str]:
"""Change the win condition for the match."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp condition <type>"
cond = ctx.args[0]
if cond == "pp":
# special case - pp can't actually be used as an ingame
# win condition, but gulag allows it to be passed into
# this command during a scrim to use pp as a win cond.
if not match.is_scrimming:
return "PP is only useful as a win condition during scrims."
if match.use_pp_scoring:
return "PP scoring already enabled."
match.use_pp_scoring = True
else:
if match.use_pp_scoring:
match.use_pp_scoring = False
if cond == "score":
match.win_condition = MatchWinConditions.score
elif cond in ("accuracy", "acc"):
match.win_condition = MatchWinConditions.accuracy
elif cond == "combo":
match.win_condition = MatchWinConditions.combo
elif cond in ("scorev2", "v2"):
match.win_condition = MatchWinConditions.scorev2
else:
return "Invalid win condition. (score, acc, combo, scorev2, *pp)"
match.enqueue_state(lobby=False)
return "Match win condition updated."
@mp_commands.add(Privileges.NORMAL, aliases=["autoref"])
@ensure_match
async def mp_scrim(ctx: Context, match: "Match") -> Optional[str]:
"""Start a scrim in the current match."""
if len(ctx.args) != 1 or not (r_match := regexes.BEST_OF.fullmatch(ctx.args[0])):
return "Invalid syntax: !mp scrim <bo#>"
if not 0 <= (best_of := int(r_match[1])) < 16:
return "Best of must be in range 0-15."
winning_pts = (best_of // 2) + 1
if winning_pts != 0:
# setting to real num
if match.is_scrimming:
return "Already scrimming!"
if best_of % 2 == 0:
return "Best of must be an odd number!"
match.is_scrimming = True
msg = (
f"A scrimmage has been started by {ctx.player.name}; "
f"first to {winning_pts} points wins. Best of luck!"
)
else:
# setting to 0
if not match.is_scrimming:
return "Not currently scrimming!"
match.is_scrimming = False
match.reset_scrim()
msg = "Scrimming cancelled."
match.winning_pts = winning_pts
return msg
@mp_commands.add(Privileges.NORMAL, aliases=["end"])
@ensure_match
async def mp_endscrim(ctx: Context, match: "Match") -> Optional[str]:
"""End the current matches ongoing scrim."""
if not match.is_scrimming:
return "Not currently scrimming!"
match.is_scrimming = False
match.reset_scrim()
return "Scrimmage ended." # TODO: final score (get_score method?)
@mp_commands.add(Privileges.NORMAL, aliases=["rm"])
@ensure_match
async def mp_rematch(ctx: Context, match: "Match") -> Optional[str]:
"""Restart a scrim, or roll back previous match point."""
if ctx.args:
return "Invalid syntax: !mp rematch"
if ctx.player is not match.host:
return "Only available to the host."
if not match.is_scrimming:
if match.winning_pts == 0:
msg = "No scrim to rematch; to start one, use !mp scrim."
else:
# re-start scrimming with old points
match.is_scrimming = True
msg = (
f"A rematch has been started by {ctx.player.name}; "
f"first to {match.winning_pts} points wins. Best of luck!"
)
else:
# reset the last match point awarded
if not match.winners:
return "No match points have yet been awarded!"
if (recent_winner := match.winners[-1]) is None:
return "The last point was a tie!"
match.match_points[recent_winner] -= 1 # TODO: team name
match.winners.pop()
msg = f"A point has been deducted from {recent_winner}."
return msg
@mp_commands.add(Privileges.ADMINISTRATOR, aliases=["f"], hidden=True)
@ensure_match
async def mp_force(ctx: Context, match: "Match") -> Optional[str]:
"""Force a player into the current match by name."""
# NOTE: this overrides any limits such as silences or passwd.
if len(ctx.args) != 1:
return "Invalid syntax: !mp force <name>"
if not (t := app.state.sessions.players.get(name=ctx.args[0])):
return "Could not find a user by that name."
t.join_match(match, match.passwd)
return "Welcome."
# mappool-related mp commands
@mp_commands.add(Privileges.NORMAL, aliases=["lp"])
@ensure_match
async def mp_loadpool(ctx: Context, match: "Match") -> Optional[str]:
"""Load a mappool into the current match."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp loadpool <name>"
if ctx.player is not match.host:
return "Only available to the host."
name = ctx.args[0]
if not (pool := app.state.sessions.pools.get_by_name(name)):
return "Could not find a pool by that name!"
if match.pool is pool:
return f"{pool!r} already selected!"
match.pool = pool
return f"{pool!r} selected."
@mp_commands.add(Privileges.NORMAL, aliases=["ulp"])
@ensure_match
async def mp_unloadpool(ctx: Context, match: "Match") -> Optional[str]:
"""Unload the current matches mappool."""
if ctx.args:
return "Invalid syntax: !mp unloadpool"
if ctx.player is not match.host:
return "Only available to the host."
if not match.pool:
return "No mappool currently selected!"
match.pool = None
return "Mappool unloaded."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_ban(ctx: Context, match: "Match") -> Optional[str]:
"""Ban a pick in the currently loaded mappool."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp ban <pick>"
if not match.pool:
return "No pool currently selected!"
mods_slot = ctx.args[0]
# separate mods & slot
if not (r_match := regexes.MAPPOOL_PICK.fullmatch(mods_slot)):
return "Invalid pick syntax; correct example: HD2"
# not calling mods.filter_invalid_combos here intentionally.
mods = Mods.from_modstr(r_match[1])
slot = int(r_match[2])
if (mods, slot) not in match.pool.maps:
return f"Found no {mods_slot} pick in the pool."
if (mods, slot) in match.bans:
return "That pick is already banned!"
match.bans.add((mods, slot))
return f"{mods_slot} banned."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_unban(ctx: Context, match: "Match") -> Optional[str]:
"""Unban a pick in the currently loaded mappool."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp unban <pick>"
if not match.pool:
return "No pool currently selected!"
mods_slot = ctx.args[0]
# separate mods & slot
if not (r_match := regexes.MAPPOOL_PICK.fullmatch(mods_slot)):
return "Invalid pick syntax; correct example: HD2"
# not calling mods.filter_invalid_combos here intentionally.
mods = Mods.from_modstr(r_match[1])
slot = int(r_match[2])
if (mods, slot) not in match.pool.maps:
return f"Found no {mods_slot} pick in the pool."
if (mods, slot) not in match.bans:
return "That pick is not currently banned!"
match.bans.remove((mods, slot))
return f"{mods_slot} unbanned."
@mp_commands.add(Privileges.NORMAL)
@ensure_match
async def mp_pick(ctx: Context, match: "Match") -> Optional[str]:
"""Pick a map from the currently loaded mappool."""
if len(ctx.args) != 1:
return "Invalid syntax: !mp pick <pick>"
if not match.pool:
return "No pool currently loaded!"
mods_slot = ctx.args[0]
# separate mods & slot
if not (r_match := regexes.MAPPOOL_PICK.fullmatch(mods_slot)):
return "Invalid pick syntax; correct example: HD2"
# not calling mods.filter_invalid_combos here intentionally.
mods = Mods.from_modstr(r_match[1])
slot = int(r_match[2])
if (mods, slot) not in match.pool.maps:
return f"Found no {mods_slot} pick in the pool."
if (mods, slot) in match.bans:
return f"{mods_slot} has been banned from being picked."
# update match beatmap to the picked map.
bmap = match.pool.maps[(mods, slot)]
match.map_md5 = bmap.md5
match.map_id = bmap.id
match.map_name = bmap.full
# TODO: some kind of abstraction allowing
# for something like !mp pick fm.
if match.freemods:
# if freemods are enabled, disable them.
match.freemods = False
for s in match.slots:
if s.status & SlotStatus.has_player:
s.mods = Mods.NOMOD
# update match mods to the picked map.
match.mods = mods
match.enqueue_state()
return f"Picked {bmap.embed}. ({mods_slot})"
""" Mappool management commands
# The commands below are for event managers
# and tournament hosts/referees to help automate
# tedious processes of running tournaments.
"""
@pool_commands.add(Privileges.TOURNAMENT, aliases=["h"], hidden=True)
async def pool_help(ctx: Context) -> Optional[str]:
"""Show all documented mappool commands the player can access."""
prefix = settings.COMMAND_PREFIX
cmds = []
for cmd in pool_commands.commands:
if not cmd.doc or ctx.player.priv & cmd.priv != cmd.priv:
# no doc, or insufficient permissions.
continue
cmds.append(f"{prefix}pool {cmd.triggers[0]}: {cmd.doc}")
return "\n".join(cmds)
@pool_commands.add(Privileges.TOURNAMENT, aliases=["c"], hidden=True)
async def pool_create(ctx: Context) -> Optional[str]:
"""Add a new mappool to the database."""
if len(ctx.args) != 1:
return "Invalid syntax: !pool create <name>"
name = ctx.args[0]
if app.state.sessions.pools.get_by_name(name):
return "Pool already exists by that name!"
# insert pool into db
await app.state.services.database.execute(
"INSERT INTO tourney_pools "
"(name, created_at, created_by) "
"VALUES (:name, NOW(), :user_id)",
{"name": name, "user_id": ctx.player.id},
)
# add to cache (get from sql for id & time)
row = await app.state.services.database.fetch_one(
"SELECT * FROM tourney_pools WHERE name = :name",
{"name": name},
)
row = dict(row) # make mutable copy
row["created_by"] = await app.state.sessions.players.from_cache_or_sql(
id=row["created_by"],
)
app.state.sessions.pools.append(MapPool(**row))
return f"{name} created."
@pool_commands.add(Privileges.TOURNAMENT, aliases=["del", "d"], hidden=True)
async def pool_delete(ctx: Context) -> Optional[str]:
"""Remove a mappool from the database."""
if len(ctx.args) != 1:
return "Invalid syntax: !pool delete <name>"
name = ctx.args[0]
if not (pool := app.state.sessions.pools.get_by_name(name)):
return "Could not find a pool by that name!"
# delete from db
await app.state.services.database.execute(
"DELETE FROM tourney_pools WHERE id = :pool_id",
{"pool_id": pool.id},
)
await app.state.services.database.execute(
"DELETE FROM tourney_pool_maps WHERE pool_id = :pool_id",
{"pool_id": pool.id},
)
# remove from cache
app.state.sessions.pools.remove(pool)
return f"{name} deleted."
@pool_commands.add(Privileges.TOURNAMENT, aliases=["a"], hidden=True)
async def pool_add(ctx: Context) -> Optional[str]:
"""Add a new map to a mappool in the database."""
if len(ctx.args) != 2:
return "Invalid syntax: !pool add <name> <pick>"
if time.time() >= ctx.player.last_np["timeout"]:
return "Please /np a map first!"
name, mods_slot = ctx.args
mods_slot = mods_slot.upper() # ocd
bmap = ctx.player.last_np["bmap"]
# separate mods & slot
if not (r_match := regexes.MAPPOOL_PICK.fullmatch(mods_slot)):
return "Invalid pick syntax; correct example: HD2"
if len(r_match[1]) % 2 != 0:
return "Invalid mods."
# not calling mods.filter_invalid_combos here intentionally.
mods = Mods.from_modstr(r_match[1])
slot = int(r_match[2])
if not (pool := app.state.sessions.pools.get_by_name(name)):
return "Could not find a pool by that name!"
if (mods, slot) in pool.maps:
return f"{mods_slot} is already {pool.maps[(mods, slot)].embed}!"
if bmap in pool.maps.values():
return "Map is already in the pool!"
# insert into db
await app.state.services.database.execute(
"INSERT INTO tourney_pool_maps "
"(map_id, pool_id, mods, slot) "
"VALUES (:map_id, :pool_id, :mods, :slot)",
{"map_id": bmap.id, "pool_id": pool.id, "mods": mods, "slot": slot},
)
# add to cache
pool.maps[(mods, slot)] = bmap
return f"{bmap.embed} added to {name}."
@pool_commands.add(Privileges.TOURNAMENT, aliases=["rm", "r"], hidden=True)
async def pool_remove(ctx: Context) -> Optional[str]:
"""Remove a map from a mappool in the database."""
if len(ctx.args) != 2:
return "Invalid syntax: !pool remove <name> <pick>"
name, mods_slot = ctx.args
mods_slot = mods_slot.upper() # ocd
# separate mods & slot
if not (r_match := regexes.MAPPOOL_PICK.fullmatch(mods_slot)):
return "Invalid pick syntax; correct example: HD2"
# not calling mods.filter_invalid_combos here intentionally.
mods = Mods.from_modstr(r_match[1])
slot = int(r_match[2])
if not (pool := app.state.sessions.pools.get_by_name(name)):
return "Could not find a pool by that name!"
if (mods, slot) not in pool.maps:
return f"Found no {mods_slot} pick in the pool."
# delete from db
await app.state.services.database.execute(
"DELETE FROM tourney_pool_maps WHERE mods = :mods AND slot = :slot",
{"mods": mods, "slot": slot},
)
# remove from cache
del pool.maps[(mods, slot)]
return f"{mods_slot} removed from {name}."
@pool_commands.add(Privileges.TOURNAMENT, aliases=["l"], hidden=True)
async def pool_list(ctx: Context) -> Optional[str]:
"""List all existing mappools information."""
if not (pools := app.state.sessions.pools):
return "There are currently no pools!"
l = [f"Mappools ({len(pools)})"]
for pool in pools:
l.append(
f"[{pool.created_at:%Y-%m-%d}] {pool.id}. "
f"{pool.name}, by {pool.created_by}.",
)
return "\n".join(l)
@pool_commands.add(Privileges.TOURNAMENT, aliases=["i"], hidden=True)
async def pool_info(ctx: Context) -> Optional[str]:
"""Get all information for a specific mappool."""
if len(ctx.args) != 1:
return "Invalid syntax: !pool info <name>"
name = ctx.args[0]
if not (pool := app.state.sessions.pools.get_by_name(name)):
return "Could not find a pool by that name!"
_time = pool.created_at.strftime("%H:%M:%S%p")
_date = pool.created_at.strftime("%Y-%m-%d")
datetime_fmt = f"Created at {_time} on {_date}"
l = [f"{pool.id}. {pool.name}, by {pool.created_by} | {datetime_fmt}."]
for (mods, slot), bmap in pool.maps.items():
l.append(f"{mods!r}{slot}: {bmap.embed}")
return "\n".join(l)
""" Clan managment commands
# The commands below are for managing gulag
# clans, for users, clan staff, and server staff.
"""
@clan_commands.add(Privileges.NORMAL, aliases=["h"])
async def clan_help(ctx: Context) -> Optional[str]:
"""Show all documented clan commands the player can access."""
prefix = settings.COMMAND_PREFIX
cmds = []
for cmd in clan_commands.commands:
if not cmd.doc or ctx.player.priv & cmd.priv != cmd.priv:
# no doc, or insufficient permissions.
continue
cmds.append(f"{prefix}clan {cmd.triggers[0]}: {cmd.doc}")
return "\n".join(cmds)
@clan_commands.add(Privileges.NORMAL, aliases=["c"])
async def clan_create(ctx: Context) -> Optional[str]:
"""Create a clan with a given tag & name."""
if len(ctx.args) < 2:
return "Invalid syntax: !clan create <tag> <name>"
if not 1 <= len(tag := ctx.args[0].upper()) <= 6:
return "Clan tag may be 1-6 characters long."
if not 2 <= len(name := " ".join(ctx.args[1:])) <= 16:
return "Clan name may be 2-16 characters long."
if ctx.player.clan:
return f"You're already a member of {ctx.player.clan}!"
if app.state.sessions.clans.get(name=name):
return "That name has already been claimed by another clan."
if app.state.sessions.clans.get(tag=tag):
return "That tag has already been claimed by another clan."
created_at = datetime.now()
# add clan to sql (generates id)
clan_id = await app.state.services.database.execute(
"INSERT INTO clans "
"(name, tag, created_at, owner) "
"VALUES (:name, :tag, :created_at, :user_id)",
{"name": name, "tag": tag, "created_at": created_at, "user_id": ctx.player.id},
)
# add clan to cache
clan = Clan(
id=clan_id,
name=name,
tag=tag,
created_at=created_at,
owner_id=ctx.player.id,
)
app.state.sessions.clans.append(clan)
# set owner's clan & clan priv (cache & sql)
ctx.player.clan = clan
ctx.player.clan_priv = ClanPrivileges.Owner
clan.owner_id = ctx.player.id
clan.member_ids.add(ctx.player.id)
if "full_name" in ctx.player.__dict__:
del ctx.player.full_name # wipe cached_property
await app.state.services.database.execute(
"UPDATE users "
"SET clan_id = :clan_id, "
"clan_priv = 3 " # ClanPrivileges.Owner
"WHERE id = :user_id",
{"clan_id": clan_id, "user_id": ctx.player.id},
)
# announce clan creation
if announce_chan := app.state.sessions.channels["#announce"]:
msg = f"\x01ACTION founded {clan!r}."
announce_chan.send(msg, sender=ctx.player, to_self=True)
return f"{clan!r} created."
@clan_commands.add(Privileges.NORMAL, aliases=["delete", "d"])
async def clan_disband(ctx: Context) -> Optional[str]:
"""Disband a clan (admins may disband others clans)."""
if ctx.args:
# disband a specified clan by tag
if ctx.player not in app.state.sessions.players.staff:
return "Only staff members may disband the clans of others."
if not (clan := app.state.sessions.clans.get(tag=" ".join(ctx.args).upper())):
return "Could not find a clan by that tag."
else:
# disband the player's clan
if not (clan := ctx.player.clan):
return "You're not a member of a clan!"
# delete clan from sql
await app.state.services.database.execute(
"DELETE FROM clans WHERE id = :clan_id",
{"clan_id": clan.id},
)
# remove all members from the clan,
# reset their clan privs (cache & sql).
# NOTE: only online players need be to be uncached.
for member_id in clan.member_ids:
if member := app.state.sessions.players.get(id=member_id):
member.clan = None
member.clan_priv = None
if "full_name" in member.__dict__:
del member.full_name # wipe cached_property
await app.state.services.database.execute(
"UPDATE users SET clan_id = 0, clan_priv = 0 WHERE clan_id = :clan_id",
{"clan_id": clan.id},
)
# remove clan from cache
app.state.sessions.clans.remove(clan)
# announce clan disbanding
if announce_chan := app.state.sessions.channels["#announce"]:
msg = f"\x01ACTION disbanded {clan!r}."
announce_chan.send(msg, sender=ctx.player, to_self=True)
return f"{clan!r} disbanded."
@clan_commands.add(Privileges.NORMAL, aliases=["i"])
async def clan_info(ctx: Context) -> Optional[str]:
"""Lookup information of a clan by tag."""
if not ctx.args:
return "Invalid syntax: !clan info <tag>"
if not (clan := app.state.sessions.clans.get(tag=" ".join(ctx.args).upper())):
return "Could not find a clan by that tag."
msg = [f"{clan!r} | Founded {clan.created_at:%b %d, %Y}."]
# get members privs from sql
for row in await app.state.services.database.fetch_all(
"SELECT name, clan_priv "
"FROM users "
"WHERE clan_id = :clan_id "
"ORDER BY clan_priv DESC",
{"clan_id": clan.id},
):
priv_str = ("Member", "Officer", "Owner")[row["clan_priv"] - 1]
msg.append(f"[{priv_str}] {row['name']}")
return "\n".join(msg)
@clan_commands.add(Privileges.NORMAL)
async def clan_leave(ctx: Context) -> Optional[str]:
"""Leave the clan you're in."""
p = await app.state.sessions.players.from_cache_or_sql(name=ctx.player.name)
if not p.clan:
return "You're not in a clan."
elif p.clan_priv == ClanPrivileges.Owner:
return "You must transfer your clan's ownership before leaving it. Alternatively, you can use !clan disband."
clan = p.clan
clan.remove_member(p)  # this may also clear p.clan / p.clan_priv
return f"You have successfully left {clan!r}."
# TODO: !clan inv, !clan join, !clan leave
@clan_commands.add(Privileges.NORMAL, aliases=["l"])
async def clan_list(ctx: Context) -> Optional[str]:
"""List all existing clans information."""
if ctx.args:
if len(ctx.args) != 1 or not ctx.args[0].isdecimal():
return "Invalid syntax: !clan list (page)"
else:
offset = 25 * int(ctx.args[0])
else:
offset = 0
if offset >= (total_clans := len(app.state.sessions.clans)):
return "No clans found."
msg = [f"gulag clans listing ({total_clans} total)."]
# only show the 25 clans belonging to the requested page
for idx, clan in enumerate(app.state.sessions.clans[offset : offset + 25], offset):
msg.append(f"{idx + 1}. {clan!r}")
return "\n".join(msg)
class CommandResponse(TypedDict):
resp: Optional[str]
hidden: bool
async def process_commands(
p: Player,
target: Union["Channel", Player],
msg: str,
) -> Optional[CommandResponse]:
# response is either a CommandResponse if we hit a command,
# or None if we don't have any command hits.
start_time = clock_ns()
prefix_len = len(settings.COMMAND_PREFIX)
trigger, *args = msg[prefix_len:].strip().split(" ")
# case-insensitive triggers
trigger = trigger.lower()
# check if any command sets match.
for cmd_set in command_sets:
if trigger == cmd_set.trigger:
if not args:
args = ["help"]
trigger, *args = args # get subcommand
# case-insensitive triggers
trigger = trigger.lower()
commands = cmd_set.commands
break
else:
# no set commands matched, check normal commands.
commands = regular_commands
for cmd in commands:
if trigger in cmd.triggers and p.priv & cmd.priv == cmd.priv:
# found matching trigger with sufficient privs
res = await cmd.callback(
Context(
player=p,
trigger=trigger,
args=args,
recipient=target,
),
)
if res:
# we have a message to return, include elapsed time
elapsed = cmyui.utils.magnitude_fmt_time(clock_ns() - start_time)
return {"resp": f"{res} | Elapsed: {elapsed}", "hidden": cmd.hidden}
else:
# no message to return
return {"resp": None, "hidden": False}
|
py
|
1a58356e3513b4f7810dd85f31e31727dce0d722
|
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import insert
from sqlalchemy import literal_column
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.orm import aliased
from sqlalchemy.orm import column_property
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import query_expression
from sqlalchemy.orm import relationship
from sqlalchemy.orm import with_expression
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql import sqltypes
from sqlalchemy.sql.selectable import Join as core_join
from sqlalchemy.sql.selectable import LABEL_STYLE_DISAMBIGUATE_ONLY
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing.fixtures import fixture_session
from .inheritance import _poly_fixtures
from .test_query import QueryTest
# TODO:
# composites / unions, etc.
class SelectableTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_filter_by(self):
User, Address = self.classes("User", "Address")
stmt = select(User).filter_by(name="ed")
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"WHERE users.name = :name_1",
)
def test_froms_single_table(self):
User, Address = self.classes("User", "Address")
stmt = select(User).filter_by(name="ed")
eq_(stmt.froms, [self.tables.users])
def test_froms_join(self):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
stmt = select(User).join(User.addresses)
assert stmt.froms[0].compare(users.join(addresses))
@testing.combinations(
(
lambda User: (User,),
lambda User: [
{
"name": "User",
"type": User,
"aliased": False,
"expr": User,
"entity": User,
}
],
),
(
lambda User: (User.id,),
lambda User: [
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.Integer),
"aliased": False,
"expr": User.id,
"entity": User,
}
],
),
(
lambda User, Address: (User.id, Address),
lambda User, Address: [
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.Integer),
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": "Address",
"type": Address,
"aliased": False,
"expr": Address,
"entity": Address,
},
],
),
)
def test_column_descriptions(self, cols, expected):
User, Address = self.classes("User", "Address")
cols = testing.resolve_lambda(cols, User=User, Address=Address)
expected = testing.resolve_lambda(expected, User=User, Address=Address)
stmt = select(*cols)
eq_(stmt.column_descriptions, expected)
class JoinTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_join_from_no_onclause(self):
User, Address = self.classes("User", "Address")
stmt = select(literal_column("1")).join_from(User, Address)
self.assert_compile(
stmt,
"SELECT 1 FROM users JOIN addresses "
"ON users.id = addresses.user_id",
)
def test_join_from_w_relationship(self):
User, Address = self.classes("User", "Address")
stmt = select(literal_column("1")).join_from(
User, Address, User.addresses
)
self.assert_compile(
stmt,
"SELECT 1 FROM users JOIN addresses "
"ON users.id = addresses.user_id",
)
def test_join_from_aliased_w_relationship(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
stmt = select(literal_column("1")).join_from(u1, Address, u1.addresses)
self.assert_compile(
stmt,
"SELECT 1 FROM users AS users_1 JOIN addresses "
"ON users_1.id = addresses.user_id",
)
def test_join_conflicting_right_side(self):
User, Address = self.classes("User", "Address")
stmt = select(User).join(Address, User.orders)
assert_raises_message(
exc.InvalidRequestError,
"Join target .*Address.* does not correspond to the right side "
"of join condition User.orders",
stmt.compile,
)
def test_join_from_conflicting_left_side_plain(self):
User, Address, Order = self.classes("User", "Address", "Order")
stmt = select(User).join_from(User, Address, Order.address)
assert_raises_message(
exc.InvalidRequestError,
r"explicit from clause .*User.* does not match .* Order.address",
stmt.compile,
)
def test_join_from_conflicting_left_side_mapper_vs_aliased(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
stmt = select(User).join_from(User, Address, u1.addresses)
assert_raises_message(
exc.InvalidRequestError,
# the display of the attribute here is not consistent vs.
# the straight aliased class, should improve this.
r"explicit from clause .*User.* does not match left side .*"
r"of relationship attribute AliasedClass_User.addresses",
stmt.compile,
)
def test_join_from_conflicting_left_side_aliased_vs_mapper(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
stmt = select(u1).join_from(u1, Address, User.addresses)
assert_raises_message(
exc.InvalidRequestError,
r"explicit from clause aliased\(User\) does not match left "
"side of relationship attribute User.addresses",
stmt.compile,
)
def test_join_from_we_can_explicitly_tree_joins(self):
User, Address, Order, Item, Keyword = self.classes(
"User", "Address", "Order", "Item", "Keyword"
)
stmt = (
select(User)
.join(User.addresses)
.join_from(User, Order, User.orders)
.join(Order.items)
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users JOIN addresses "
"ON users.id = addresses.user_id JOIN orders "
"ON users.id = orders.user_id JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id JOIN items "
"ON items.id = order_items_1.item_id",
)
def test_join_from_w_filter_by(self):
User, Address, Order, Item, Keyword = self.classes(
"User", "Address", "Order", "Item", "Keyword"
)
stmt = (
select(User)
.filter_by(name="n1")
.join(User.addresses)
.filter_by(email_address="a1")
.join_from(User, Order, User.orders)
.filter_by(description="d1")
.join(Order.items)
.filter_by(description="d2")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id "
"JOIN orders ON users.id = orders.user_id "
"JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id "
"WHERE users.name = :name_1 "
"AND addresses.email_address = :email_address_1 "
"AND orders.description = :description_1 "
"AND items.description = :description_2",
checkparams={
"name_1": "n1",
"email_address_1": "a1",
"description_1": "d1",
"description_2": "d2",
},
)
class LoadersInSubqueriesTest(QueryTest, AssertsCompiledSQL):
"""The Query object calls eanble_eagerloads(False) when you call
.subquery(). With Core select, we don't have that information, we instead
have to look at the "toplevel" flag to know where we are. make sure
the many different combinations that these two objects and still
too many flags at the moment work as expected on the outside.
"""
__dialect__ = "default"
run_setup_mappers = None
@testing.fixture
def joinedload_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties={"addresses": relationship(Address, lazy="joined")},
)
mapper(Address, addresses)
return User, Address
def test_no_joinedload_in_subquery_select_rows(self, joinedload_fixture):
User, Address = joinedload_fixture
sess = fixture_session()
stmt1 = sess.query(User).subquery()
stmt1 = sess.query(stmt1)
stmt2 = select(User).subquery()
stmt2 = select(stmt2)
expected = (
"SELECT anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users) AS anon_1"
)
self.assert_compile(
stmt1._final_statement(legacy_query_style=False),
expected,
)
self.assert_compile(stmt2, expected)
def test_no_joinedload_in_subquery_select_entity(self, joinedload_fixture):
User, Address = joinedload_fixture
sess = fixture_session()
stmt1 = sess.query(User).subquery()
ua = aliased(User, stmt1)
stmt1 = sess.query(ua)
stmt2 = select(User).subquery()
ua = aliased(User, stmt2)
stmt2 = select(ua)
expected = (
"SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"(SELECT users.id AS id, users.name AS name FROM users) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.id = addresses_1.user_id"
)
self.assert_compile(
stmt1._final_statement(legacy_query_style=False),
expected,
)
self.assert_compile(stmt2, expected)
# TODO: need to test joinedload options, deferred mappings, deferred
# options. these are all loader options that should *only* have an
# effect on the outermost statement, never a subquery.
class ExtraColsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = None
@testing.fixture
def query_expression_fixture(self):
users, User = (
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
properties=util.OrderedDict([("value", query_expression())]),
)
return User
@testing.fixture
def column_property_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties=util.OrderedDict(
[
("concat", column_property((users.c.id * 2))),
(
"count",
column_property(
select(func.count(addresses.c.id))
.where(
users.c.id == addresses.c.user_id,
)
.correlate(users)
.scalar_subquery()
),
),
]
),
)
mapper(
Address,
addresses,
properties={
"user": relationship(
User,
)
},
)
return User, Address
@testing.fixture
def plain_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
)
mapper(
Address,
addresses,
properties={
"user": relationship(
User,
)
},
)
return User, Address
def test_no_joinedload_embedded(self, plain_fixture):
User, Address = plain_fixture
stmt = select(Address).options(joinedload(Address.user))
subq = stmt.subquery()
s2 = select(subq)
self.assert_compile(
s2,
"SELECT anon_1.id, anon_1.user_id, anon_1.email_address "
"FROM (SELECT addresses.id AS id, addresses.user_id AS "
"user_id, addresses.email_address AS email_address "
"FROM addresses) AS anon_1",
)
def test_with_expr_one(self, query_expression_fixture):
User = query_expression_fixture
stmt = select(User).options(
with_expression(User.value, User.name + "foo")
)
self.assert_compile(
stmt,
"SELECT users.name || :name_1 AS anon_1, users.id, "
"users.name FROM users",
)
def test_with_expr_two(self, query_expression_fixture):
User = query_expression_fixture
stmt = select(User.id, User.name, (User.name + "foo").label("foo"))
subq = stmt.subquery()
u1 = aliased(User, subq)
stmt = select(u1).options(with_expression(u1.value, subq.c.foo))
self.assert_compile(
stmt,
"SELECT anon_1.foo, anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name, "
"users.name || :name_1 AS foo FROM users) AS anon_1",
)
def test_joinedload_outermost(self, plain_fixture):
User, Address = plain_fixture
stmt = select(Address).options(joinedload(Address.user))
# render joined eager loads with stringify
self.assert_compile(
stmt,
"SELECT addresses.id, addresses.user_id, addresses.email_address, "
"users_1.id AS id_1, users_1.name FROM addresses "
"LEFT OUTER JOIN users AS users_1 "
"ON users_1.id = addresses.user_id",
)
def test_contains_eager_outermost(self, plain_fixture):
User, Address = plain_fixture
stmt = (
select(Address)
.join(Address.user)
.options(contains_eager(Address.user))
)
# render joined eager loads with stringify
self.assert_compile(
stmt,
"SELECT users.id, users.name, addresses.id AS id_1, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses JOIN users ON users.id = addresses.user_id",
)
def test_column_properties(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
stmt = select(User)
self.assert_compile(
stmt,
"SELECT users.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users.id = addresses.user_id) AS anon_2, users.id, "
"users.name FROM users",
checkparams={"id_1": 2},
)
def test_column_properties_can_we_use(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
# User, Address = column_property_fixture
# stmt = select(User)
# TODO: shouldn't we be able to get at count ?
# stmt = stmt.where(stmt.selected_columns.count > 5)
# self.assert_compile(stmt, "")
def test_column_properties_subquery(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
stmt = select(User)
# here, the subquery needs to export the columns that include
# the column properties
stmt = select(stmt.subquery())
# TODO: shouldn't we be able to get to stmt.subquery().c.count?
self.assert_compile(
stmt,
"SELECT anon_2.anon_1, anon_2.anon_3, anon_2.id, anon_2.name "
"FROM (SELECT users.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users.id = addresses.user_id) AS anon_3, users.id AS id, "
"users.name AS name FROM users) AS anon_2",
checkparams={"id_1": 2},
)
def test_column_properties_subquery_two(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
# col properties will retain anonymous labels, however will
# adopt the .key within the subquery collection so they can
# be addressed.
stmt = select(
User.id,
User.name,
User.concat,
User.count,
)
subq = stmt.subquery()
# here, the subquery needs to export the columns that include
# the column properties
stmt = select(subq).where(subq.c.concat == "foo")
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.name, anon_1.anon_2, anon_1.anon_3 "
"FROM (SELECT users.id AS id, users.name AS name, "
"users.id * :id_1 AS anon_2, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE users.id = addresses.user_id) AS anon_3 "
"FROM users) AS anon_1 WHERE anon_1.anon_2 = :param_1",
checkparams={"id_1": 2, "param_1": "foo"},
)
def test_column_properties_aliased_subquery(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
u1 = aliased(User)
stmt = select(u1)
# here, the subquery needs to export the columns that include
# the column properties
stmt = select(stmt.subquery())
self.assert_compile(
stmt,
"SELECT anon_2.anon_1, anon_2.anon_3, anon_2.id, anon_2.name "
"FROM (SELECT users_1.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users_1.id = addresses.user_id) AS anon_3, "
"users_1.id AS id, users_1.name AS name "
"FROM users AS users_1) AS anon_2",
checkparams={"id_1": 2},
)
class RelationshipNaturalCompileTest(QueryTest, AssertsCompiledSQL):
"""test using core join() with relationship attributes.
as __clause_element__() produces a workable SQL expression, this should
be generally possible.
However, it can't work for many-to-many relationships, as these
require two joins. Only the ORM can look at the entities and decide
that there's a separate "secondary" table to be rendered as a separate
join.
"""
__dialect__ = "default"
def test_of_type_implicit_join(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
a1 = aliased(Address)
stmt1 = select(u1).where(u1.addresses.of_type(a1))
stmt2 = (
fixture_session()
.query(u1)
.filter(u1.addresses.of_type(a1))
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT users_1.id, users_1.name FROM users AS users_1, "
"addresses AS addresses_1 WHERE users_1.id = addresses_1.user_id"
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_of_type_explicit_join(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
a1 = aliased(Address)
stmt = select(u1).join(u1.addresses.of_type(a1))
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name FROM users AS users_1 "
"JOIN addresses AS addresses_1 "
"ON users_1.id = addresses_1.user_id",
)
def test_many_to_many_explicit_join(self):
Item, Keyword = self.classes("Item", "Keyword")
stmt = select(Item).join(Keyword, Item.keywords)
self.assert_compile(
stmt,
"SELECT items.id, items.description FROM items "
"JOIN item_keywords AS item_keywords_1 "
"ON items.id = item_keywords_1.item_id "
"JOIN keywords ON keywords.id = item_keywords_1.keyword_id",
)
def test_many_to_many_implicit_join(self):
Item, Keyword = self.classes("Item", "Keyword")
stmt = select(Item).where(Item.keywords)
# this was the intent of the primary + secondary clauseelement.
# it can do enough of the right thing in an implicit join
# context.
self.assert_compile(
stmt,
"SELECT items.id, items.description FROM items, "
"item_keywords AS item_keywords_1, keywords "
"WHERE items.id = item_keywords_1.item_id "
"AND keywords.id = item_keywords_1.keyword_id",
)
class InheritedTest(_poly_fixtures._Polymorphic):
run_setup_mappers = "once"
class ExplicitWithPolymorphicTest(
_poly_fixtures._PolymorphicUnions, AssertsCompiledSQL
):
__dialect__ = "default"
default_punion = (
"(SELECT pjoin.person_id AS person_id, "
"pjoin.company_id AS company_id, "
"pjoin.name AS name, pjoin.type AS type, "
"pjoin.status AS status, pjoin.engineer_name AS engineer_name, "
"pjoin.primary_language AS primary_language, "
"pjoin.manager_name AS manager_name "
"FROM (SELECT engineers.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"CAST(NULL AS VARCHAR(50)) AS manager_name "
"FROM people JOIN engineers ON people.person_id = engineers.person_id "
"UNION ALL SELECT managers.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, managers.status AS status, "
"CAST(NULL AS VARCHAR(50)) AS engineer_name, "
"CAST(NULL AS VARCHAR(50)) AS primary_language, "
"managers.manager_name AS manager_name FROM people "
"JOIN managers ON people.person_id = managers.person_id) AS pjoin) "
"AS anon_1"
)
def test_subquery_col_expressions_wpoly_one(self):
Person, Manager, Engineer = self.classes(
"Person", "Manager", "Engineer"
)
wp1 = with_polymorphic(Person, [Manager, Engineer])
subq1 = select(wp1).subquery()
wp2 = with_polymorphic(Person, [Engineer, Manager])
subq2 = select(wp2).subquery()
# the first thing we see is that when we go through with_polymorphic,
# the entities that get placed into the aliased class go through
# Mapper._mappers_from_spec(), which matches them up to the
# existing Mapper.self_and_descendants collection, meaning,
# the order is the same every time. Assert here that's still
# happening. If a future internal change modifies this assumption,
# that's not necessarily bad, but it would change things.
eq_(
subq1.c.keys(),
[
"person_id",
"company_id",
"name",
"type",
"person_id_1",
"status",
"engineer_name",
"primary_language",
"person_id_1",
"status_1",
"manager_name",
],
)
eq_(
subq2.c.keys(),
[
"person_id",
"company_id",
"name",
"type",
"person_id_1",
"status",
"engineer_name",
"primary_language",
"person_id_1",
"status_1",
"manager_name",
],
)
def test_subquery_col_expressions_wpoly_two(self):
Person, Manager, Engineer = self.classes(
"Person", "Manager", "Engineer"
)
wp1 = with_polymorphic(Person, [Manager, Engineer])
subq1 = select(wp1).subquery()
stmt = select(subq1).where(
or_(
subq1.c.engineer_name == "dilbert",
subq1.c.manager_name == "dogbert",
)
)
self.assert_compile(
stmt,
"SELECT anon_1.person_id, anon_1.company_id, anon_1.name, "
"anon_1.type, anon_1.person_id AS person_id_1, anon_1.status, "
"anon_1.engineer_name, anon_1.primary_language, "
"anon_1.person_id AS person_id_2, anon_1.status AS status_1, "
"anon_1.manager_name FROM "
"%s WHERE "
"anon_1.engineer_name = :engineer_name_1 "
"OR anon_1.manager_name = :manager_name_1" % (self.default_punion),
)
class ImplicitWithPolymorphicTest(
_poly_fixtures._PolymorphicUnions, AssertsCompiledSQL
):
"""Test a series of mappers with a very awkward with_polymorphic setting,
that tables and columns are rendered using the selectable in the correct
contexts. PolymorphicUnions represent the most awkward and verbose
polymorphic fixtures you can have. expressions need to be maximally
accurate in terms of the mapped selectable in order to produce correct
queries, which also will be really wrong if that mapped selectable is not
in use.
"""
__dialect__ = "default"
def test_select_columns_where_baseclass(self):
Person = self.classes.Person
stmt = (
select(Person.person_id, Person.name)
.where(Person.name == "some name")
.order_by(Person.person_id)
)
sess = fixture_session()
q = (
sess.query(Person.person_id, Person.name)
.filter(Person.name == "some name")
.order_by(Person.person_id)
)
expected = (
"SELECT pjoin.person_id, pjoin.name FROM "
"(SELECT engineers.person_id AS person_id, people.company_id AS "
"company_id, people.name AS name, people.type AS type, "
"engineers.status AS status, engineers.engineer_name AS "
"engineer_name, engineers.primary_language AS primary_language, "
"CAST(NULL AS VARCHAR(50)) AS manager_name FROM people "
"JOIN engineers ON people.person_id = engineers.person_id "
"UNION ALL SELECT managers.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, managers.status AS status, "
"CAST(NULL AS VARCHAR(50)) AS engineer_name, "
"CAST(NULL AS VARCHAR(50)) AS primary_language, "
"managers.manager_name AS manager_name FROM people "
"JOIN managers ON people.person_id = managers.person_id) AS "
"pjoin WHERE pjoin.name = :name_1 ORDER BY pjoin.person_id"
)
self.assert_compile(stmt, expected)
self.assert_compile(
q._final_statement(legacy_query_style=False),
expected,
)
def test_select_where_baseclass(self):
Person = self.classes.Person
stmt = (
select(Person)
.where(Person.name == "some name")
.order_by(Person.person_id)
)
sess = fixture_session()
q = (
sess.query(Person)
.filter(Person.name == "some name")
.order_by(Person.person_id)
)
expected = (
"SELECT pjoin.person_id, pjoin.company_id, pjoin.name, "
"pjoin.type, pjoin.status, pjoin.engineer_name, "
"pjoin.primary_language, pjoin.manager_name FROM "
"(SELECT engineers.person_id AS person_id, people.company_id "
"AS company_id, people.name AS name, people.type AS type, "
"engineers.status AS status, engineers.engineer_name AS "
"engineer_name, engineers.primary_language AS primary_language, "
"CAST(NULL AS VARCHAR(50)) AS manager_name FROM people "
"JOIN engineers ON people.person_id = engineers.person_id "
"UNION ALL SELECT managers.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, managers.status AS status, "
"CAST(NULL AS VARCHAR(50)) AS engineer_name, "
"CAST(NULL AS VARCHAR(50)) AS primary_language, "
"managers.manager_name AS manager_name FROM people "
"JOIN managers ON people.person_id = managers.person_id) AS "
"pjoin WHERE pjoin.name = :name_1 ORDER BY pjoin.person_id"
)
self.assert_compile(stmt, expected)
self.assert_compile(
q._final_statement(legacy_query_style=False),
expected,
)
def test_select_where_subclass(self):
Engineer = self.classes.Engineer
# what will *not* work with Core, that the ORM does for now,
# is that if you do where/orderby Person.column, it will de-adapt
# the Person columns from the polymorphic union
stmt = (
select(Engineer)
.where(Engineer.name == "some name")
.order_by(Engineer.person_id)
)
sess = fixture_session()
q = (
sess.query(Engineer)
.filter(Engineer.name == "some name")
.order_by(Engineer.person_id)
)
plain_expected = ( # noqa
"SELECT engineers.person_id, people.person_id, people.company_id, "
"people.name, "
"people.type, engineers.status, "
"engineers.engineer_name, engineers.primary_language "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE people.name = :name_1 ORDER BY engineers.person_id"
)
# when we have disambiguating labels turned on
disambiguate_expected = ( # noqa
"SELECT engineers.person_id, people.person_id AS person_id_1, "
"people.company_id, "
"people.name, "
"people.type, engineers.status, "
"engineers.engineer_name, engineers.primary_language "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE people.name = :name_1 ORDER BY engineers.person_id"
)
# these change based on how we decide to apply labels
# in context.py
self.assert_compile(stmt, disambiguate_expected)
self.assert_compile(
q._final_statement(legacy_query_style=False),
disambiguate_expected,
)
def test_select_where_columns_subclass(self):
Engineer = self.classes.Engineer
# what will *not* work with Core, that the ORM does for now,
# is that if you do where/orderby Person.column, it will de-adapt
# the Person columns from the polymorphic union
# After many attempts to get the JOIN to render, by annotating
# the columns with the "join" that they come from and trying to
# get Select() to render out that join, there's no approach
# that really works without stepping on other assumptions, so
# add select_from(Engineer) explicitly. It's still puzzling why the
# ORM seems to know how to make this decision more effectively
# when the select() has the same amount of information.
stmt = (
select(Engineer.person_id, Engineer.name)
.where(Engineer.name == "some name")
.select_from(Engineer)
.order_by(Engineer.person_id)
)
sess = fixture_session()
q = (
sess.query(Engineer.person_id, Engineer.name)
.filter(Engineer.name == "some name")
.order_by(Engineer.person_id)
)
expected = (
"SELECT engineers.person_id, people.name "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE people.name = :name_1 ORDER BY engineers.person_id"
)
self.assert_compile(stmt, expected)
self.assert_compile(
q._final_statement(legacy_query_style=False),
expected,
)
class RelationshipNaturalInheritedTest(InheritedTest, AssertsCompiledSQL):
__dialect__ = "default"
straight_company_to_person_expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN people ON companies.company_id = people.company_id"
)
default_pjoin = (
"(people LEFT OUTER "
"JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id "
"LEFT OUTER JOIN boss ON managers.person_id = boss.boss_id) "
"ON companies.company_id = people.company_id"
)
flat_aliased_pjoin = (
"(people AS people_1 LEFT OUTER JOIN engineers AS "
"engineers_1 ON people_1.person_id = engineers_1.person_id "
"LEFT OUTER JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id "
"LEFT OUTER JOIN boss AS boss_1 ON "
"managers_1.person_id = boss_1.boss_id) "
"ON companies.company_id = people_1.company_id"
)
aliased_pjoin = (
"(SELECT people.person_id AS people_person_id, people.company_id "
"AS people_company_id, people.name AS people_name, people.type "
"AS people_type, engineers.person_id AS engineers_person_id, "
"engineers.status AS engineers_status, engineers.engineer_name "
"AS engineers_engineer_name, engineers.primary_language "
"AS engineers_primary_language, managers.person_id "
"AS managers_person_id, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name, "
"boss.boss_id AS boss_boss_id, boss.golf_swing AS boss_golf_swing "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers ON "
"people.person_id = managers.person_id LEFT OUTER JOIN boss "
"ON managers.person_id = boss.boss_id) AS anon_1 "
"ON companies.company_id = anon_1.people_company_id"
)
person_paperwork_expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN people ON companies.company_id = people.company_id "
"JOIN paperwork ON people.person_id = paperwork.person_id"
)
c_to_p_whereclause = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN people ON companies.company_id = people.company_id "
"WHERE people.name = :name_1"
)
poly_columns = "SELECT people.person_id FROM people"
def test_straight(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
stmt1 = select(Company).select_from(
orm_join(Company, Person, Company.employees)
)
stmt2 = select(Company).join(Company.employees)
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, self.straight_company_to_person_expected)
self.assert_compile(stmt2, self.straight_company_to_person_expected)
self.assert_compile(stmt3, self.straight_company_to_person_expected)
def test_columns(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
stmt = select(Person.person_id)
self.assert_compile(stmt, self.poly_columns)
def test_straight_whereclause(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
stmt1 = (
select(Company)
.select_from(orm_join(Company, Person, Company.employees))
.where(Person.name == "ed")
)
stmt2 = (
select(Company).join(Company.employees).where(Person.name == "ed")
)
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees)
.filter(Person.name == "ed")
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, self.c_to_p_whereclause)
self.assert_compile(stmt2, self.c_to_p_whereclause)
self.assert_compile(stmt3, self.c_to_p_whereclause)
def test_two_level(self):
Company, Person, Paperwork = self.classes(
"Company", "Person", "Paperwork"
)
stmt1 = select(Company).select_from(
orm_join(Company, Person, Company.employees).join(
Paperwork, Person.paperwork
)
)
stmt2 = select(Company).join(Company.employees).join(Person.paperwork)
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees)
.join(Person.paperwork)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, self.person_paperwork_expected)
self.assert_compile(stmt2, self.person_paperwork_expected)
self.assert_compile(stmt3, self.person_paperwork_expected)
def test_wpoly_of_type(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
p1 = with_polymorphic(Person, "*")
stmt1 = select(Company).select_from(
orm_join(Company, p1, Company.employees.of_type(p1))
)
stmt2 = select(Company).join(Company.employees.of_type(p1))
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees.of_type(p1))
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT companies.company_id, companies.name "
"FROM companies JOIN %s" % self.default_pjoin
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
self.assert_compile(stmt3, expected)
def test_wpoly_aliased_of_type(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
s = fixture_session()
p1 = with_polymorphic(Person, "*", aliased=True)
stmt1 = select(Company).select_from(
orm_join(Company, p1, Company.employees.of_type(p1))
)
stmt2 = select(Company).join(p1, Company.employees.of_type(p1))
stmt3 = (
s.query(Company)
.join(Company.employees.of_type(p1))
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN %s" % self.aliased_pjoin
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
self.assert_compile(stmt3, expected)
def test_wpoly_aliased_flat_of_type(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
p1 = with_polymorphic(Person, "*", aliased=True, flat=True)
stmt1 = select(Company).select_from(
orm_join(Company, p1, Company.employees.of_type(p1))
)
stmt2 = select(Company).join(p1, Company.employees.of_type(p1))
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees.of_type(p1))
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN %s" % self.flat_aliased_pjoin
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
self.assert_compile(stmt3, expected)
class RelNaturalAliasedJoinsTest(
_poly_fixtures._PolymorphicAliasedJoins, RelationshipNaturalInheritedTest
):
# this is the label style for the polymorphic selectable, not the
# outside query
label_style = LABEL_STYLE_TABLENAME_PLUS_COL
straight_company_to_person_expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN (SELECT people.person_id AS people_person_id, people.company_id "
"AS people_company_id, people.name AS people_name, people.type "
"AS people_type, engineers.person_id AS engineers_person_id, "
"engineers.status AS engineers_status, engineers.engineer_name "
"AS engineers_engineer_name, engineers.primary_language AS "
"engineers_primary_language, managers.person_id AS "
"managers_person_id, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
"managers.person_id) AS pjoin ON companies.company_id = "
"pjoin.people_company_id"
)
person_paperwork_expected = (
"SELECT companies.company_id, companies.name FROM companies JOIN "
"(SELECT people.person_id AS people_person_id, people.company_id "
"AS people_company_id, people.name AS people_name, people.type "
"AS people_type, engineers.person_id AS engineers_person_id, "
"engineers.status AS engineers_status, engineers.engineer_name "
"AS engineers_engineer_name, engineers.primary_language AS "
"engineers_primary_language, managers.person_id AS "
"managers_person_id, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.people_company_id "
"JOIN paperwork ON pjoin.people_person_id = paperwork.person_id"
)
default_pjoin = (
"(SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, people.name AS people_name, "
"people.type AS people_type, engineers.person_id AS "
"engineers_person_id, engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language, "
"managers.person_id AS managers_person_id, managers.status "
"AS managers_status, managers.manager_name AS managers_manager_name "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS pjoin "
"ON companies.company_id = pjoin.people_company_id"
)
flat_aliased_pjoin = (
"(SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, people.name AS people_name, "
"people.type AS people_type, engineers.person_id "
"AS engineers_person_id, engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language, "
"managers.person_id AS managers_person_id, "
"managers.status AS managers_status, managers.manager_name "
"AS managers_manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin_1 ON companies.company_id = pjoin_1.people_company_id"
)
aliased_pjoin = (
"(SELECT people.person_id AS people_person_id, people.company_id "
"AS people_company_id, people.name AS people_name, "
"people.type AS people_type, engineers.person_id AS "
"engineers_person_id, engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language, "
"managers.person_id AS managers_person_id, managers.status "
"AS managers_status, managers.manager_name AS managers_manager_name "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS pjoin_1 "
"ON companies.company_id = pjoin_1.people_company_id"
)
c_to_p_whereclause = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN (SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, people.name AS people_name, "
"people.type AS people_type, engineers.person_id AS "
"engineers_person_id, engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language, "
"managers.person_id AS managers_person_id, managers.status "
"AS managers_status, managers.manager_name AS managers_manager_name "
"FROM people LEFT OUTER JOIN engineers "
"ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.people_company_id "
"WHERE pjoin.people_name = :people_name_1"
)
poly_columns = (
"SELECT pjoin.people_person_id FROM (SELECT people.person_id AS "
"people_person_id, people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"engineers.person_id AS engineers_person_id, engineers.status "
"AS engineers_status, engineers.engineer_name AS "
"engineers_engineer_name, engineers.primary_language AS "
"engineers_primary_language, managers.person_id AS "
"managers_person_id, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin"
)
class RelNaturalAliasedJoinsDisamTest(
_poly_fixtures._PolymorphicAliasedJoins, RelationshipNaturalInheritedTest
):
# this is the label style for the polymorphic selectable, not the
# outside query
label_style = LABEL_STYLE_DISAMBIGUATE_ONLY
straight_company_to_person_expected = (
"SELECT companies.company_id, companies.name FROM companies JOIN "
"(SELECT people.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.person_id AS person_id_1, "
"engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, managers.status AS status_1, "
"managers.manager_name AS manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.company_id"
)
person_paperwork_expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN (SELECT people.person_id AS person_id, people.company_id "
"AS company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, managers.person_id "
"AS person_id_2, managers.status AS status_1, managers.manager_name "
"AS manager_name FROM people LEFT OUTER JOIN engineers "
"ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.company_id "
"JOIN paperwork ON pjoin.person_id = paperwork.person_id"
)
default_pjoin = (
"(SELECT people.person_id AS person_id, people.company_id AS "
"company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, engineers.primary_language "
"AS primary_language, managers.person_id AS person_id_2, "
"managers.status AS status_1, managers.manager_name AS manager_name "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
"managers.person_id) AS pjoin "
"ON companies.company_id = pjoin.company_id"
)
flat_aliased_pjoin = (
"(SELECT people.person_id AS person_id, people.company_id AS "
"company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, managers.status AS status_1, "
"managers.manager_name AS manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin_1 ON companies.company_id = pjoin_1.company_id"
)
aliased_pjoin = (
"(SELECT people.person_id AS person_id, people.company_id AS "
"company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, engineers.primary_language "
"AS primary_language, managers.person_id AS person_id_2, "
"managers.status AS status_1, managers.manager_name AS manager_name "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
"managers.person_id) AS pjoin_1 "
"ON companies.company_id = pjoin_1.company_id"
)
c_to_p_whereclause = (
"SELECT companies.company_id, companies.name FROM companies JOIN "
"(SELECT people.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.person_id AS person_id_1, "
"engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, managers.status AS status_1, "
"managers.manager_name AS manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.company_id "
"WHERE pjoin.name = :name_1"
)
poly_columns = (
"SELECT pjoin.person_id FROM (SELECT people.person_id AS "
"person_id, people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.person_id AS person_id_1, "
"engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, "
"managers.status AS status_1, managers.manager_name AS manager_name "
"FROM people LEFT OUTER JOIN engineers "
"ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS pjoin"
)
class RawSelectTest(QueryTest, AssertsCompiledSQL):
"""older tests from test_query. Here, they are converted to use
future selects with ORM compilation.
"""
__dialect__ = "default"
def test_select_from_entity(self):
User = self.classes.User
self.assert_compile(
select(literal_column("*")).select_from(User),
"SELECT * FROM users",
)
def test_where_relationship(self):
User = self.classes.User
stmt1 = select(User).where(User.addresses)
stmt2 = (
fixture_session()
.query(User)
.filter(User.addresses)
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT users.id, users.name FROM users, addresses "
"WHERE users.id = addresses.user_id"
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_where_m2m_relationship(self):
Item = self.classes.Item
expected = (
"SELECT items.id, items.description FROM items, "
"item_keywords AS item_keywords_1, keywords "
"WHERE items.id = item_keywords_1.item_id "
"AND keywords.id = item_keywords_1.keyword_id"
)
stmt1 = select(Item).where(Item.keywords)
stmt2 = (
fixture_session()
.query(Item)
.filter(Item.keywords)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_inline_select_from_entity(self):
User = self.classes.User
expected = "SELECT * FROM users"
stmt1 = select(literal_column("*")).select_from(User)
stmt2 = (
fixture_session()
.query(literal_column("*"))
.select_from(User)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_select_from_aliased_entity(self):
User = self.classes.User
ua = aliased(User, name="ua")
stmt1 = select(literal_column("*")).select_from(ua)
stmt2 = (
fixture_session()
.query(literal_column("*"))
.select_from(ua)
._final_statement(legacy_query_style=False)
)
expected = "SELECT * FROM users AS ua"
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_correlate_entity(self):
User = self.classes.User
Address = self.classes.Address
expected = (
"SELECT users.name, addresses.id, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE users.id = addresses.user_id) AS anon_1 "
"FROM users, addresses"
)
stmt1 = select(
User.name,
Address.id,
select(func.count(Address.id))
.where(User.id == Address.user_id)
.correlate(User)
.scalar_subquery(),
)
stmt2 = (
fixture_session()
.query(
User.name,
Address.id,
select(func.count(Address.id))
.where(User.id == Address.user_id)
.correlate(User)
.scalar_subquery(),
)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_correlate_aliased_entity(self):
User = self.classes.User
Address = self.classes.Address
uu = aliased(User, name="uu")
stmt1 = select(
uu.name,
Address.id,
select(func.count(Address.id))
.where(uu.id == Address.user_id)
.correlate(uu)
.scalar_subquery(),
)
stmt2 = (
fixture_session()
.query(
uu.name,
Address.id,
select(func.count(Address.id))
.where(uu.id == Address.user_id)
.correlate(uu)
.scalar_subquery(),
)
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT uu.name, addresses.id, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE uu.id = addresses.user_id) AS anon_1 "
"FROM users AS uu, addresses"
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_entity(self):
User = self.classes.User
expected = "SELECT users.id, users.name FROM users"
stmt1 = select(User)
stmt2 = (
fixture_session()
.query(User)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_columns(self):
User = self.classes.User
expected = "SELECT users.id, users.name FROM users"
stmt1 = select(User.id, User.name)
stmt2 = (
fixture_session()
.query(User.id, User.name)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_aliased_columns(self):
User = self.classes.User
ua = aliased(User, name="ua")
stmt1 = select(ua.id, ua.name)
stmt2 = (
fixture_session()
.query(ua.id, ua.name)
._final_statement(legacy_query_style=False)
)
expected = "SELECT ua.id, ua.name FROM users AS ua"
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_aliased_entity(self):
User = self.classes.User
ua = aliased(User, name="ua")
stmt1 = select(ua)
stmt2 = (
fixture_session()
.query(ua)
._final_statement(legacy_query_style=False)
)
expected = "SELECT ua.id, ua.name FROM users AS ua"
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_core_join_in_select_from_no_onclause(self):
User = self.classes.User
Address = self.classes.Address
self.assert_compile(
select(User).select_from(core_join(User, Address)),
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id",
)
def test_join_to_entity_no_onclause(self):
User = self.classes.User
Address = self.classes.Address
self.assert_compile(
select(User).join(Address),
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id",
)
def test_insert_from_query(self):
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = s.query(User.id, User.name).filter_by(name="ed")
self.assert_compile(
insert(Address).from_select(("id", "email_address"), q),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1",
)
def test_insert_from_query_col_attr(self):
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = s.query(User.id, User.name).filter_by(name="ed")
self.assert_compile(
insert(Address).from_select(
(Address.id, Address.email_address), q
),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1",
)
def test_update_from_entity(self):
from sqlalchemy.sql import update
User = self.classes.User
self.assert_compile(
update(User), "UPDATE users SET id=:id, name=:name"
)
self.assert_compile(
update(User).values(name="ed").where(User.id == 5),
"UPDATE users SET name=:name WHERE users.id = :id_1",
checkparams={"id_1": 5, "name": "ed"},
)
self.assert_compile(
update(User).values({User.name: "ed"}).where(User.id == 5),
"UPDATE users SET name=:name WHERE users.id = :id_1",
checkparams={"id_1": 5, "name": "ed"},
)
def test_delete_from_entity(self):
from sqlalchemy.sql import delete
User = self.classes.User
self.assert_compile(delete(User), "DELETE FROM users")
self.assert_compile(
delete(User).where(User.id == 5),
"DELETE FROM users WHERE users.id = :id_1",
checkparams={"id_1": 5},
)
def test_insert_from_entity(self):
from sqlalchemy.sql import insert
User = self.classes.User
self.assert_compile(
insert(User), "INSERT INTO users (id, name) VALUES (:id, :name)"
)
self.assert_compile(
insert(User).values(name="ed"),
"INSERT INTO users (name) VALUES (:name)",
checkparams={"name": "ed"},
)
def test_col_prop_builtin_function(self):
class Foo(object):
pass
mapper(
Foo,
self.tables.users,
properties={
"foob": column_property(
func.coalesce(self.tables.users.c.name)
)
},
)
stmt1 = select(Foo).where(Foo.foob == "somename").order_by(Foo.foob)
stmt2 = (
fixture_session()
.query(Foo)
.filter(Foo.foob == "somename")
.order_by(Foo.foob)
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT coalesce(users.name) AS coalesce_1, "
"users.id, users.name FROM users "
"WHERE coalesce(users.name) = :param_1 "
"ORDER BY coalesce_1"
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
|
py
|
1a58369d7507721a67feb14b286d865d32966883
|
class Table():
    """Holds the metadata of a CSV-backed table: its name, remote CSV URL and field list."""

    dir_local = '../data/'  # base directory used to build the csv_local path
    header = 0              # header row setting exposed via the headers property
    deli = ';'              # CSV delimiter exposed via the delimiter property
def __init__(self, name, csv_url, fields):
self.name = name
self.csv_url = csv_url
self.fields = fields
@property
def name(self):
return self.__name
@property
def fields(self):
return self.__fields
@property
def csv_url(self):
return self.__csv_url
@property
def csv_local(self):
return self.dir_local + self.__name + '.csv'
@property
def headers(self):
return self.header
@property
def delimiter(self):
return self.deli
@name.setter
def name(self, name):
self.__name = name
@fields.setter
def fields(self, fields):
self.__fields = fields
@csv_url.setter
def csv_url(self, csv_url):
self.__csv_url = csv_url
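

# --- Hedged usage sketch (not part of the original file) --------------------
# Assumes a hypothetical 'cities' table whose CSV lives at an arbitrary URL;
# Table only stores metadata and derives the local path, it never downloads.
if __name__ == '__main__':
    demo = Table('cities', 'https://example.com/cities.csv', ['name', 'population'])
    print(demo.csv_local)   # -> ../data/cities.csv
    print(demo.delimiter)   # -> ;
    print(demo.headers)     # -> 0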
|
py
|
1a5837a4a9c4bf261a29ba3f67840411c7e35d28
|
import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
grouping_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_grouping_so.so'))
def query_ball_point(radius, nsample, xyz1, xyz2):
'''
Input:
radius: float32, ball search radius
nsample: int32, number of points selected in each ball region
xyz1: (batch_size, ndataset, 3) float32 array, input points
xyz2: (batch_size, npoint, 3) float32 array, query points
Output:
idx: (batch_size, npoint, nsample) int32 array, indices to input points
pts_cnt: (batch_size, npoint) int32 array, number of unique points in each local region
'''
#return grouping_module.query_ball_point(radius, nsample, xyz1, xyz2)
return grouping_module.query_ball_point(xyz1, xyz2, radius, nsample)
ops.NoGradient('QueryBallPoint')
def select_top_k(k, dist):
'''
Input:
k: int32, number of k SMALLEST elements selected
dist: (b,m,n) float32 array, distance matrix, m query points, n dataset points
Output:
idx: (b,m,n) int32 array, first k in n are indices to the top k
dist_out: (b,m,n) float32 array, first k in n are the top k
'''
return grouping_module.selection_sort(dist, k)
ops.NoGradient('SelectionSort')
def group_point(points, idx):
'''
Input:
points: (batch_size, ndataset, channel) float32 array, points to sample from
idx: (batch_size, npoint, nsample) int32 array, indices to points
Output:
out: (batch_size, npoint, nsample, channel) float32 array, values sampled from points
'''
return grouping_module.group_point(points, idx)
@tf.RegisterGradient('GroupPoint')
def _group_point_grad(op, grad_out):
points = op.inputs[0]
idx = op.inputs[1]
return [grouping_module.group_point_grad(points, idx, grad_out), None]
def knn_point(k, xyz1, xyz2):
'''
Input:
k: int32, number of k in k-nn search
xyz1: (batch_size, ndataset, c) float32 array, input points
xyz2: (batch_size, npoint, c) float32 array, query points
Output:
val: (batch_size, npoint, k) float32 array, L2 distances
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
b = xyz1.get_shape()[0].value
n = xyz1.get_shape()[1].value
c = xyz1.get_shape()[2].value
m = xyz2.get_shape()[1].value
print(b, n, c, m)
print(xyz1, (b,1,n,c))
xyz1 = tf.tile(tf.reshape(xyz1, (b,1,n,c)), [1,m,1,1])
xyz2 = tf.tile(tf.reshape(xyz2, (b,m,1,c)), [1,1,n,1])
dist = tf.reduce_sum((xyz1-xyz2)**2, -1)
print(dist, k)
outi, out = select_top_k(k, dist)
idx = tf.slice(outi, [0,0,0], [-1,-1,k])
val = tf.slice(out, [0,0,0], [-1,-1,k])
print(idx, val)
#val, idx = tf.nn.top_k(-dist, k=k) # ONLY SUPPORT CPU
return(val, idx)
if __name__=='__main__':
knn=True
import numpy as np
import time
np.random.seed(100)
pts = np.random.random((32,512,64)).astype('float32')
tmp1 = np.random.random((32,512,3)).astype('float32')
tmp2 = np.random.random((32,128,3)).astype('float32')
with tf.device('/gpu:1'):
points = tf.constant(pts)
xyz1 = tf.constant(tmp1)
xyz2 = tf.constant(tmp2)
radius = 0.1
nsample = 64
if knn:
_, idx = knn_point(nsample, xyz1, xyz2)
grouped_points = group_point(points, idx)
else:
idx, _ = query_ball_point(radius, nsample, xyz1, xyz2)
grouped_points = group_point(points, idx)
#grouped_points_grad = tf.ones_like(grouped_points)
#points_grad = tf.gradients(grouped_points, points, grouped_points_grad)
with tf.Session('') as sess:
now = time.time()
for _ in range(100):
ret = sess.run(grouped_points)
print(time.time() - now)
print(ret.shape, ret.dtype)
print(ret)
|
py
|
1a58386a70a1ffbe6353fb1db814d522d1e5fb56
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import warnings
from transformers import AutoTokenizer, MarianConfig, MarianTokenizer, TranslationPipeline, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMarianModel, TFMarianMTModel
@require_tf
class TFMarianModelTester:
config_cls = MarianConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
**self.config_updates,
)
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFMarianModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
head_mask = inputs_dict["head_mask"]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_marian_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
],
axis=-1,
)
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFMarianModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFMarianMTModel, TFMarianModel) if is_tf_available() else ()
all_generative_model_classes = (TFMarianMTModel,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = True
def setUp(self):
self.model_tester = TFMarianModelTester(self)
self.config_tester = ConfigTester(self, config_class=MarianConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
model_class = self.all_generative_model_classes[0]
input_ids = {
"decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"),
"input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"),
}
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pre-trained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_saved_model_creation(self):
# This test is too long (>30sec) and makes fail the CI
pass
def test_mixed_precision(self):
# TODO JP: Make Marian float16 compliant
pass
def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
old_final_logits_bias = model.get_bias()
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
new_final_logits_bias = model.get_bias()
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_final_logits_bias is not None and new_final_logits_bias is not None:
old_final_logits_bias = old_final_logits_bias["final_logits_bias"]
new_final_logits_bias = new_final_logits_bias["final_logits_bias"]
self.assertEqual(new_final_logits_bias.shape[0], 1)
self.assertEqual(new_final_logits_bias.shape[1], assert_size)
models_equal = True
for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()):
for p1, p2 in zip(old, new):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class AbstractMarianIntegrationTest(unittest.TestCase):
maxDiff = 1000 # show more chars for failing integration tests
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self) -> MarianTokenizer:
return AutoTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
warnings.simplefilter("error")
model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name, from_pt=True)
assert isinstance(model, TFMarianMTModel)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer.prepare_seq2seq_batch(
src_texts=self.src_text, **tokenizer_kwargs, return_tensors="tf"
)
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128
)
generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
return generated_words
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_MT_EN(AbstractMarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks if pad_token_id logits not set to LARGE_NEGATIVE."""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_en_zh(AbstractMarianIntegrationTest):
src = "en"
tgt = "zh"
src_text = ["My name is Wolfgang and I live in Berlin"]
expected_text = ["我叫沃尔夫冈 我住在柏林"]
@slow
def test_batch_generation_en_zh(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_en_ROMANCE(AbstractMarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
@slow
def test_pipeline(self):
pipeline = TranslationPipeline(self.model, self.tokenizer, framework="tf")
output = pipeline(self.src_text)
self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
|
py
|
1a58388dfd2705098b38ce5142260b940d11359e
|
from __future__ import annotations
from typing import TYPE_CHECKING
from dearpygui import core as dpgcore
from dearpygui_obj import _register_item_type
from dearpygui_obj.data import DrawPos, DrawPropertyPos, DrawPropertyColorRGBA
from dearpygui_obj.wrapper.widget import Widget, ItemWidget
from dearpygui_obj.wrapper.drawing import DrawCommand, DrawProperty
if TYPE_CHECKING:
from typing import Any, Optional, Tuple, Sequence
from dearpygui_obj.data import Pos2D, ColorRGBA
@_register_item_type('mvAppItemType::Drawing')
class DrawingCanvas(Widget, ItemWidget):
"""A widget that displays the result of drawing commands."""
def __init__(self, size: Tuple[int, int] = (300, 300), *, name_id: str = None, **config):
super().__init__(size=size, name_id=name_id, **config)
def _setup_add_widget(self, dpg_args) -> None:
dpgcore.add_drawing(self.id, **dpg_args)
def clear(self) -> None:
"""Clears the drawing.
Warning:
Any :class:`.DrawCommand` objects created using this canvas must not be used after this
method is called.
This includes reading or writing to any properties of :class:`DrawCommand` objects.
"""
dpgcore.clear_drawing(self.id)
def get_mouse_pos(self) -> Optional[Tuple[int, int]]:
"""Get the mouse position within the drawing, or ``None`` if the drawing is not hovered."""
if not self.is_hovered():
return None
return dpgcore.get_drawing_mouse_pos()
def draw_line(self, p1: Pos2D, p2: Pos2D, color: ColorRGBA, thickness: int) -> DrawLine:
"""See :class:`.DrawLine`"""
return DrawLine(self, p1, p2, color, thickness)
def draw_rectangle(self, pmin: Pos2D, pmax: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawRectangle:
"""See :class:`.DrawRectangle` for keyword arguments."""
return DrawRectangle(self, pmin, pmax, color, **kwargs)
def draw_circle(self, center: Pos2D, radius: float, color: ColorRGBA, **kwargs: Any) -> DrawCircle:
"""See :class:`.DrawCircle` for keyword arguments."""
return DrawCircle(self, center, radius, color, **kwargs)
def draw_text(self, pos: Pos2D, text: str, **kwargs) -> DrawText:
"""See :class:`.DrawText` for keyword arguments."""
return DrawText(self, pos, text, **kwargs)
def draw_arrow(self, p1: Pos2D, p2: Pos2D, color: ColorRGBA, thickness: int, arrow_size: int) -> DrawArrow:
"""See :class:`.DrawArrow` for keyword arguments."""
return DrawArrow(self, p1, p2, color, thickness, arrow_size)
def draw_polyline(self, points: Sequence[Pos2D], color: ColorRGBA, **kwargs: Any) -> DrawPolyLine:
"""See :class:`.DrawPolyLine` for keyword arguments."""
return DrawPolyLine(self, points, color, **kwargs)
def draw_triangle(self, p1: Pos2D, p2: Pos2D, p3: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawTriangle:
"""See :class:`.DrawTriangle` for keyword arguments."""
return DrawTriangle(self, p1, p2, p3, color, **kwargs)
def draw_quad(self, p1: Pos2D, p2: Pos2D, p3: Pos2D, p4: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawQuad:
"""See :class:`.DrawQuod` for keyword arguments."""
return DrawQuad(self, p1, p2, p3, p4, color, **kwargs)
def draw_polygon(self, points: Sequence[Pos2D], color: ColorRGBA, **kwargs) -> DrawPolygon:
"""See :class:`.DrawPolygon` for keyword arguments."""
return DrawPolygon(self, points, color, **kwargs)
def draw_bezier_curve(self, p1: Pos2D, p2: Pos2D, p3: Pos2D, p4: Pos2D, color: ColorRGBA, **kwargs: Any) -> DrawBezierCurve:
"""See :class:`.DrawBezierCurve` for keyword arguments."""
return DrawBezierCurve(self, p1, p2, p3, p4, color, **kwargs)
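

# --- Hedged usage sketch (not part of the original module) ------------------
# Illustration only: assumes a dearpygui parent container is already active
# (e.g. inside a window) and that plain tuples are accepted for the Pos2D and
# ColorRGBA arguments. Only names defined in this module are used.
#
#   canvas = DrawingCanvas(size=(300, 300))
#   line = canvas.draw_line((10, 10), (250, 250), (255, 0, 0, 255), thickness=2)
#   line.color = (0, 0, 255, 255)   # draw properties are writable after creation
#   canvas.clear()                  # invalidates `line`; see the clear() warning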
class DrawLine(DrawCommand):
"""Draws a line."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
thickness: int = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_line(self.canvas.id, tag=self.id, **draw_args)
class DrawRectangle(DrawCommand):
"""Draws a rectangle."""
pmin: Pos2D = DrawPropertyPos()
pmax: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
rounding: float = DrawProperty()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_rectangle(self.canvas.id, tag=self.id, **draw_args)
class DrawCircle(DrawCommand):
"""Draws a circle."""
center: Pos2D = DrawPropertyPos()
radius: float = DrawProperty()
color: ColorRGBA = DrawPropertyColorRGBA()
segments: int = DrawProperty()
thickness: float = DrawProperty()
fill: ColorRGBA = DrawPropertyColorRGBA()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_circle(self.canvas.id, tag=self.id, **draw_args)
class DrawText(DrawCommand):
"""Draws text."""
pos: Pos2D = DrawPropertyPos()
text: str = DrawProperty()
color: ColorRGBA = DrawPropertyColorRGBA()
font_size: int = DrawProperty(key='size')
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_text(self.canvas.id, tag=self.id, **draw_args)
class DrawArrow(DrawCommand):
"""Draw a line with an arrowhead."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
thickness: int = DrawProperty()
arrow_size: int = DrawProperty(key='size')
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_arrow(self.canvas.id, tag=self.id, **draw_args)
class DrawPolyLine(DrawCommand):
"""Draws connected lines."""
@DrawProperty()
def points(self) -> Sequence[Pos2D]:
return [ DrawPos(*p) for p in self.get_config()['points'] ]
@points.getconfig
def points(self, value: Sequence[Pos2D]):
return { 'points' : [ list(p) for p in value ] }
color: ColorRGBA = DrawPropertyColorRGBA()
closed: bool = DrawProperty()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_polyline(self.canvas.id, tag=self.id, **draw_args)
class DrawTriangle(DrawCommand):
"""Draws a triangle."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
p3: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_triangle(self.canvas.id, tag=self.id, **draw_args)
class DrawQuad(DrawCommand):
"""Draws a quadrilateral."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
p3: Pos2D = DrawPropertyPos()
p4: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_quad(self.canvas.id, tag=self.id, **draw_args)
class DrawPolygon(DrawCommand):
"""Draws a polygon."""
@DrawProperty()
def points(self) -> Sequence[Pos2D]:
return [ DrawPos(*p) for p in self.get_config()['points'] ]
@points.getconfig
def points(self, value: Sequence[Pos2D]):
return { 'points' : [ list(p) for p in value ] }
color: ColorRGBA = DrawPropertyColorRGBA()
fill: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_polygon(self.canvas.id, tag=self.id, **draw_args)
class DrawBezierCurve(DrawCommand):
"""Draws a bezier curve."""
p1: Pos2D = DrawPropertyPos()
p2: Pos2D = DrawPropertyPos()
p3: Pos2D = DrawPropertyPos()
p4: Pos2D = DrawPropertyPos()
color: ColorRGBA = DrawPropertyColorRGBA()
thickness: float = DrawProperty()
segments: int = DrawProperty()
def _draw_internal(self, draw_args) -> None:
dpgcore.draw_bezier_curve(self.canvas.id, tag=self.id, **draw_args)
## class DrawImage TODO
__all__ = [
'DrawingCanvas',
'DrawLine',
'DrawRectangle',
'DrawCircle',
'DrawText',
'DrawArrow',
'DrawPolyLine',
'DrawTriangle',
'DrawQuad',
'DrawPolygon',
'DrawBezierCurve',
]
|
py
|
1a5838dd4494fafc9d87ed103aee77d4e6a5c7c4
|
from .testtools import virtuese as vs
from .testtools import pickyinvestor
import datetime
import numpy as np
import pandas as pd
import os
from .models import TradeCalendar
from .models import Position
def getDateIDs():
"""Get a dictionary mapping date to id in the database.
"""
tradeID = {}
tradeDays = TradeCalendar.objects.all()
for tradeDay in tradeDays:
tradeID[tradeDay.trade_date.strftime("%Y-%m-%d")] = tradeDay.id
return tradeID
def getIndex(information):
"""
Adjust order so that indexes is a list such that:
indexes[] are the index for information to be storaged correctly.
information[indexes[0]] - book
information[indexes[1]] - ts_code
information[indexes[2]] - trade_date
information[indexes[3]] - position
information[indexes[4]] - value
information[indexes[5]] - wavg_cost
information[indexes[6]] - return
information[indexes[7]] - pct_return
"""
indexes = []
indexes.append(information.index("book"))
indexes.append(information.index("ts_code"))
indexes.append(information.index("trade_date"))
indexes.append(information.index("position"))
indexes.append(information.index("value"))
indexes.append(information.index("wavg_cost"))
indexes.append(information.index("return"))
indexes.append(information.index("pct_return"))
return indexes
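

# Hedged illustration (not part of the original module): with the header row
# below, getIndex returns positions such that information[indexes[0]] is the
# 'book' column, information[indexes[1]] the 'ts_code' column, and so on.
#
#   getIndex("trade_date,book,ts_code,position,value,wavg_cost,return,pct_return".split(","))
#   -> [1, 2, 0, 3, 4, 5, 6, 7]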
def simulate(params):
response = {}
try:
startDate = params['startDate']
endDate = params['endDate']
stockPool = params['stockPool']
strategy = params['strategy']
testval = [startDate,endDate,stockPool,strategy]
startDate = '20200301'
endDate = '20200603'
stockPool = []
strategy = ""
backtest = vs.VirtualSE()
backtest.setRunningInterval(startDate, endDate)
backtest.setBacktestingPriceEngine("backward")
# backtest.setStockPool(stockPool)
backtest.setBrokerDB("aTest.db")
backtest.setMarketExpressDB("externalDB.db")
strategy = pickyinvestor.PickyInvestor()
backtest.setTradingStrategy(strategy)
backtest.execute()
transactionDataFile = backtest.getTransactionData()
tradeID = getDateIDs()
skipFirstLine = True
for line in transactionDataFile:
if skipFirstLine:
skipFirstLine = False
indexes = getIndex(line.split()[0].split(","))
Position.objects.all().delete()
statements = []
continue
            information = line.split()[0].split(",")
            value = information[indexes[4]] if information[indexes[4]] != "" else 0
            return_field = information[indexes[6]] if information[indexes[6]] != "" else 0
            pct_return = information[indexes[7]] if information[indexes[7]] != "" else 0
            statements.append(Position(
                book=information[indexes[0]],
                ts_code=information[indexes[1]],
                trade_day_id=tradeID[information[indexes[2]]],
                position=information[indexes[3]],
                value=value,
                wavg_cost=information[indexes[5]],
                return_field=return_field,
                pct_return=pct_return,
            ))
Position.objects.bulk_create(statements)
transactionDataFile.close()
backtest.clear()
return testval
except Exception as e:
print(str(e))
|
py
|
1a583b26a0581dac3a5793642df65e6fd62c0eeb
|
#!/usr/bin/python3.6
import sys, os, importlib
from .system import console, execute
from .util import glob_with_extensions, glob_folders_with_name_match
from .build_config import BuildConfig
from .build_target import BuildTarget
from .build_dependency import BuildDependency
from .dependency_chain import load_dependency_chain, execute_task_chain, find_dependency, get_full_flattened_deps
from .init_project import mama_init_project
def print_title():
console(f'========= Mama Build Tool ==========')
def print_usage():
console('mama [actions...] [args...]')
console(' actions:')
console(' init - create initial mamafile.py and CMakeLists.txt')
console(' list - list all mama dependencies on this project')
console(' build - configure and build main project or specific target, this can clone, but does not pull')
console(' update - update and build target dependencies after calling git pull')
console(' deploy - runs PAPA deploy stage by gathering all libraries and assets')
console(' serve - Equivalent of `update build deploy`')
console(' clean - clean main project or specific target')
console(' rebuild - clean, update and build main project or specific target')
console(' reclone - wipe specific target dependency and clone it again')
console(' wipe - alias of reclone')
console(' test - run tests for main project or specific target')
console(' start=arg - start a specific tool via mamafile.start(args)')
console(' add - add new dependency')
console(' new - create new mama build file')
console(' open=<tgt> - open a project file')
console(' help - shows this help list')
console(' install utils:')
console(' install-clang6 - configures and installs clang6 for linux')
console(' install-msbuild - configures and installs MSBuild for linux')
console(' args:')
console(' windows - build for windows')
console(' linux - build for linux')
console(' macos - build for macos')
console(' ios - build for ios')
console(' android - build for android')
console(' android-N - build for android targeting specific API level, ex: android-26')
console(' clang - prefer clang for linux (default on linux/macos/ios/android)')
console(' gcc - prefer gcc for linux')
console(' fortran - enable automatic fortran detection (or configure this in mamafile)')
console(' release - (default) CMake configuration RelWithDebInfo')
console(' debug - CMake configuration Debug')
console(' arch=x86 - Override cross-compiling architecture: (x86, x64, arm, arm64)')
console(' x86 - Shorthand for arch=x86, all shorthands: x86 x64 arm arm64')
console(' jobs=N - Max number of parallel compilations. (default=system.core.count)')
console(' target=P - Name of the target')
console(' all - Short for target=all')
console(' silent - Greatly reduces verbosity')
console(' verbose - Greatly increases verbosity for build dependencies and cmake')
console(' examples:')
console(' mama init Initialize a new project. Tries to create mamafile.py and CMakeLists.txt')
    console(' mama build Build the main project only. This only clones, but does not pull!')
console(' mama build x86 opencv Cross compile build target opencv to x86 architecture')
console(' mama build android Cross compile to arm64 android NDK')
console(' mama build android-26 arm Cross compile to armv7 android NDK API level 26')
console(' mama update Update all dependencies by doing git pull and build.')
console(' mama clean Cleans main project only.')
    console(' mama clean x86 opencv Cleans only the opencv target, for the x86 architecture.')
console(' mama clean all Cleans EVERYTHING in the dependency chain for current arch.')
console(' mama rebuild Cleans, update and build main project only.')
console(' mama build dep1 Update and build dep1 only.')
console(' mama update dep1 Update and build the specified target.')
console(' mama serve android Update, build and deploy for Android')
console(' mama wipe dep1 Wipe target dependency completely and clone again.')
console(' mama test Run tests on main project.')
console(' mama test=arg Run tests on main project with an argument.')
console(' mama test="arg1 arg2" Run tests on main project with multiple arguments.')
console(' mama test dep1 Run tests on target dependency project.')
console(' mama start=dbtool Call main project mamafile start() with args [`dbtool`].')
console(' environment:')
console(' setenv("NINJA") Path to NINJA build executable')
console(' setenv("ANDROID_HOME") Path to Android SDK if auto-detect fails')
def open_project(config: BuildConfig, root_dependency: BuildDependency):
name = config.target if config.target and config.target != 'all' else config.open
found = root_dependency if name == 'root' else find_dependency(root_dependency, name)
if not found:
raise KeyError(f'No project named {name}')
if config.windows:
solutions = glob_with_extensions(found.build_dir, ['.sln'])
if not solutions:
raise EnvironmentError('Could not find any Visual Studio solutions!')
execute(f'start {solutions[0]}', echo=True)
elif config.macos or config.ios:
projects = glob_folders_with_name_match(found.build_dir, ['.xcodeproj'])
if not projects:
raise EnvironmentError('Could not find any Xcode projects!')
execute(f'open {projects[0]}', echo=True)
elif config.linux:
raise EnvironmentError('Linux IDE selection not implemented. Try opening this folder with CLion.')
#execute(f'xdg-open', echo=True)
elif config.android:
raise EnvironmentError('Android IDE selection not implemented. Try opening this folder with Android Studio.')
def set_target_from_unused_args(config: BuildConfig):
for arg in config.unused_args:
if config.target:
console(f"ERROR: Deduced Target='{arg}' from unused argument, but target is already set to '{config.target}'")
exit(-1)
else:
config.target = arg
def check_config_target(config: BuildConfig, root: BuildDependency):
if config.target and config.target != 'all':
dep = find_dependency(root, config.target)
if dep is None:
console(f"ERROR: specified target='{config.target}' not found!")
exit(-1)
def main():
if sys.version_info < (3, 6):
console('FATAL ERROR: MamaBuild requires Python 3.6')
exit(-1)
if len(sys.argv) == 1 or 'help' in sys.argv:
print_title()
print_usage()
exit(-1)
config = BuildConfig(sys.argv[1:])
if config.print:
print_title()
source_dir = os.getcwd()
name = os.path.basename(source_dir)
root = BuildDependency(name, config, BuildTarget, src=source_dir, is_root=True)
if config.mama_init:
mama_init_project(root)
return
if config.convenient_install:
config.run_convenient_installs()
return
has_cmake = root.cmakelists_exists()
if not root.mamafile_exists() and not has_cmake:
console('FATAL ERROR: mamafile.py not found and CMakeLists.txt not found')
exit(-1)
if config.unused_args:
set_target_from_unused_args(config)
if config.update:
if not config.target:
config.target = 'all'
if config.print: console(f'Updating all targets')
else:
if config.print: console(f'Updating {config.target} target')
if config.rebuild:
config.build = True
config.clean = True
if config.clean and not config.target:
root.clean()
load_dependency_chain(root)
check_config_target(config, root)
if config.list:
print(f'Dependency List: {get_full_flattened_deps(root)}')
if config.target:
dep = find_dependency(root, config.target)
if dep:
target:BuildTarget = dep.target
target.package()
inc, libs = target.get_target_products(config.target)
inc, libs = '\n '.join(inc.split(';')), '\n '.join(libs.split(';'))
print(f"target {config.target} includes:\n {inc}")
print(f"target {config.target} libraries:\n {libs}")
return
if config.android: config.init_ndk_path()
if config.raspi: config.init_raspi_path()
execute_task_chain(root)
if config.open:
open_project(config, root)
def __main__():
main()
if __name__ == '__main__':
main()
|
py
|
1a583c1ea158cb38876df90fa324d3c2467ab2a6
|
# For each input number (until the sentinel "END"), print the length of the
# chain n, digits(n), digits(digits(n)), ... down to the fixed point 1,
# counting the final 1 itself ("1" alone is answered directly).
while True:
    n = input()
    if n == "END": break
    if n == "1":
        print(1)
        continue
    ans = 1
    p = len(n)            # number of digits of the original number
    tmp = 0
    while True:
        if tmp == p:      # the digit count stopped changing: fixed point reached
            break
        tmp = p
        p = len(str(p))   # digit count of the previous digit count
        ans += 1
    print(ans)
|
py
|
1a583d69c9ac93998808294660cb94b30854d109
|
import FWCore.ParameterSet.Config as cms
# Reconstruction geometry services
# Tracking Geometry
from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *
#Tracker
from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *
from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *
from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *
#Muon
from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *
from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *
# Alignment
from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *
from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import *
from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import *
# Calorimeters
from Geometry.CaloEventSetup.CaloTopology_cfi import *
from Geometry.CaloEventSetup.CaloGeometry_cff import *
from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *
from Geometry.EcalMapping.EcalMapping_cfi import *
from Geometry.EcalMapping.EcalMappingRecord_cfi import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDConstants_cff import *
from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *
|
py
|
1a583df746aebdf72a442c0d3f5ee3e9cdbc22c2
|
import numpy as np
import os.path
def subset_x_y(target, features, start_index:int, end_index:int):
"""Keep only the rows for X and y sets from the specified indexes
Parameters
----------
target : pd.DataFrame
Dataframe containing the target
features : pd.DataFrame
Dataframe containing all features
    start_index : int
        Index of the starting observation
    end_index : int
        Index of the ending observation
    Returns
    -------
    pd.DataFrame
        Subsetted Pandas dataframe containing all features
    pd.DataFrame
        Subsetted Pandas dataframe containing the target
"""
return features[start_index:end_index], target[start_index:end_index]
def split_sets_random(df, target_col, test_ratio=0.2, to_numpy=False):
"""Split sets randomly
Parameters
----------
df : pd.DataFrame
Input dataframe
target_col : str
Name of the target column
test_ratio : float
Ratio used for the validation and testing sets (default: 0.2)
Returns
-------
Numpy Array
Features for the training set
Numpy Array
Target for the training set
Numpy Array
Features for the validation set
Numpy Array
Target for the validation set
Numpy Array
Features for the testing set
Numpy Array
Target for the testing set
"""
from sklearn.model_selection import train_test_split
features, target = pop_target(df=df, target_col=target_col, to_numpy=to_numpy)
X_data, X_test, y_data, y_test = train_test_split(features, target, test_size=test_ratio, random_state=8)
val_ratio = test_ratio / (1 - test_ratio)
X_train, X_val, y_train, y_val = train_test_split(X_data, y_data, test_size=val_ratio, random_state=8)
return X_train, y_train, X_val, y_val, X_test, y_test
def save_sets(X_train=None, y_train=None, X_val=None, y_val=None, X_test=None, y_test=None, path='../data/processed/'):
    """Save the provided (non-None) training, validation and testing sets as .npy files under the given path."""
    import numpy as np
if X_train is not None:
np.save(f'{path}X_train', X_train)
if X_val is not None:
np.save(f'{path}X_val', X_val)
if X_test is not None:
np.save(f'{path}X_test', X_test)
if y_train is not None:
np.save(f'{path}y_train', y_train)
if y_val is not None:
np.save(f'{path}y_val', y_val)
if y_test is not None:
np.save(f'{path}y_test', y_test)
def load_sets(path='../data/processed/', val=False):
    """Load whichever of the saved .npy set files exist under the given path; missing sets are returned as None."""
    import numpy as np
    import os.path
X_train = np.load(f'{path}X_train.npy') if os.path.isfile(f'{path}X_train.npy') else None
X_val = np.load(f'{path}X_val.npy' ) if os.path.isfile(f'{path}X_val.npy') else None
X_test = np.load(f'{path}X_test.npy' ) if os.path.isfile(f'{path}X_test.npy') else None
y_train = np.load(f'{path}y_train.npy') if os.path.isfile(f'{path}y_train.npy') else None
y_val = np.load(f'{path}y_val.npy' ) if os.path.isfile(f'{path}y_val.npy') else None
y_test = np.load(f'{path}y_test.npy' ) if os.path.isfile(f'{path}y_test.npy') else None
return X_train, y_train, X_val, y_val, X_test, y_test
def pop_target(df, target_col, to_numpy=False):
"""Extract target variable from dataframe and convert to nympy arrays if required
Parameters
----------
df : pd.DataFrame
Dataframe
target_col : str
Name of the target variable
to_numpy : bool
Flag stating to convert to numpy array or not
Returns
-------
pd.DataFrame/Numpy array
Subsetted Pandas dataframe containing all features
pd.DataFrame/Numpy array
Subsetted Pandas dataframe containing the target
"""
df_copy = df.copy()
target = df_copy.pop(target_col)
if to_numpy:
df_copy = df_copy.to_numpy()
target = target.to_numpy()
return df_copy, target
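

# --- Hedged usage sketch (not part of the original module) ------------------
# Ties the helpers above together on a toy dataframe with a 'target' column;
# a temporary directory stands in for the default '../data/processed/' path.
if __name__ == '__main__':
    import tempfile
    import pandas as pd

    df = pd.DataFrame({'feat1': range(10), 'feat2': range(10, 20), 'target': [0, 1] * 5})
    X_train, y_train, X_val, y_val, X_test, y_test = split_sets_random(df, 'target', test_ratio=0.2, to_numpy=True)

    tmp_path = tempfile.mkdtemp() + '/'
    save_sets(X_train, y_train, X_val, y_val, X_test, y_test, path=tmp_path)
    loaded = load_sets(path=tmp_path)
    print([a.shape if a is not None else None for a in loaded])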
|
py
|
1a583f6512531c7b2e77672b544bdadc2bd80428
|
from scrapy import cmdline

# Programmatic equivalent of running `scrapy crawl dangdang` from the shell.
cmdline.execute(['scrapy', 'crawl', 'dangdang'])
|
py
|
1a5840b21583b5ad99473eabcf847d96bd555f4a
|
# pylint: disable=missing-docstring,no-self-use,no-member,misplaced-comparison-constant,expression-not-assigned
import logging
from unittest.mock import patch, Mock
import pytest
from expecter import expect
import yorm
from yorm import common
from yorm.decorators import attr
from yorm.types import Dictionary, List
from yorm.types import String, Integer
from . import strip
log = logging.getLogger(__name__)
# CLASSES #####################################################################
@attr(abc=Integer)
class SampleDictionary(Dictionary):
"""Sample dictionary container."""
@attr(var1=Integer)
@attr(var2=String)
class SampleDictionaryWithInitialization(Dictionary):
"""Sample dictionary container with initialization."""
def __init__(self, var1, var2, var3):
super().__init__()
self.var1 = var1
self.var2 = var2
self.var3 = var3
@attr(all=String)
class StringList(List):
"""Sample list container."""
class UnknownList(List):
"""Sample list container."""
# TESTS #######################################################################
class TestDictionary:
"""Unit tests for the `Dictionary` container."""
obj = {'abc': 123}
class SampleClass:
def __init__(self):
self.abc = 42
class SampleClass2:
def __init__(self):
self.unmapped = Mock()
data_value = [
(obj, obj),
(None, {'abc': 0}),
("key=value", {'key': "value", 'abc': 0}),
("key=", {'key': "", 'abc': 0}),
("key", {'key': None, 'abc': 0}),
]
value_data = [
(obj, obj),
(SampleClass(), {'abc': 42}),
(SampleClass2(), {'abc': 0}),
([], {'abc': 0}),
]
def setup_method(self, _):
"""Reset the class' mapped attributes before each test."""
common.attrs[SampleDictionary] = {'abc': Integer}
@pytest.mark.parametrize("data,value", data_value)
def test_to_value(self, data, value):
"""Verify input data is converted to values."""
assert value == SampleDictionary.to_value(data)
@pytest.mark.parametrize("value,data", value_data)
def test_to_data(self, value, data):
"""Verify values are converted to output data."""
assert data == SampleDictionary.to_data(value)
def test_not_implemented(self):
"""Verify `Dictionary` cannot be used directly."""
with pytest.raises(NotImplementedError):
Dictionary()
def test_dict_as_object(self):
"""Verify a `Dictionary` can be used as an attribute."""
dictionary = SampleDictionaryWithInitialization(1, 2, 3.0)
value = {'var1': 1, 'var2': '2'}
value2 = dictionary.to_value(dictionary)
assert value == value2
# keys are not accessible as attributes
assert not hasattr(value2, 'var1')
assert not hasattr(value2, 'var2')
assert not hasattr(value2, 'var3')
    def test_unknown_attributes_are_ignored(self):
obj = SampleDictionary.create_default()
obj.update_value({'key': "value", 'abc': 7}, auto_track=False)
assert {'abc': 7} == obj
class TestList:
"""Unit tests for the `List` container."""
obj = ["a", "b", "c"]
data_value = [
(obj, obj),
(None, []),
([None], []),
("a b c", ["a", "b", "c"]),
("a,b,c", ["a", "b", "c"]),
("abc", ["abc"]),
("a\nb\nc", ["a", "b", "c"]),
(4.2, ['4.2']),
(("a", "b"), ["a", "b"]),
]
value_data = [
(obj, obj),
([], [None]),
]
@pytest.mark.parametrize("data,value", data_value)
def test_to_value(self, data, value):
"""Verify input data is converted to values."""
assert value == StringList.to_value(data)
@pytest.mark.parametrize("value,data", value_data)
def test_to_data(self, value, data):
"""Verify values are converted to output data."""
assert data == StringList.to_data(value)
def test_item_type(self):
"""Verify list item type can be determined."""
assert String == StringList.item_type
def test_item_type_none(self):
"""Verify list item type defaults to None."""
assert None is UnknownList.item_type
def test_not_implemented(self):
"""Verify `List` cannot be used directly."""
with pytest.raises(NotImplementedError):
List()
with pytest.raises(NotImplementedError):
UnknownList()
def test_shortened_syntax(self):
cls = List.of_type(Integer)
expect(cls.__name__) == "IntegerList"
expect(common.attrs[cls]) == {'all': Integer}
class TestExtensions:
"""Unit tests for extensions to the container classes."""
class FindMixin:
def find(self, value):
for value2 in self:
if value.lower() == value2.lower():
return value2
return None
@yorm.attr(a=yorm.types.String)
class MyDictionary(Dictionary, FindMixin):
pass
@yorm.attr(all=yorm.types.String)
class MyList(List, FindMixin):
pass
def test_converted_dict_keeps_type(self):
my_dict = self.MyDictionary()
my_dict['a'] = 1
my_dict2 = self.MyDictionary.to_value(my_dict)
assert 'a' == my_dict2.find('A')
assert None is my_dict2.find('B')
def test_converted_list_keeps_type(self):
my_list = self.MyList()
my_list.append('a')
my_list2 = self.MyList.to_value(my_list)
assert 'a' == my_list2.find('A')
assert None is my_list2.find('B')
@patch('yorm.settings.fake', True)
class TestReservedNames:
class MyObject:
def __init__(self, items=None):
self.items = items or []
def __repr__(self):
return "<my_object>"
def test_list_named_items(self):
my_object = self.MyObject()
yorm.sync_object(my_object, "fake/path", {'items': StringList})
log.info("Appending value to list of items...")
my_object.items.append('foo')
log.info("Checking object contents...")
assert strip("""
items:
- foo
""") == my_object.__mapper__.text
log.info("Writing new file contents...")
my_object.__mapper__.text = strip("""
items:
- bar
""")
log.info("Checking file contents...")
assert ['bar'] == my_object.items
|
py
|
1a5840f3265e0873788881f8c40905b34ec8119a
|
'''
Created on 2015/12/29
:author: hubo
'''
from __future__ import print_function
from vlcp.utils.connector import async_processor, async_to_async, Connector,\
generator_to_async
from vlcp.event.event import withIndices, Event, M_
from vlcp.config import defaultconfig
from vlcp.server.module import Module, call_api
import functools
import threading
import signal
from vlcp.event.runnable import RoutineContainer
from vlcp.event.runnable import RoutineException
import pdb
import code
from vlcp.config.config import manager
from vlcp.protocol.protocol import Protocol
from vlcp.event.connection import Client
import os
import socket
import re
from vlcp.event.core import InterruptedBySignalException
from queue import Queue, PriorityQueue
import traceback
import sys
import _thread as thread
def console_help():
print(Console._full_help)
def restore_console():
if not hasattr(Console, '_instance') or not Console._instance:
raise ValueError('Console is not loaded')
Console._instance.restore_console()
@withIndices('type')
class ConsoleEvent(Event):
canignore = False
@withIndices()
class ConsoleServiceCall(Event):
pass
@withIndices('waiter')
class ConsoleServiceCancel(Event):
pass
@withIndices('socket')
class SocketInjectDone(Event):
pass
@withIndices()
class InterruptPoller(Event):
pass
class Waiter(object):
def __init__(self):
self.event = threading.Event()
self.event.clear()
self.exception = None
self.result = None
def wait(self, timeout = None):
self.event.wait(timeout)
if self.exception:
raise self.exception
else:
return self.result
def raise_exception(self, exc):
self.exception = exc
self.event.set()
def send_result(self, val):
self.result = val
self.event.set()
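

# --- Hedged usage sketch (not part of the original module) ------------------
# Waiter is a tiny cross-thread future: one thread blocks in wait() while
# another delivers a value via send_result() or an error via raise_exception().
#
#   w = Waiter()
#   threading.Thread(target=lambda: w.send_result(42)).start()
#   assert w.wait(timeout=5) == 42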
@defaultconfig
class Console(Module):
'''
VLCP debugging console.
Besides the normal functions of Python interactive console,
Following variables are provided for debugging purpose:
server, manager, container
Following functions can be used to control VLCP running:
callapi, capture, sendevent, subroutine, execute, breakpoint, syscall,
resume, debug, restore_console, console_help
For details call console_help()
'''
_full_help = '''
VLCP debugging console.
Besides the normal functions of python interactive console,
following variables are provided for debugging purpose:
server - current running VLCP server
manager - current configuration manager
container - internal used routine container
Following functions can be used to control VLCP running:
callapi(modulename, functionname, **kwargs)
- Call module API modulename/functionname with kwargs, return result
capture(matchers, blocking = False, breakpoint = False, captureonce = False, callback = None)
- Capture events matched with specified matchers and print the event. Other parameters:
- blocking: if True, wait until the events are captured
- breakpoint: if True, suspend the event loop and wait for resume()
- captureonce: if True, remove the matchers on first capture
- callback: func(event, matcher) called on every capture if specified
sendevent(event, emerge = False)
      - Send specified event to scheduler. If emerge = True, send immediately without blocking
subroutine(routine)
- create a new routine in container.
execute(routine)
- execute the routine in container, and return the return value
breakpoint()
- stop running and wait for resume().
syscall(syscall_func)
- execute syscall_func in syscall context
resume()
- resume from breakpoint
debug()
- resume from breakpoint with pdb.set_trace() to enter pdb debugging. Suspend the interactive console
to work with pdb.
restore_console()
- Prepare to continue in pdb and resume the console. Type in pdb:
clear
import vlcp.service.debugging.console
vlcp.service.debugging.console.restore_console()
continue
console_help()
- show this help
'''
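
    # --- Hedged example session (not part of the original module) -----------
    # Typical interactive use once connected (e.g. `telnet localhost 9923`);
    # the module and API names passed to callapi below are placeholders, not
    # real VLCP modules:
    #
    #   >>> callapi('somemodule', 'someapi', key='value')   # hypothetical names
    #   >>> capture([ConsoleEvent.createMatcher()], captureonce=True)
    #   >>> resume()
    #
    # Call console_help() in the session for the full helper list.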
service = False
# Directly start VLCP in the console mode. By default, the console module creates a
# telnet server and wait for a connection. The console can be used in the telnet session.
# With startinconsole = True, the module uses stdin/stdout to create the console.
_default_startinconsole = False
# Default telnet connection URL, this is a passive connection on port 9923, so use::
#
# telnet localhost 9923
#
# to connect to the console.
_default_telnetconsole = 'ptcp://localhost:9923/'
# If SSL is configured (with pssl://...), specify the private key file
_default_key = None
# If SSL is configured, specify the certificate file
_default_certificate = None
# If SSL is configured, specify the CA file
_default_ca_certs = None
async def _service_routine(self):
self.apiroutine.subroutine(self._intercept_main())
csc = ConsoleServiceCall.createMatcher()
while True:
ev = await csc
self.apiroutine.subroutine(ev.routine, True)
async def _service_call_routine(self, waiter, call):
try:
r = await self.apiroutine.with_exception(call, ConsoleServiceCancel.createMatcher(waiter))
except RoutineException:
pass
except Exception as exc:
waiter.raise_exception(exc)
else:
waiter.send_result(r)
async def _intercept_main(self):
cr = self.apiroutine.currentroutine
self.sendEventQueue = Queue()
_console_connect_event = threading.Event()
_console_connect_event.clear()
await self.apiroutine.wait_for_send(ConsoleEvent('initproxy'))
if not self.startinconsole:
p = Protocol()
p.persist = True
p.createqueue = False
async def init(connection):
sock = connection.socket
self.telnet_socket = sock
self.scheduler.unregisterPolling(connection.socket)
connection.socket = None
connection.connected = False
_console_connect_event.set()
await SocketInjectDone.createMatcher(sock)
p.init = init
p.reconnect_init = init
Client(self.telnetconsole, p, self.scheduler, self.key, self.certificate, self.ca_certs).start()
def syscall_threaded_main(scheduler, processor):
# Detach self
scheduler.unregisterall(cr)
scheduler.syscallfunc = None
scheduler.syscallrunnable = None
self._threaded_main_quit = False
def threaded_main():
try:
scheduler.main(False, False)
finally:
self._threaded_main_quit = True
_console_connect_event.set()
t = threading.Thread(target=threaded_main)
t.daemon = True
t.start()
try:
if self.startinconsole:
self._interactive()
else:
while not self._threaded_main_quit:
try:
while not _console_connect_event.is_set():
# There is a bug in Python 2.x that wait without timeout cannot be
# interrupted by signal
_console_connect_event.wait(3600)
if self._threaded_main_quit:
break
except InterruptedBySignalException:
# This signal should interrupt the poller, but poller is not in the main thread
# Send an event through the proxy will do the trick
self.sendEventQueue.put((InterruptPoller(),))
continue
pstdin_r, pstdin_w = os.pipe()
pstdout_r, pstdout_w = os.pipe()
orig_stdin = sys.stdin
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
pstdin = os.fdopen(pstdin_r, 'rU')
pstdout = os.fdopen(pstdout_w, 'w')
sys.stdin = pstdin
sys.stdout = pstdout
sys.stderr = pstdout
sock = self.telnet_socket
sock.setblocking(True)
self.telnet_socket = None
_console_connect_event.clear()
t = threading.Thread(target=self._telnet_server, args=(pstdin_w, pstdout_r, sock, orig_stdout))
t.daemon = True
t.start()
try:
self._interactive()
except SystemExit:
pass
if not t.is_alive():
break
self.sendEventQueue.put((SocketInjectDone(sock),))
finally:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
pstdin.close()
except Exception:
pass
try:
pstdout.close()
except Exception:
pass
sys.stdin = orig_stdin
sys.stdout = orig_stdout
sys.stderr = orig_stderr
except SystemExit:
pass
finally:
async def _quit():
scheduler.quit()
self.sendEventQueue.put((ConsoleServiceCall(routine=_quit()),))
self.sendEventQueue.put(None)
if self.startinconsole:
print('Wait for scheduler end, this may take some time...')
t.join()
# Cannot inject the event loop from yield_()
await self.apiroutine.do_events()
await self.apiroutine.syscall(syscall_threaded_main, True)
def _telnet_server_writer(self, queue, sock):
lastseq = -1
while True:
t, seq, val = queue.get()
if t < 0:
break
if t != 2 or seq >= lastseq:
try:
sock.sendall(val)
except Exception:
break
if t == 0:
lastseq = seq
def _telnet_server_writer2(self, pstdout_r, queue, lock, orig_stdout):
while True:
data = os.read(pstdout_r, 1024)
if data == b'':
os.close(pstdout_r)
break
data, _ = re.subn(br'\r?\n', b'\r\n', data)
lock.acquire()
try:
self._telnet_seq += 1
seq = self._telnet_seq
finally:
lock.release()
queue.put((2, seq, data))
def _telnet_server(self, pstdin_w, pstdout_r, sock, orig_stdout):
queue = PriorityQueue()
inputbuffer = b''
self._telnet_seq = 0
try:
t = threading.Thread(target=self._telnet_server_writer, args=(queue, sock))
t.daemon = True
t.start()
lock = threading.Lock()
def writeall(data):
start = 0
while start < len(data):
size = os.write(pstdin_w, data[start:])
start += size
def sendcontrol(t, data):
lock.acquire()
try:
self._telnet_seq += 1
seq = self._telnet_seq
finally:
lock.release()
queue.put((t, seq, data))
t2 = threading.Thread(target=self._telnet_server_writer2, args=(pstdout_r, queue, lock, orig_stdout))
t2.daemon = True
t2.start()
escaping = False
option = None
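# Minimal TELNET negotiation (descriptive note): IAC (0xff) starts an escape;
# DO/DONT (0xfd/0xfe) and WILL/WONT (0xfb/0xfc) requests are refused, except
# DO option 0x06 (TELNET timing mark) which is acknowledged with WILL, and
# IAC BRK/IP (0xf3/0xf4) interrupts the main thread like Ctrl-C.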
while True:
newdata = sock.recv(1024)
if newdata == b'':
break
for i in range(0, len(newdata)):
c = newdata[i:i+1]
if escaping:
if option == b'\xfd' and c == b'\x06':
sendcontrol(1, b'\xff\xfb\x06')
option = None
escaping = False
elif option == b'\xfd' or option == b'\xfe':
sendcontrol(1, b'\xff\xfc' + c)
option = None
escaping = False
elif option == b'\xfb' or option == b'\xfc':
sendcontrol(1, b'\xff\xfe' + c)
option = None
escaping = False
elif c in (b'\xfb', b'\xfc', b'\xfd', b'\xfe'):
option = c
else:
option = None
if c == b'\xf3' or c == b'\xf4':
thread.interrupt_main()
escaping = False
else:
if c == b'\x03':
thread.interrupt_main()
elif c == b'\x08':
inputbuffer = inputbuffer[:-1]
elif c == b'\x00':
inputbuffer += b'\n'
writeall(inputbuffer)
inputbuffer = b''
elif c == b'\r' or c == b'\n':
inputbuffer += c
writeall(inputbuffer)
inputbuffer = b''
elif c == b'\xff':
escaping = True
else:
inputbuffer += c
except OSError:
pass
except IOError:
pass
finally:
try:
os.close(pstdin_w)
except Exception:
pass
queue.put((-1, -1, -1))
def _interactive(self):
lsignal = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
_breakpoint_event = threading.Event()
_current_thread = threading.current_thread().ident
_enter_pdb = [False]
def _async_run(call):
self.sendEventQueue.put((ConsoleServiceCall(routine = call),))
def _async(func):
@functools.wraps(func)
def f(*args, **kwargs):
_async_run(func(*args, **kwargs))
return f
def _service_call_customized(factory):
waiter = Waiter()
self.sendEventQueue.put((ConsoleServiceCall(routine=factory(waiter)),))
try:
return waiter.wait()
except:
self.sendEventQueue.put((ConsoleServiceCancel(waiter),))
raise
def execute(call):
return _service_call_customized(lambda waiter: self._service_call_routine(waiter, call))
def _service(func):
@functools.wraps(func)
def f(*args, **kwargs):
return execute(func(*args, **kwargs))
return f
@_service
def callapi(modulename, functionname, **kwargs):
return call_api(self.apiroutine, modulename, functionname, kwargs)
@_service
async def sendevent(event, emerge = False):
if emerge:
self.apiroutine.scheduler.emergesend(event)
else:
await self.apiroutine.wait_for_send(event)
@_service
async def subroutine(routine):
return self.apiroutine.subroutine(routine)
@_service
async def syscall(syscall_func):
return self.apiroutine.syscall(syscall_func)
def breakpoint():
in_thread = threading.current_thread().ident
if in_thread == _current_thread:
_breakpoint()
else:
print('Enter VLCP debugging breakpoint:')
traceback.print_stack()
print('Call resume() to continue the event loop, or debug() to enter pdb')
_breakpoint_event.clear()
_breakpoint_event.wait()
if _enter_pdb[0]:
pdb.set_trace()
else:
print('Resume from breakpoint.')
@_async
async def _breakpoint():
breakpoint()
def resume():
_enter_pdb[0] = False
_breakpoint_event.set()
@_async
async def restore_console():
self._restore_console_event.set()
self.restore_console = restore_console
def debug():
_enter_pdb[0] = True
self._restore_console_event.clear()
_breakpoint_event.set()
# Switch to event loop thread, suspend the main thread, wait for restore_console
self._restore_console_event.wait()
_capture_breakpoint = breakpoint
def capture(matchers, blocking = False, breakpoint = False, captureonce = False, callback = None):
async def _capture_service(waiter):
if blocking:
csm = ConsoleServiceCancel.createMatcher(waiter)
else:
waiter.send_result(self.apiroutine.currentroutine)
firsttime = True
while firsttime or not captureonce:
if blocking:
ev, m = await M_(*(tuple(matchers) + (csm,)))
else:
ev, m = await M_(*matchers)
if blocking and m is csm:
# Cancelled
return
print('Event Captured: Capture %r with %r' % (ev, m))
if firsttime and blocking:
waiter.send_result((ev, m, self.apiroutine.currentroutine))
firsttime = False
if callback:
try:
callback(ev, m)
except Exception:
print('Exception while running callback:')
traceback.print_exc()
if breakpoint:
_capture_breakpoint()
return _service_call_customized(_capture_service)
code.interact(self.__doc__ + '\n' + 'Python ' + str(sys.version) + ' on ' + str(sys.platform),
None,
{'server':self.server,'manager':manager, 'container':self.apiroutine,
'callapi':callapi, 'capture':capture, 'sendevent':sendevent,
'subroutine':subroutine, 'breakpoint':breakpoint, 'syscall':syscall,
'resume':resume, 'debug':debug, 'restore_console':restore_console,
'console_help':console_help,'execute':execute})
finally:
signal.signal(signal.SIGINT, lsignal)
def __init__(self, server):
'''
Constructor
'''
Module.__init__(self, server)
self._ce_matcher = ConsoleEvent.createMatcher()
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._service_routine
self._restore_console_event = threading.Event()
@generator_to_async(True, False)
def proxy(event, matcher):
while True:
events = self.sendEventQueue.get()
if events is None:
break
yield events
@async_to_async(True, False)
@async_processor
def processor(event, matcher, queueout):
if event.type == 'initproxy':
proxy(event, matcher, queueout)
self.connector = Connector(processor, (self._ce_matcher,), self.scheduler, False)
self.routines.append(self.apiroutine)
self.routines.append(self.connector)
if __name__ == '__main__':
from vlcp.server import main
manager['module.console.startinconsole'] = True
modules = list(sys.argv[1:]) + ['__main__.Console']
main(None, modules)
|
py
|
1a58413d4dc437ee3ea8c288475bdb1f4fa88c77
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import pdb
def find_and_print_lowest_value(x,y, x_key, y_key):
idx = np.argmin(y)
x_min = x[idx]
y_min = y[idx]
print('The lowest value of %s is %.8f at %s %.2f' % (y_key, y_min, x_key, x_min), flush=True)
def plot_result(result, plot_key, save_dir, plot_values=None, print_lowest_value=False):
# result is a dictionary of lists
# result[plot_key] is the horizontal axis
# result[key] is vertical axis
# we plot all other keys except plot_key against plot_key in result if plot_values is None
# plot_values could also be a list of keys
# we only plot those keys specified in plot_values against plot_key
# print('\n Comparing current ckpt with previous saved ckpts', flush=True)
os.makedirs(save_dir, exist_ok=True)
x = np.array(result[plot_key])
order = np.argsort(x)
x = x[order]
if len(result[plot_key]) > 1:
for key in result.keys():
plot = not key == plot_key
if not plot_values is None:
plot = plot and key in plot_values
if plot:
plt.figure()
if isinstance(result[key], dict):
for sub_key in result[key].keys():
y = np.array(result[key][sub_key])
y = y[order]
plt.plot(x, y, marker = '.', label=sub_key)
if print_lowest_value:
find_and_print_lowest_value(x, y, plot_key, key+'-'+sub_key)
plt.xlabel(plot_key)
plt.legend()
else:
y = np.array(result[key])
y = y[order]
plt.plot(x, y, marker = '.')
plt.xlabel(plot_key)
plt.ylabel(key)
if print_lowest_value:
find_and_print_lowest_value(x, y, plot_key, key)
save_file = os.path.join(save_dir, key+'.png')
plt.savefig(save_file)
plt.close()
print('have save the figure for %s to the file %s' % (key, save_file), flush=True)
else:
print('Do not plot because there is only 1 value in plot key', flush=True)
return 0
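# A sketch of the expected "result" layout (the values below are illustrative only):
#
#   result = {'iter': [1000, 2000],                      # plot_key: horizontal axis
#             'loss': [0.31, 0.27],                      # plain list -> single curve
#             'cd':   {'train': [0.12, 0.10],
#                      'test':  [0.15, 0.13]}}           # dict -> one labelled curve per sub key
#   plot_result(result, plot_key='iter', save_dir='./figs', print_lowest_value=True)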
if __name__ == '__main__':
file_name = '../exp_shapenet/T1000_betaT0.02_shape_generation_noise_reduce_factor_5_corrected/eval_results/total_eval_result.pkl'
handle = open(file_name, 'rb')
result = pickle.load(handle)
handle.close()
plot_key = 'iter'
save_dir = './'
plot_result(result, plot_key, save_dir)
# pdb.set_trace()
|
py
|
1a58413d53273f7f19d5e1018683367828556a36
|
from .occupation_model_template import *
from .zheng07_components import *
from .leauthaud11_components import *
from .cacciato09_components import *
from .tinker13_components import *
from .zu_mandelbaum15_components import *
|
py
|
1a584171381597d98f63e83fb67a13d09b7dc5bf
|
"""
Regularizer classes that also support GPU code
Michael Chen [email protected]
David Ren [email protected]
March 04, 2018
"""
import arrayfire as af
import numpy as np
from opticaltomography import settings
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class Regularizer:
"""
Highest-level Regularizer class that is responsible for parsing user arguments to create proximal operators
All proximal operators operate on complex variables (real & imaginary part separately)
Pure Real:
pure_real: boolean, whether or not to enforce object to be purely real
Pure imaginary:
pure_imag: boolean, whether or not to enforce object to be purely imaginary
Positivity:
positivity_real(positivity_imag): boolean, whether or not to enforce positivity for real(imaginary) part
Negativity:
negativity_real(negativity_imag): boolean, whether or not to enforce negativity for real(imaginary) part
LASSO (L1 regularizer):
lasso: boolean, whether or not to use LASSO proximal operator
lasso_parameter: threshold for LASSO
Total variation (3D only):
total_variation: boolean, whether or not to use total variation regularization
total_variation_gpu: boolean, whether or not to use GPU implementation
total_variation_parameter: scalar, regularization parameter (lambda)
total_variation_maxitr: integer, maximum number of iterations for the total variation proximal solver
"""
def __init__(self, configs = None, verbose = True, **kwargs):
#Given all parameters, construct all proximal operators
self.prox_list = []
reg_params = kwargs
if configs != None:
reg_params = self._parseConfigs(configs)
#Purely real
if reg_params.get("pure_real", False):
self.prox_list.append(PureReal())
#Purely imaginary
if reg_params.get("pure_imag", False):
self.prox_list.append(Pureimag())
#Total Variation
if reg_params.get("total_variation", False):
if reg_params.get("total_variation_gpu", False):
self.prox_list.append(TotalVariationGPU(**reg_params))
else:
self.prox_list.append(TotalVariationCPU(**reg_params))
#L1 Regularizer (LASSO)
elif reg_params.get("lasso", False):
self.prox_list.append(Lasso(reg_params.get("lasso_parameter", 1.0)))
#Others
else:
#Positivity
positivity_real = reg_params.get("positivity_real", False)
positivity_imag = reg_params.get("positivity_imag", False)
if positivity_real or positivity_imag:
self.prox_list.append(Positivity(positivity_real, positivity_imag))
#Negativity
negativity_real = reg_params.get("negativity_real", False)
negativity_imag = reg_params.get("negativity_imag", False)
if negativity_real or negativity_imag:
self.prox_list.append(Negativity(negativity_real, negativity_imag))
if verbose:
for prox_op in self.prox_list:
print("Regularizer -", prox_op.proximal_name)
def _parseConfigs(self, configs):
params = {}
params["pure_real"] = configs.pure_real
params["pure_imag"] = configs.pure_imag
#Total variation
params["total_variation"] = configs.total_variation
params["total_variation_gpu"] = configs.total_variation_gpu
params["total_variation_maxitr"] = configs.max_iter_tv
params["total_variation_order"] = configs.order_tv
params["total_variation_parameter"] = configs.reg_tv
#LASSO
params["lasso"] = configs.lasso
params["lasso_parameter"] = configs.reg_lasso
#Positivity/Negativity
if configs.positivity_real[0]:
if configs.positivity_real[1] == "larger":
params["positivity_real"] = True
else:
params["negativity_real"] = True
if configs.positivity_imag[0]:
if configs.positivity_imag[1] == "larger":
params["positivity_imag"] = True
else:
params["negativity_imag"] = True
return params
def computeCost(self, x):
cost = 0.0
for prox_op in self.prox_list:
cost_temp = prox_op.computeCost(x)
if cost_temp != None:
cost += cost_temp
return cost
def applyRegularizer(self, x):
for prox_op in self.prox_list:
x = prox_op.computeProx(x)
return x
class ProximalOperator():
def __init__(self, proximal_name):
self.proximal_name = proximal_name
def computeCost(self):
pass
def computeProx(self):
pass
def setParameter(self):
pass
def _boundRealValue(self, x, value = 0, flag_project = True):
"""If flag is true, only values that are greater than 'value' are preserved"""
if flag_project:
x[x < value] = 0
return x
class TotalVariationGPU(ProximalOperator):
def __init__(self, **kwargs):
proximal_name = "Total Variation"
parameter = kwargs.get("total_variation_parameter", 1.0)
maxitr = kwargs.get("total_variation_maxitr", 15)
self.order = kwargs.get("total_variation_order", 1)
self.pure_real = kwargs.get("pure_real", False)
self.pure_imag = kwargs.get("pure_imag", False)
#real part
if kwargs.get("positivity_real", False):
self.realProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_real")
elif kwargs.get("negativity_real", False):
self.realProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_real")
else:
self.realProjector = lambda x: x
#imaginary part
if kwargs.get("positivity_imag", False):
self.imagProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_imag")
elif kwargs.get("negativity_imag", False):
self.imagProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_imag")
else:
self.imagProjector = lambda x: x
self.setParameter(parameter, maxitr)
super().__init__(proximal_name)
def setParameter(self, parameter, maxitr):
self.parameter = parameter
self.maxitr = maxitr
def computeCost(self, x):
return None
def _computeTVNorm(self, x):
x_norm = x**2
x_norm = af.sum(x_norm, dim = 3)**0.5
x_norm[x_norm<1.0] = 1.0
return x_norm
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(af.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(af.imag(x), self.imagProjector)
else:
x = self._computeProxReal(af.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(af.imag(x), self.imagProjector)
return x
def _filterD(self, x, axis):
assert axis<3, "This function only supports matrices of up to 3 dimensions!"
if self.order == 1:
if axis == 0:
Dx = x - af.shift(x, 1, 0, 0)
elif axis == 1:
Dx = x - af.shift(x, 0, 1, 0)
else:
Dx = x - af.shift(x, 0, 0, 1)
elif self.order == 2:
if axis == 0:
Dx = x - 2*af.shift(x, 1, 0, 0) + af.shift(x, 2, 0, 0)
elif axis == 1:
Dx = x - 2*af.shift(x, 0, 1, 0) + af.shift(x, 0, 2, 0)
else:
Dx = x - 2*af.shift(x, 0, 0, 1) + af.shift(x, 0, 0, 2)
elif self.order == 3:
if axis == 0:
Dx = x - 3*af.shift(x, 1, 0, 0) + 3*af.shift(x, 2, 0, 0) - af.shift(x, 3, 0, 0)
elif axis == 1:
Dx = x - 3*af.shift(x, 0, 1, 0) + 3*af.shift(x, 0, 2, 0) - af.shift(x, 0, 3, 0)
else:
Dx = x - 3*af.shift(x, 0, 0, 1) + 3*af.shift(x, 0, 0, 2) - af.shift(x, 0, 0, 3)
else:
raise NotImplementedError("filter orders larger than 3 are not implemented!")
return Dx
def _filterDT(self, x):
if self.order == 1:
DTx = x[:, :, :, 0] - af.shift(x[ :, :, :, 0], -1, 0, 0) + \
x[:, :, :, 1] - af.shift(x[ :, :, :, 1], 0, -1, 0) + \
x[:, :, :, 2] - af.shift(x[ :, :, :, 2], 0, 0, -1)
elif self.order == 2:
DTx = x[:, :, :, 0] - 2*af.shift(x[ :, :, :, 0], -1, 0, 0) + af.shift(x[ :, :, :, 0], -2, 0, 0) + \
x[:, :, :, 1] - 2*af.shift(x[ :, :, :, 1], 0, -1, 0) + af.shift(x[ :, :, :, 1], 0, -2, 0) + \
x[:, :, :, 2] - 2*af.shift(x[ :, :, :, 2], 0, 0, -1) + af.shift(x[ :, :, :, 2], 0, 0, -2)
elif self.order == 3:
DTx = x[:, :, :, 0] - 3*af.shift(x[ :, :, :, 0], -1, 0, 0) + 3*af.shift(x[ :, :, :, 0], -2, 0, 0) - af.shift(x[ :, :, :, 0], -3, 0, 0) + \
x[:, :, :, 1] - 3*af.shift(x[ :, :, :, 1], 0, -1, 0) + 3*af.shift(x[ :, :, :, 1], 0, -2, 0) - af.shift(x[ :, :, :, 1], 0, -3, 0) + \
x[:, :, :, 2] - 3*af.shift(x[ :, :, :, 2], 0, 0, -1) + 3*af.shift(x[ :, :, :, 2], 0, 0, -2) - af.shift(x[ :, :, :, 2], 0, 0, -3)
else:
raise NotImplementedError("filter orders larger than 3 are not implemented!")
return DTx
def _computeProxReal(self, x, projector):
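# Descriptive note: this appears to be an accelerated (FISTA-style) projected
# gradient iteration on the dual of the TV proximal problem; u_k/u_hat hold the
# dual variable and grad_u_hat the corresponding primal estimate.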
t_k = 1.0
u_k = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_k1 = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
grad_u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], dtype = af_float_datatype)
def _gradUpdate():
grad_u_hat = x - self.parameter * self._filterDT(u_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat[:, :, :] = x
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=0)
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=1)
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=2)
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :, 0] /= u_k1_norm
u_k1[ :, :, :, 1] /= u_k1_norm
u_k1[ :, :, :, 2] /= u_k1_norm
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class TotalVariationCPU(TotalVariationGPU):
def _computeTVNorm(self, x):
u_k1_norm = af.to_array(x)
u_k1_norm[:, :, :, :] *= u_k1_norm
u_k1_norm = af.sum(u_k1_norm, dim = 3)**0.5
u_k1_norm[u_k1_norm<1.0] = 1.0
return np.array(u_k1_norm)
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(np.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(np.imag(x), self.imagProjector)
else:
x = self._computeProxReal(np.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(np.imag(x), self.imagProjector)
return af.to_array(x)
def _computeProxReal(self, x, projector):
t_k = 1.0
u_k = np.zeros(x.shape + (3,), dtype = np_float_datatype);
u_k1 = u_k.copy()
u_hat = u_k.copy()
def _gradUpdate():
u_hat_af = af.to_array(u_hat)
DTu_hat = u_hat_af[:, :, :, 0] - af.shift(u_hat_af[ :, :, :, 0], -1, 0, 0) + \
u_hat_af[:, :, :, 1] - af.shift(u_hat_af[ :, :, :, 1], 0, -1, 0) + \
u_hat_af[:, :, :, 2] - af.shift(u_hat_af[ :, :, :, 2], 0, 0, -1)
grad_u_hat = x - np.array(self.parameter * DTu_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat = x.copy()
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 0))
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 1))
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 2))
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :] /= u_k1_norm[:, :, :, np.newaxis]
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class Positivity(ProximalOperator):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, positivity_real, positivity_imag, proximal_name = "Positivity"):
super().__init__(proximal_name)
self.real = positivity_real
self.imag = positivity_imag
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._boundRealValue(af.real(x), 0, self.real) +\
1.0j * self._boundRealValue(af.imag(x), 0, self.imag)
else:
x = self._boundRealValue(np.real(x), 0, self.real) +\
1.0j * self._boundRealValue(np.imag(x), 0, self.imag)
return x
class Negativity(Positivity):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, negativity_real, negativity_imag):
super().__init__(negativity_real, negativity_imag, "Negativity")
def computeProx(self, x):
return (-1.) * super().computeProx((-1.) * x)
class PureReal(ProximalOperator):
"""Enforce real constraint on a complex, imaginary part will be cleared"""
def __init__(self):
super().__init__("Pure real")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = af.real(x) + 1j*0.0
else:
x = np.real(x) + 1j*0.0
return x
class Pureimag(ProximalOperator):
"""Enforce imaginary constraint on a complex, real part will be cleared"""
def __init__(self):
super().__init__("Pure imaginary")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = 1j*af.imag(x)
else:
x = 1j*x.imag
return x
class Lasso(ProximalOperator):
"""||x||_1 regularizer, soft thresholding with certain parameter"""
def __init__(self, parameter):
super().__init__("LASSO")
self.setParameter(parameter)
def _softThreshold(self, x):
if type(x).__module__ == "arrayfire.array":
#POTENTIAL BUG: af.sign implementation does not agree with documentation
x = (af.sign(x)-0.5)*(-2.0) * (af.abs(x) - self.parameter) * (af.abs(x) > self.parameter)
else:
x = np.sign(x) * (np.abs(x) - self.parameter) * (np.abs(x) > self.parameter)
return x
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
return af.norm(af.moddims(x, np.prod(x.shape)), norm_type = af.NORM.VECTOR_1)
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._softThreshold(af.real(x)) + 1.0j * self._softThreshold(af.imag(x))
else:
x = self._softThreshold(np.real(x)) + 1.0j * self._softThreshold(np.imag(x))
return x
#TODO: implement Tikhonov
class Tikhonov(ProximalOperator):
def __init__(self):
pass
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
pass
def computeProx(self, x):
return x
#TODO: implement pure amplitude constraint
class PureAmplitude(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
#TODO: implement pure phase constraint
class PurePhase(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
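# A minimal usage sketch (assuming the module's own dependencies such as
# arrayfire and opticaltomography.settings are importable; the array shape and
# options below are illustrative, not part of the library):
#
#   import numpy as np
#   reg = Regularizer(pure_real=True, positivity_real=True, verbose=True)
#   x = np.random.randn(8, 8, 8) + 1.0j * np.random.randn(8, 8, 8)
#   x = reg.applyRegularizer(x)    # imaginary part cleared, then negative reals zeroed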
|
py
|
1a5842860ef8251c186b87b2602037e174262b7c
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Element handle module."""
import copy
import logging
import math
import os.path
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from pyppeteer.connection import CDPSession
from pyppeteer.execution_context import ExecutionContext, JSHandle
from pyppeteer.errors import ElementHandleError, NetworkError
from pyppeteer.helper import debugError
from pyppeteer.util import merge_dict
if TYPE_CHECKING:
from pyppeteer.frame_manager import Frame, FrameManager # noqa: F401
logger = logging.getLogger(__name__)
class ElementHandle(JSHandle):
"""ElementHandle class.
This class represents an in-page DOM element. ElementHandle can be created
by the :meth:`pyppeteer.page.Page.querySelector` method.
ElementHandle prevents DOM element from garbage collection unless the
handle is disposed. ElementHandles are automatically disposed when their
origin frame gets navigated.
ElementHandle instances can be used as arguments in
:meth:`pyppeteer.page.Page.querySelectorEval` and
:meth:`pyppeteer.page.Page.evaluate` methods.
"""
def __init__(self, context: ExecutionContext, client: CDPSession,
remoteObject: dict, page: Any,
frameManager: 'FrameManager') -> None:
super().__init__(context, client, remoteObject)
self._client = client
self._remoteObject = remoteObject
self._page = page
self._frameManager = frameManager
self._disposed = False
def asElement(self) -> 'ElementHandle':
"""Return this ElementHandle."""
return self
async def contentFrame(self) -> Optional['Frame']:
"""Return the content frame for the element handle.
Return ``None`` if this handle is not referencing an iframe.
"""
nodeInfo = await self._client.send('DOM.describeNode', {
'objectId': self._remoteObject.get('objectId'),
})
node_obj = nodeInfo.get('node', {})
if not isinstance(node_obj.get('frameId'), str):
return None
return self._frameManager.frame(node_obj['frameId'])
async def _scrollIntoViewIfNeeded(self) -> None:
error = await self.executionContext.evaluate('''
async element => {
if (!element.isConnected)
return 'Node is detached from document';
if (element.nodeType !== Node.ELEMENT_NODE)
return 'Node is not of type HTMLElement';
const visibleRatio = await new Promise(resolve => {
const observer = new IntersectionObserver(entries => {
resolve(entries[0].intersectionRatio);
observer.disconnect();
});
observer.observe(element);
});
if (visibleRatio !== 1.0)
element.scrollIntoView({
block: 'center',
inline: 'center',
behavior: 'instant',
});
return false;
}''', self)
if error:
raise ElementHandleError(error)
async def _clickablePoint(self) -> Dict[str, float]: # noqa: C901
result = None
try:
result = await self._client.send('DOM.getContentQuads', {
'objectId': self._remoteObject.get('objectId'),
})
except Exception as e:
debugError(logger, e)
if not result or not result.get('quads'):
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
quads = []
for _quad in result.get('quads'):
_q = self._fromProtocolQuad(_quad)
if _computeQuadArea(_q) > 1:
quads.append(_q)
if not quads:
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
quad = quads[0]
x = 0
y = 0
for point in quad:
x += point['x']
y += point['y']
return {'x': x / 4, 'y': y / 4}
async def _getBoxModel(self) -> Optional[Dict]:
try:
result: Optional[Dict] = await self._client.send(
'DOM.getBoxModel',
{'objectId': self._remoteObject.get('objectId')},
)
except NetworkError as e:
debugError(logger, e)
result = None
return result
def _fromProtocolQuad(self, quad: List[int]) -> List[Dict[str, int]]:
return [
{'x': quad[0], 'y': quad[1]},
{'x': quad[2], 'y': quad[3]},
{'x': quad[4], 'y': quad[5]},
{'x': quad[6], 'y': quad[7]},
]
async def hover(self) -> None:
"""Move mouse over to center of this element.
If needed, this method scrolls element into view. If this element is
detached from DOM tree, the method raises an ``ElementHandleError``.
"""
await self._scrollIntoViewIfNeeded()
obj = await self._clickablePoint()
x = obj.get('x', 0)
y = obj.get('y', 0)
await self._page.mouse.move(x, y)
async def click(self, options: dict = None, **kwargs: Any) -> None:
"""Click the center of this element.
If needed, this method scrolls element into view. If the element is
detached from DOM, the method raises ``ElementHandleError``.
``options`` can contain the following fields:
* ``button`` (str): ``left``, ``right``, or ``middle``, defaults to
``left``.
* ``clickCount`` (int): Defaults to 1.
* ``delay`` (int|float): Time to wait between ``mousedown`` and
``mouseup`` in milliseconds. Defaults to 0.
"""
options = merge_dict(options, kwargs)
await self._scrollIntoViewIfNeeded()
obj = await self._clickablePoint()
x = obj.get('x', 0)
y = obj.get('y', 0)
await self._page.mouse.click(x, y, options)
async def uploadFile(self, *filePaths: str) -> dict:
"""Upload files."""
files = [os.path.abspath(p) for p in filePaths]
objectId = self._remoteObject.get('objectId')
return await self._client.send(
'DOM.setFileInputFiles',
{'objectId': objectId, 'files': files}
)
async def tap(self) -> None:
"""Tap the center of this element.
If needed, this method scrolls element into view. If the element is
detached from DOM, the method raises ``ElementHandleError``.
"""
await self._scrollIntoViewIfNeeded()
center = await self._clickablePoint()
x = center.get('x', 0)
y = center.get('y', 0)
await self._page.touchscreen.tap(x, y)
async def focus(self) -> None:
"""Focus on this element."""
await self.executionContext.evaluate(
'element => element.focus()', self)
async def type(self, text: str, options: Dict = None, **kwargs: Any
) -> None:
"""Focus the element and then type text.
Details see :meth:`pyppeteer.input.Keyboard.type` method.
"""
options = merge_dict(options, kwargs)
await self.focus()
await self._page.keyboard.type(text, options)
async def press(self, key: str, options: Dict = None, **kwargs: Any
) -> None:
"""Press ``key`` onto the element.
This method focuses the element, and then uses
:meth:`pyppeteer.input.keyboard.down` and
:meth:`pyppeteer.input.keyboard.up`.
:arg str key: Name of key to press, such as ``ArrowLeft``.
This method accepts the following options:
* ``text`` (str): If specified, generates an input event with this
text.
* ``delay`` (int|float): Time to wait between ``keydown`` and
``keyup``. Defaults to 0.
"""
options = merge_dict(options, kwargs)
await self.focus()
await self._page.keyboard.press(key, options)
async def boundingBox(self) -> Optional[Dict[str, float]]:
"""Return bounding box of this element.
If the element is not visible, return ``None``.
This method returns dictionary of bounding box, which contains:
* ``x`` (int): The X coordinate of the element in pixels.
* ``y`` (int): The Y coordinate of the element in pixels.
* ``width`` (int): The width of the element in pixels.
* ``height`` (int): The height of the element in pixels.
"""
result = await self._getBoxModel()
if not result:
return None
quad = result['model']['border']
x = min(quad[0], quad[2], quad[4], quad[6])
y = min(quad[1], quad[3], quad[5], quad[7])
width = max(quad[0], quad[2], quad[4], quad[6]) - x
height = max(quad[1], quad[3], quad[5], quad[7]) - y
return {'x': x, 'y': y, 'width': width, 'height': height}
async def boxModel(self) -> Optional[Dict]:
"""Return boxes of element.
Return ``None`` if the element is not visible. Boxes are represented as a
list of points; each point is a dictionary ``{x, y}``. Box points are
sorted clockwise.
Returned value is a dictionary with the following fields:
* ``content`` (List[Dict]): Content box.
* ``padding`` (List[Dict]): Padding box.
* ``border`` (List[Dict]): Border box.
* ``margin`` (List[Dict]): Margin box.
* ``width`` (int): Element's width.
* ``height`` (int): Element's height.
"""
result = await self._getBoxModel()
if not result:
return None
model = result.get('model', {})
return {
'content': self._fromProtocolQuad(model.get('content')),
'padding': self._fromProtocolQuad(model.get('padding')),
'border': self._fromProtocolQuad(model.get('border')),
'margin': self._fromProtocolQuad(model.get('margin')),
'width': model.get('width'),
'height': model.get('height'),
}
async def screenshot(self, options: Dict = None, **kwargs: Any) -> bytes:
"""Take a screenshot of this element.
If the element is detached from DOM, this method raises an
``ElementHandleError``.
Available options are same as :meth:`pyppeteer.page.Page.screenshot`.
"""
options = merge_dict(options, kwargs)
needsViewportReset = False
boundingBox = await self.boundingBox()
if not boundingBox:
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
original_viewport = copy.deepcopy(self._page.viewport)
if (boundingBox['width'] > original_viewport['width'] or
boundingBox['height'] > original_viewport['height']):
newViewport = {
'width': max(
original_viewport['width'],
math.ceil(boundingBox['width'])
),
'height': max(
original_viewport['height'],
math.ceil(boundingBox['height'])
),
}
new_viewport = copy.deepcopy(original_viewport)
new_viewport.update(newViewport)
await self._page.setViewport(new_viewport)
needsViewportReset = True
await self._scrollIntoViewIfNeeded()
boundingBox = await self.boundingBox()
if not boundingBox:
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
_obj = await self._client.send('Page.getLayoutMetrics')
pageX = _obj['layoutViewport']['pageX']
pageY = _obj['layoutViewport']['pageY']
clip = {}
clip.update(boundingBox)
clip['x'] = clip['x'] + pageX
clip['y'] = clip['y'] + pageY
opt = {'clip': clip}
opt.update(options)
imageData = await self._page.screenshot(opt)
if needsViewportReset:
await self._page.setViewport(original_viewport)
return imageData
async def querySelector(self, selector: str) -> Optional['ElementHandle']:
"""Return first element which matches ``selector`` under this element.
If no element matches the ``selector``, returns ``None``.
"""
handle = await self.executionContext.evaluateHandle(
'(element, selector) => element.querySelector(selector)',
self, selector,
)
element = handle.asElement()
if element:
return element
await handle.dispose()
return None
async def querySelectorAll(self, selector: str) -> List['ElementHandle']:
"""Return all elements which match ``selector`` under this element.
If no element matches the ``selector``, returns empty list (``[]``).
"""
arrayHandle = await self.executionContext.evaluateHandle(
'(element, selector) => element.querySelectorAll(selector)',
self, selector,
)
properties = await arrayHandle.getProperties()
await arrayHandle.dispose()
result = []
for prop in properties.values():
elementHandle = prop.asElement()
if elementHandle:
result.append(elementHandle)
return result # type: ignore
async def querySelectorEval(self, selector: str, pageFunction: str,
*args: Any) -> Any:
"""Run ``Page.querySelectorEval`` within the element.
This method runs ``document.querySelector`` within the element and
passes it as the first argument to ``pageFunction``. If there is no
element matching ``selector``, the method raises
``ElementHandleError``.
If ``pageFunction`` returns a promise, then wait for the promise to
resolve and return its value.
``ElementHandle.Jeval`` is a shortcut of this method.
Example:
.. code:: python
tweetHandle = await page.querySelector('.tweet')
assert (await tweetHandle.querySelectorEval('.like', 'node => node.innerText')) == 100
assert (await tweetHandle.Jeval('.retweets', 'node => node.innerText')) == 10
""" # noqa: E501
elementHandle = await self.querySelector(selector)
if not elementHandle:
raise ElementHandleError(
f'Error: failed to find element matching selector "{selector}"'
)
result = await self.executionContext.evaluate(
pageFunction, elementHandle, *args)
await elementHandle.dispose()
return result
async def querySelectorAllEval(self, selector: str, pageFunction: str,
*args: Any) -> Any:
"""Run ``Page.querySelectorAllEval`` within the element.
This method runs ``Array.from(document.querySelectorAll)`` within the
element and passes it as the first argument to ``pageFunction``. If
there is no element matching ``selector``, the method raises
``ElementHandleError``.
If ``pageFunction`` returns a promise, then wait for the promise to
resolve and return its value.
Example:
.. code:: html
<div class="feed">
<div class="tweet">Hello!</div>
<div class="tweet">Hi!</div>
</div>
.. code:: python
feedHandle = await page.J('.feed')
assert (await feedHandle.JJeval('.tweet', '(nodes => nodes.map(n => n.innerText))')) == ['Hello!', 'Hi!']
""" # noqa: E501
arrayHandle = await self.executionContext.evaluateHandle(
'(element, selector) => Array.from(element.querySelectorAll(selector))', # noqa: E501
self, selector
)
result = await self.executionContext.evaluate(
pageFunction, arrayHandle, *args)
await arrayHandle.dispose()
return result
#: alias to :meth:`querySelector`
J = querySelector
#: alias to :meth:`querySelectorAll`
JJ = querySelectorAll
#: alias to :meth:`querySelectorEval`
Jeval = querySelectorEval
#: alias to :meth:`querySelectorAllEval`
JJeval = querySelectorAllEval
async def xpath(self, expression: str) -> List['ElementHandle']:
"""Evaluate the XPath expression relative to this elementHandle.
If there are no such elements, return an empty list.
:arg str expression: XPath string to be evaluated.
"""
arrayHandle = await self.executionContext.evaluateHandle(
'''(element, expression) => {
const document = element.ownerDocument || element;
const iterator = document.evaluate(expression, element, null,
XPathResult.ORDERED_NODE_ITERATOR_TYPE);
const array = [];
let item;
while ((item = iterator.iterateNext()))
array.push(item);
return array;
}''', self, expression)
properties = await arrayHandle.getProperties()
await arrayHandle.dispose()
result = []
for property in properties.values():
elementHandle = property.asElement()
if elementHandle:
result.append(elementHandle)
return result
#: alias to :meth:`xpath`
Jx = xpath
async def isIntersectingViewport(self) -> bool:
"""Return ``True`` if the element is visible in the viewport."""
return await self.executionContext.evaluate('''async element => {
const visibleRatio = await new Promise(resolve => {
const observer = new IntersectionObserver(entries => {
resolve(entries[0].intersectionRatio);
observer.disconnect();
});
observer.observe(element);
});
return visibleRatio > 0;
}''', self)
def _computeQuadArea(quad: List[Dict]) -> float:
area = 0
for i, _ in enumerate(quad):
p1 = quad[i]
p2 = quad[(i + 1) % len(quad)]
area += (p1['x'] * p2['y'] - p2['x'] * p1['y']) / 2
return area
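# A minimal usage sketch (assumes pyppeteer is installed and can launch Chromium;
# the URL and selector are illustrative only):
#
#   import asyncio
#   from pyppeteer import launch
#
#   async def demo():
#       browser = await launch()
#       page = await browser.newPage()
#       await page.goto('https://example.org')
#       handle = await page.querySelector('h1')       # -> ElementHandle or None
#       print(await handle.boundingBox())             # {'x': ..., 'y': ..., 'width': ..., 'height': ...}
#       await handle.click()
#       await browser.close()
#
#   asyncio.get_event_loop().run_until_complete(demo())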
|
py
|
1a5842de3eedf7f4f0678e030ea528114bf1b290
|
from abc import ABC, abstractmethod
from collections import Counter
from functools import reduce
from re import split
from sys import version_info
import pandas as pd
from flashtext import KeywordProcessor
from scattertext.ScatterChart import check_topic_model_string_format
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class FeatsFromTopicModelBase(ABC):
def __init__(self, topic_model):
self._topic_model = topic_model
self._lexicon_df = self._get_lexicon_df_from_topic_model(topic_model)
def _get_lexicon_df_from_topic_model(self, topic_model):
return (pd.DataFrame(pd.Series(topic_model)
.apply(pd.Series)
.reset_index())
.melt(id_vars=['index'])
[['index', 'value']]
.rename(columns={'index': 'cat', 'value': 'term'})
.set_index('term'))
def _analyze(self, doc):
text_df = (pd.DataFrame(pd.Series(self._get_terms_from_doc(doc)))
.join(self._lexicon_df)
.dropna()
.groupby('cat')
.sum())
return text_df
def get_doc_metadata(self, doc, prefix=''):
feature_counter = Counter()
if version_info[0] >= 3:
doc = str(doc)
for category, score in self._analyze(doc).to_dict()[0].items():
feature_counter[prefix + category] = int(score)
return feature_counter
@abstractmethod
def _get_terms_from_doc(self, doc):
pass
class FeatsFromTopicModel(FeatsFromTopicModelBase, FeatsFromSpacyDoc):
def __init__(self,
topic_model,
use_lemmas=False,
entity_types_to_censor=set(),
entity_types_to_use=None,
tag_types_to_censor=set(),
strip_final_period=False,
keyword_processor_args={'case_sensitive': False}):
self._keyword_processor = KeywordProcessor(**keyword_processor_args)
self._topic_model = topic_model.copy()
if keyword_processor_args.get('case_sensitive', None) is False:
for k, v in self._topic_model.items():
self._topic_model[k] = [e.lower() for e in v]
for keyphrase in reduce(lambda x, y: set(x) | set(y), self._topic_model.values()):
self._keyword_processor.add_keyword(keyphrase)
FeatsFromSpacyDoc.__init__(self, use_lemmas, entity_types_to_censor,
tag_types_to_censor, strip_final_period)
FeatsFromTopicModelBase.__init__(self, topic_model)
def get_top_model_term_lists(self):
return self._topic_model
def _get_terms_from_doc(self, doc):
return Counter(self._keyword_processor.extract_keywords(str(doc)))
def get_feats(self, doc):
return Counter(self._get_terms_from_doc(str(doc)))
"""
class FeatsFromTopicModel(FeatsFromSpacyDoc, FeatsFromTopicModelBase):
def __init__(self,
topic_model,
use_lemmas=False,
entity_types_to_censor=set(),
tag_types_to_censor=set(),
strip_final_period=False,
**kwargs):
'''
Parameters
----------
topic_model : dict
{topicmodelname: [term1, term2, ....], ...}
Other parameters from FeatsFromSpacyDoc.__init__
'''
check_topic_model_string_format(topic_model)
self._topic_model = topic_model
self._lexicon_df = self._get_lexicon_df_from_topic_model(topic_model)
super(FeatsFromTopicModel, self).__init__(use_lemmas,
entity_types_to_censor,
tag_types_to_censor,
strip_final_period)
def _get_terms_from_doc(self, doc):
return Counter(t for t in split(r"(\W)", doc.lower()) if t.strip())
def has_metadata_term_list(self):
return True
def get_top_model_term_lists(self):
return self._topic_model
"""
|
py
|
1a584363edc676e9cdfbb9ffd49e6bce61304e6b
|
# Generated by Django 3.0.4 on 2020-03-09 21:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='create_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='post',
name='published_date',
field=models.DateTimeField(blank=True, null=True),
),
]
|
py
|
1a58438c9f85a98891706aad1f962ac03e8d2c50
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import difflib
import six
import sys
import time
from packaging.version import Version
from mock import Mock, patch
from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor
from cassandra.cluster import Cluster
from cassandra.encoder import Encoder
from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names,
RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser,
group_keys_by_replica, NO_VALID_REPLICA)
from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, execute_until_pass,
BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase,
BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION,
get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30, local,
greaterthancass20)
from tests.integration import greaterthancass21
def setup_module():
use_singledc()
class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase):
@local
def test_broadcast_listen_address(self):
"""
Check to ensure that the broadcast and listen addresses are populated correctly
@since 3.3
@jira_ticket PYTHON-332
@expected_result They are populated for C* > 2.1.6, 2.2.0
@test_category metadata
"""
# All nodes should have the broadcast_address set
for host in self.cluster.metadata.all_hosts():
self.assertIsNotNone(host.broadcast_address)
con = self.cluster.control_connection.get_connections()[0]
local_host = con.host
# The control connection node should have the listen address set.
listen_addrs = [host.listen_address for host in self.cluster.metadata.all_hosts()]
self.assertTrue(local_host in listen_addrs)
def test_host_release_version(self):
"""
Checks each host's release version and validates that it is equal to the
Cassandra version we are using in our test harness.
@since 3.3
@jira_ticket PYTHON-301
@expected_result host.release version should match our specified Cassandra version.
@test_category metadata
"""
for host in self.cluster.metadata.all_hosts():
self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION.base_version))
@local
class MetaDataRemovalTest(unittest.TestCase):
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=['127.0.0.1','127.0.0.2', '127.0.0.3', '126.0.0.186'])
self.cluster.connect()
def tearDown(self):
self.cluster.shutdown()
def test_bad_contact_point(self):
"""
Checks to ensure that hosts that are not resolvable are excluded from the contact point list.
@since 3.6
@jira_ticket PYTHON-549
@expected_result Invalid hosts on the contact list should be excluded
@test_category metadata
"""
self.assertEqual(len(self.cluster.metadata.all_hosts()), 3)
class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase):
def test_schema_metadata_disable(self):
"""
Checks to ensure that the schema_metadata_enabled and token_metadata_enabled
flags work correctly.
@since 3.3
@jira_ticket PYTHON-327
@expected_result schema metadata will not be populated when schema_metadata_enabled is false;
token_metadata will be missing when token_metadata_enabled is set to false
@test_category metadata
"""
# Validate metadata is missing where appropriate
no_schema = Cluster(schema_metadata_enabled=False)
no_schema_session = no_schema.connect()
self.assertEqual(len(no_schema.metadata.keyspaces), 0)
self.assertEqual(no_schema.metadata.export_schema_as_string(), '')
no_token = Cluster(token_metadata_enabled=False)
no_token_session = no_token.connect()
self.assertEqual(len(no_token.metadata.token_map.token_to_host_owner), 0)
# Do a simple query to ensure queries are working
query = "SELECT * FROM system.local"
no_schema_rs = no_schema_session.execute(query)
no_token_rs = no_token_session.execute(query)
self.assertIsNotNone(no_schema_rs[0])
self.assertIsNotNone(no_token_rs[0])
no_schema.shutdown()
no_token.shutdown()
def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None):
clustering_cols = clustering_cols or []
other_cols = other_cols or []
statement = "CREATE TABLE %s.%s (" % (self.keyspace_name, self.function_table_name)
if len(partition_cols) == 1 and not clustering_cols:
statement += "%s text PRIMARY KEY, " % protect_name(partition_cols[0])
else:
statement += ", ".join("%s text" % protect_name(col) for col in partition_cols)
statement += ", "
statement += ", ".join("%s text" % protect_name(col) for col in clustering_cols + other_cols)
if len(partition_cols) != 1 or clustering_cols:
statement += ", PRIMARY KEY ("
if len(partition_cols) > 1:
statement += "(" + ", ".join(protect_names(partition_cols)) + ")"
else:
statement += protect_name(partition_cols[0])
if clustering_cols:
statement += ", "
statement += ", ".join(protect_names(clustering_cols))
statement += ")"
statement += ")"
return statement
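# For reference, make_create_statement(["a"], ["b"], ["c"]) with keyspace "ks" and
# table "t" produces (modulo whitespace):
#   CREATE TABLE ks.t (a text, b text, c text, PRIMARY KEY (a, b))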
def check_create_statement(self, tablemeta, original):
recreate = tablemeta.as_cql_query(formatted=False)
self.assertEqual(original, recreate[:len(original)])
execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
execute_until_pass(self.session, recreate)
# create the table again, but with formatting enabled
execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
recreate = tablemeta.as_cql_query(formatted=True)
execute_until_pass(self.session, recreate)
def get_table_metadata(self):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_table_name)
return self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name]
def test_basic_table_meta_properties(self):
create_statement = self.make_create_statement(["a"], [], ["b", "c"])
self.session.execute(create_statement)
self.cluster.refresh_schema_metadata()
meta = self.cluster.metadata
self.assertNotEqual(meta.cluster_name, None)
self.assertTrue(self.keyspace_name in meta.keyspaces)
ksmeta = meta.keyspaces[self.keyspace_name]
self.assertEqual(ksmeta.name, self.keyspace_name)
self.assertTrue(ksmeta.durable_writes)
self.assertEqual(ksmeta.replication_strategy.name, 'SimpleStrategy')
self.assertEqual(ksmeta.replication_strategy.replication_factor, 1)
self.assertTrue(self.function_table_name in ksmeta.tables)
tablemeta = ksmeta.tables[self.function_table_name]
self.assertEqual(tablemeta.keyspace_name, ksmeta.name)
self.assertEqual(tablemeta.name, self.function_table_name)
self.assertEqual(tablemeta.name, self.function_table_name)
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
cc = self.cluster.control_connection._connection
parser = get_schema_parser(cc, CASSANDRA_VERSION.base_version, 1)
for option in tablemeta.options:
self.assertIn(option, parser.recognized_table_options)
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_protected(self):
create_statement = self.make_create_statement(["Aa"], ["Bb"], ["Cc"])
create_statement += ' WITH CLUSTERING ORDER BY ("Bb" ASC)'
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'Aa'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'Bb'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'Aa', u'Bb', u'Cc'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_more_columns(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b', u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual(
[u'a', u'b', u'c', u'd', u'e', u'f'],
sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_primary_key(self):
create_statement = self.make_create_statement(["a", "b"], [], ["c"])
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d", "e"])
create_statement += " WITH CLUSTERING ORDER BY (c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd', u'e'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_compact(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_cluster_column_ordering_reversed_metadata(self):
"""
Simple test to ensure that the metadata associated with clustering order is surfaced correctly.
Creates a table with a few clustering keys, then checks the clustering order associated with the clustering columns
and ensures it is set correctly.
@since 3.0.0
@jira_ticket PYTHON-402
@expected_result is_reversed is set on DESC order, and is False on ASC
@test_category metadata
"""
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
b_column = tablemeta.columns['b']
self.assertFalse(b_column.is_reversed)
c_column = tablemeta.columns['c']
self.assertTrue(c_column.is_reversed)
def test_compound_primary_keys_more_columns_compact(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b', u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_primary_key_compact(self):
create_statement = self.make_create_statement(["a", "b"], [], ["c"])
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key_compact(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d"])
create_statement += " WITH CLUSTERING ORDER BY (c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
@lessthancass30
def test_cql_compatibility(self):
# having more than one non-PK column is okay if there aren't any
# clustering columns
create_statement = self.make_create_statement(["a"], [], ["b", "c", "d"])
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.assertTrue(tablemeta.is_cql_compatible)
# It will be cql compatible after CASSANDRA-10857
# since compact storage is being dropped
tablemeta.clustering_key = ["foo", "bar"]
tablemeta.columns["foo"] = None
tablemeta.columns["bar"] = None
self.assertTrue(tablemeta.is_cql_compatible)
def test_compound_primary_keys_ordering(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_more_columns_ordering(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b DESC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key_ordering(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d", "e"])
create_statement += " WITH CLUSTERING ORDER BY (c DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_indexes(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
execute_until_pass(self.session, create_statement)
d_index = "CREATE INDEX d_index ON %s.%s (d)" % (self.keyspace_name, self.function_table_name)
e_index = "CREATE INDEX e_index ON %s.%s (e)" % (self.keyspace_name, self.function_table_name)
execute_until_pass(self.session, d_index)
execute_until_pass(self.session, e_index)
tablemeta = self.get_table_metadata()
statements = tablemeta.export_as_string().strip()
statements = [s.strip() for s in statements.split(';')]
statements = list(filter(bool, statements))
self.assertEqual(3, len(statements))
self.assertIn(d_index, statements)
self.assertIn(e_index, statements)
# make sure indexes are included in KeyspaceMetadata.export_as_string()
ksmeta = self.cluster.metadata.keyspaces[self.keyspace_name]
statement = ksmeta.export_as_string()
self.assertIn('CREATE INDEX d_index', statement)
self.assertIn('CREATE INDEX e_index', statement)
@greaterthancass21
def test_collection_indexes(self):
self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map<text, text>)"
% (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index1 ON %s.%s (keys(b))"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
self.assertIn('(keys(b))', tablemeta.export_as_string())
self.session.execute("DROP INDEX %s.index1" % (self.keyspace_name,))
self.session.execute("CREATE INDEX index2 ON %s.%s (b)"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
target = ' (b)' if CASSANDRA_VERSION < Version("3.0") else 'values(b))' # explicit values in C* 3+
self.assertIn(target, tablemeta.export_as_string())
# test full indexes on frozen collections, if available
if CASSANDRA_VERSION >= Version("2.1.3"):
self.session.execute("DROP TABLE %s.%s" % (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen<map<text, text>>)"
% (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index3 ON %s.%s (full(b))"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
self.assertIn('(full(b))', tablemeta.export_as_string())
def test_compression_disabled(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH compression = {}"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
expected = "compression = {}" if CASSANDRA_VERSION < Version("3.0") else "compression = {'enabled': 'false'}"
self.assertIn(expected, tablemeta.export_as_string())
def test_non_size_tiered_compaction(self):
"""
test options for non-size-tiered compaction strategy
Creates a table with LeveledCompactionStrategy, specifying one non-default option. Verifies that the option is
present in generated CQL, and that other legacy table parameters (min_threshold, max_threshold) are not included.
@since 2.6.0
@jira_ticket PYTHON-352
@expected_result the options map for LeveledCompactionStrategy does not contain min_threshold, max_threshold
@test_category metadata
"""
create_statement = self.make_create_statement(["a"], [], ["b", "c"])
create_statement += "WITH COMPACTION = {'class': 'LeveledCompactionStrategy', 'tombstone_threshold': '0.3'}"
self.session.execute(create_statement)
table_meta = self.get_table_metadata()
cql = table_meta.export_as_string()
self.assertIn("'tombstone_threshold': '0.3'", cql)
self.assertIn("LeveledCompactionStrategy", cql)
self.assertNotIn("min_threshold", cql)
self.assertNotIn("max_threshold", cql)
def test_refresh_schema_metadata(self):
"""
test for synchronously refreshing all cluster metadata
test_refresh_schema_metadata tests that all cluster metadata is refreshed when calling refresh_schema_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the cluster, creating a new keyspace, using the first cluster
object, and verifies that the cluster metadata has not changed in the second cluster object. It then calls
refresh_schema_metadata() and verifies that the cluster metadata is updated in the second cluster object.
Similarly, it then proceeds to alter keyspace, table, UDT, UDF, and UDA metadata and subsequently verifies
that this metadata is updated when refresh_schema_metadata() is called.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Cluster, keyspace, table, UDT, UDF, and UDA metadata should be refreshed when refresh_schema_metadata() is called.
@test_category metadata
"""
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
# Cluster metadata modification
self.session.execute("CREATE KEYSPACE new_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}")
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.refresh_schema_metadata()
self.assertIn("new_keyspace", cluster2.metadata.keyspaces)
# Keyspace metadata modification
self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name))
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.refresh_schema_metadata()
self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
# Table metadata modification
table_name = "test"
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name))
cluster2.refresh_schema_metadata()
self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name))
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.refresh_schema_metadata()
self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
if PROTOCOL_VERSION >= 3:
# UDT metadata modification
self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster2.refresh_schema_metadata()
self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types)
if PROTOCOL_VERSION >= 4:
# UDF metadata modification
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
cluster2.refresh_schema_metadata()
self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions)
# UDA metadata modification
self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int)
SFUNC sum_int
STYPE int
INITCOND 0"""
.format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
cluster2.refresh_schema_metadata()
self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates)
# Cluster metadata modification
self.session.execute("DROP KEYSPACE new_keyspace")
self.assertIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.refresh_schema_metadata()
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.shutdown()
def test_refresh_keyspace_metadata(self):
"""
test for synchronously refreshing keyspace metadata
test_refresh_keyspace_metadata tests that keyspace metadata is refreshed when calling refresh_keyspace_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the keyspace, disabling durable_writes, using the first cluster
object, and verifies that the keyspace metadata has not changed in the second cluster object. Finally, it calls
refresh_keyspace_metadata() and verifies that the keyspace metadata is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Keyspace metadata should be refreshed when refresh_keyspace_metadata() is called.
@test_category metadata
"""
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name))
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.refresh_keyspace_metadata(self.keyspace_name)
self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.shutdown()
def test_refresh_table_metadata(self):
"""
test for synchronously refreshing table metadata
test_refresh_table_metadata tests that table metadata is refreshed when calling refresh_table_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the table, adding a new column, using the first cluster
object, and verifies that the table metadata has not changed in the second cluster object. Finally, it calls
refresh_table_metadata() and verifies that the table metadata is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Table metadata should be refreshed when refresh_table_metadata() is called.
@test_category metadata
"""
table_name = "test"
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name))
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.refresh_table_metadata(self.keyspace_name, table_name)
self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.shutdown()
@greaterthanorequalcass30
def test_refresh_metadata_for_mv(self):
"""
test for synchronously refreshing materialized view metadata
test_refresh_metadata_for_mv tests that materialized view metadata is refreshed when calling
refresh_table_metadata() with the materialized view name as the table. It creates a second cluster object
with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events.
It then creates a new materialized view, using the first cluster object, and verifies that the materialized view
metadata has not changed in the second cluster object. Finally, it calls refresh_table_metadata() with the
materialized view name as the table name, and verifies that the materialized view metadata is updated in the
second cluster object.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be refreshed when refresh_table_metadata() is called.
@test_category metadata
"""
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, self.function_table_name))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
try:
self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)"
.format(self.keyspace_name, self.function_table_name))
self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
cluster2.refresh_table_metadata(self.keyspace_name, "mv1")
self.assertIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
finally:
cluster2.shutdown()
original_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIs(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])
self.cluster.refresh_materialized_view_metadata(self.keyspace_name, 'mv1')
current_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIsNot(current_meta, original_meta)
self.assertIsNot(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])
self.assertEqual(original_meta.as_cql_query(), current_meta.as_cql_query())
cluster3 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster3.connect()
try:
self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)"
.format(self.keyspace_name, self.function_table_name))
self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
cluster3.refresh_materialized_view_metadata(self.keyspace_name, 'mv2')
self.assertIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
finally:
cluster3.shutdown()
def test_refresh_user_type_metadata(self):
"""
test for synchronously refreshing UDT metadata in keyspace
test_refresh_user_type_metadata tests that UDT metadata in a keyspace is refreshed when calling refresh_user_type_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the keyspace, creating a new UDT, using the first cluster
object, and verifies that the UDT metadata has not changed in the second cluster object. Finally, it calls
refresh_user_type_metadata() and verifies that the UDT metadata in the keyspace is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDT metadata in the keyspace should be refreshed when refresh_user_type_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest("Protocol 3+ is required for UDTs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster2.refresh_user_type_metadata(self.keyspace_name, "user")
self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types)
cluster2.shutdown()
@greaterthancass20
def test_refresh_user_type_metadata_proto_2(self):
"""
Test to ensure that protocol v1/v2 surface UDT metadata changes
@since 3.7.0
@jira_ticket PYTHON-106
@expected_result UDT metadata in the keyspace should be updated regardless of protocol version
@test_category metadata
"""
supported_versions = get_supported_protocol_versions()
if 2 not in supported_versions: # 1 and 2 were dropped in the same version
raise unittest.SkipTest("Protocol versions 1 and 2 are not supported in Cassandra version ".format(CASSANDRA_VERSION))
for protocol_version in (1, 2):
cluster = Cluster(protocol_version=protocol_version)
session = cluster.connect()
self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {})
session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertIn("user", cluster.metadata.keyspaces[self.keyspace_name].user_types)
self.assertIn("age", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
self.assertIn("name", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
session.execute("ALTER TYPE {0}.user ADD flag boolean".format(self.keyspace_name))
self.assertIn("flag", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
session.execute("ALTER TYPE {0}.user RENAME flag TO something".format(self.keyspace_name))
self.assertIn("something", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
session.execute("DROP TYPE {0}.user".format(self.keyspace_name))
self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster.shutdown()
def test_refresh_user_function_metadata(self):
"""
test for synchronously refreshing UDF metadata in keyspace
test_refresh_user_function_metadata tests that UDF metadata in a keyspace is refreshed when calling
refresh_user_function_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such
that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new
UDF, using the first cluster object, and verifies that the UDF metadata has not changed in the second cluster
object. Finally, it calls refresh_user_function_metadata() and verifies that the UDF metadata in the keyspace
is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDF metadata in the keyspace should be refreshed when refresh_user_function_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol 4+ is required for UDFs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
cluster2.refresh_user_function_metadata(self.keyspace_name, UserFunctionDescriptor("sum_int", ["int", "int"]))
self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions)
cluster2.shutdown()
def test_refresh_user_aggregate_metadata(self):
"""
test for synchronously refreshing UDA metadata in keyspace
test_refresh_user_aggregate_metadata tests that UDA metadata in a keyspace is refreshed when calling
refresh_user_aggregate_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such
that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new
UDA, using the first cluster object, and verifies that the UDA metadata has not changed in the second cluster
object. Finally, it calls refresh_user_aggregate_metadata() and verifies that the UDA metadata in the keyspace
is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDA metadata in the keyspace should be refreshed when refresh_user_aggregate_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol 4+ is required for UDAs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int)
SFUNC sum_int
STYPE int
INITCOND 0"""
.format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
cluster2.refresh_user_aggregate_metadata(self.keyspace_name, UserAggregateDescriptor("sum_agg", ["int"]))
self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates)
cluster2.shutdown()
@greaterthanorequalcass30
def test_multiple_indices(self):
"""
test multiple indices on the same column.
Creates a table and two indices. Ensures that the metadata for both indices is surfaced appropriately.
@since 3.0.0
@jira_ticket PYTHON-276
@expected_result IndexMetadata is appropriately surfaced
@test_category metadata
"""
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b map<text, int>)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index_1 ON {0}.{1}(b)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index_2 ON {0}.{1}(keys(b))".format(self.keyspace_name, self.function_table_name))
indices = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].indexes
self.assertEqual(len(indices), 2)
index_1 = indices["index_1"]
index_2 = indices['index_2']
self.assertEqual(index_1.table_name, "test_multiple_indices")
self.assertEqual(index_1.name, "index_1")
self.assertEqual(index_1.kind, "COMPOSITES")
self.assertEqual(index_1.index_options["target"], "values(b)")
self.assertEqual(index_1.keyspace_name, "schemametadatatests")
self.assertEqual(index_2.table_name, "test_multiple_indices")
self.assertEqual(index_2.name, "index_2")
self.assertEqual(index_2.kind, "COMPOSITES")
self.assertEqual(index_2.index_options["target"], "keys(b)")
self.assertEqual(index_2.keyspace_name, "schemametadatatests")
@greaterthanorequalcass30
def test_table_extensions(self):
s = self.session
ks = self.keyspace_name
ks_meta = s.cluster.metadata.keyspaces[ks]
t = self.function_table_name
v = t + 'view'
s.execute("CREATE TABLE %s.%s (k text PRIMARY KEY, v int)" % (ks, t))
s.execute("CREATE MATERIALIZED VIEW %s.%s AS SELECT * FROM %s.%s WHERE v IS NOT NULL PRIMARY KEY (v, k)" % (ks, v, ks, t))
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertFalse(table_meta.extensions)
self.assertFalse(view_meta.extensions)
original_table_cql = table_meta.export_as_string()
original_view_cql = view_meta.export_as_string()
# extensions registered, not present
# --------------------------------------
class Ext0(RegisteredTableExtension):
name = t
@classmethod
def after_table_cql(cls, table_meta, ext_key, ext_blob):
return "%s %s %s %s" % (cls.name, table_meta.name, ext_key, ext_blob)
class Ext1(Ext0):
name = t + '##'
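# These RegisteredTableExtension subclasses hook into CQL export: when a table or view carries
# an extensions blob under a registered key, after_table_cql() is invoked and its return value
# is included in the exported CQL. Ext0 and Ext1 register different keys so the test can cover
# the "one present" and "both present" cases below.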
self.assertFalse(table_meta.extensions)
self.assertFalse(view_meta.extensions)
self.assertIn(Ext0.name, _RegisteredExtensionType._extension_registry)
self.assertIn(Ext1.name, _RegisteredExtensionType._extension_registry)
self.assertEqual(len(_RegisteredExtensionType._extension_registry), 2)
self.cluster.refresh_table_metadata(ks, t)
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertEqual(table_meta.export_as_string(), original_table_cql)
self.assertEqual(view_meta.export_as_string(), original_view_cql)
update_t = s.prepare('UPDATE system_schema.tables SET extensions=? WHERE keyspace_name=? AND table_name=?') # for blob type coercing
update_v = s.prepare('UPDATE system_schema.views SET extensions=? WHERE keyspace_name=? AND view_name=?')
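# Extensions are injected by writing the blob straight into the system_schema tables
# (simulating an external tool); prepared statements are used so the bytes values are
# coerced to the blob column type.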
# extensions registered, one present
# --------------------------------------
ext_map = {Ext0.name: six.b("THA VALUE")}
[(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v)))
for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts
self.cluster.refresh_table_metadata(ks, t)
self.cluster.refresh_materialized_view_metadata(ks, v)
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertIn(Ext0.name, table_meta.extensions)
new_cql = table_meta.export_as_string()
self.assertNotEqual(new_cql, original_table_cql)
self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertNotIn(Ext1.name, new_cql)
self.assertIn(Ext0.name, view_meta.extensions)
new_cql = view_meta.export_as_string()
self.assertNotEqual(new_cql, original_view_cql)
self.assertIn(Ext0.after_table_cql(view_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertNotIn(Ext1.name, new_cql)
# extensions registered, both present
# --------------------------------------
ext_map = {Ext0.name: six.b("THA VALUE"),
Ext1.name: six.b("OTHA VALUE")}
[(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v)))
for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts
self.cluster.refresh_table_metadata(ks, t)
self.cluster.refresh_materialized_view_metadata(ks, v)
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertIn(Ext0.name, table_meta.extensions)
self.assertIn(Ext1.name, table_meta.extensions)
new_cql = table_meta.export_as_string()
self.assertNotEqual(new_cql, original_table_cql)
self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertIn(Ext1.after_table_cql(table_meta, Ext1.name, ext_map[Ext1.name]), new_cql)
self.assertIn(Ext0.name, view_meta.extensions)
self.assertIn(Ext1.name, view_meta.extensions)
new_cql = view_meta.export_as_string()
self.assertNotEqual(new_cql, original_view_cql)
self.assertIn(Ext0.after_table_cql(view_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertIn(Ext1.after_table_cql(view_meta, Ext1.name, ext_map[Ext1.name]), new_cql)
class TestCodeCoverage(unittest.TestCase):
def test_export_schema(self):
"""
Test export schema functionality
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
self.assertIsInstance(cluster.metadata.export_schema_as_string(), six.string_types)
cluster.shutdown()
def test_export_keyspace_schema(self):
"""
Test export keyspace schema functionality
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
for keyspace in cluster.metadata.keyspaces:
keyspace_metadata = cluster.metadata.keyspaces[keyspace]
self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types)
self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types)
cluster.shutdown()
def assert_equal_diff(self, received, expected):
if received != expected:
diff_string = '\n'.join(difflib.unified_diff(expected.split('\n'),
received.split('\n'),
'EXPECTED', 'RECEIVED',
lineterm=''))
self.fail(diff_string)
def assert_startswith_diff(self, received, prefix):
if not received.startswith(prefix):
prefix_lines = prefix.split('\n')
diff_string = '\n'.join(difflib.unified_diff(prefix_lines,
received.split('\n')[:len(prefix_lines)],
'EXPECTED', 'RECEIVED',
lineterm=''))
self.fail(diff_string)
@greaterthancass20
def test_export_keyspace_schema_udts(self):
"""
Test udt exports
"""
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest(
"Protocol 3.0+ is required for UDT change events, currently testing against %r"
% (PROTOCOL_VERSION,))
if sys.version_info[0:2] != (2, 7):
raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.')
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
session.execute("""
CREATE KEYSPACE export_udts
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
AND durable_writes = true;
""")
session.execute("""
CREATE TYPE export_udts.street (
street_number int,
street_name text)
""")
session.execute("""
CREATE TYPE export_udts.zip (
zipcode int,
zip_plus_4 int)
""")
session.execute("""
CREATE TYPE export_udts.address (
street_address frozen<street>,
zip_code frozen<zip>)
""")
session.execute("""
CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>)
""")
expected_prefix = """CREATE KEYSPACE export_udts WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;
CREATE TYPE export_udts.street (
street_number int,
street_name text
);
CREATE TYPE export_udts.zip (
zipcode int,
zip_plus_4 int
);
CREATE TYPE export_udts.address (
street_address frozen<street>,
zip_code frozen<zip>
);
CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>"""
self.assert_startswith_diff(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_prefix)
table_meta = cluster.metadata.keyspaces['export_udts'].tables['users']
expected_prefix = """CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>"""
self.assert_startswith_diff(table_meta.export_as_string(), expected_prefix)
cluster.shutdown()
@greaterthancass21
def test_case_sensitivity(self):
"""
Test that names that need to be escaped in CREATE statements are
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
ksname = 'AnInterestingKeyspace'
cfname = 'AnInterestingTable'
session.execute("DROP KEYSPACE IF EXISTS {0}".format(ksname))
session.execute("""
CREATE KEYSPACE "%s"
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""" % (ksname,))
session.execute("""
CREATE TABLE "%s"."%s" (
k int,
"A" int,
"B" int,
"MyColumn" int,
PRIMARY KEY (k, "A"))
WITH CLUSTERING ORDER BY ("A" DESC)
""" % (ksname, cfname))
session.execute("""
CREATE INDEX myindex ON "%s"."%s" ("MyColumn")
""" % (ksname, cfname))
session.execute("""
CREATE INDEX "AnotherIndex" ON "%s"."%s" ("B")
""" % (ksname, cfname))
ksmeta = cluster.metadata.keyspaces[ksname]
schema = ksmeta.export_as_string()
self.assertIn('CREATE KEYSPACE "AnInterestingKeyspace"', schema)
self.assertIn('CREATE TABLE "AnInterestingKeyspace"."AnInterestingTable"', schema)
self.assertIn('"A" int', schema)
self.assertIn('"B" int', schema)
self.assertIn('"MyColumn" int', schema)
self.assertIn('PRIMARY KEY (k, "A")', schema)
self.assertIn('WITH CLUSTERING ORDER BY ("A" DESC)', schema)
self.assertIn('CREATE INDEX myindex ON "AnInterestingKeyspace"."AnInterestingTable" ("MyColumn")', schema)
self.assertIn('CREATE INDEX "AnotherIndex" ON "AnInterestingKeyspace"."AnInterestingTable" ("B")', schema)
cluster.shutdown()
def test_already_exists_exceptions(self):
"""
Ensure AlreadyExists exception is thrown when hit
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
ksname = 'test3rf'
cfname = 'test'
ddl = '''
CREATE KEYSPACE %s
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}'''
self.assertRaises(AlreadyExists, session.execute, ddl % ksname)
ddl = '''
CREATE TABLE %s.%s (
k int PRIMARY KEY,
v int )'''
self.assertRaises(AlreadyExists, session.execute, ddl % (ksname, cfname))
cluster.shutdown()
@local
def test_replicas(self):
"""
Ensure cluster.metadata.get_replicas return correctly when not attached to keyspace
"""
if murmur3 is None:
raise unittest.SkipTest('the murmur3 extension is not available')
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.assertEqual(cluster.metadata.get_replicas('test3rf', 'key'), [])
cluster.connect('test3rf')
self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', six.b('key'))), [])
host = list(cluster.metadata.get_replicas('test3rf', six.b('key')))[0]
self.assertEqual(host.datacenter, 'dc1')
self.assertEqual(host.rack, 'r1')
cluster.shutdown()
def test_token_map(self):
"""
Test token mappings
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect('test3rf')
ring = cluster.metadata.token_map.ring
owners = list(cluster.metadata.token_map.token_to_host_owner[token] for token in ring)
get_replicas = cluster.metadata.token_map.get_replicas
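# With the 3-node ring and SimpleStrategy, replicas for a token are its owner plus the next
# (RF - 1) owners clockwise: RF=3 spans all owners, RF=2 the owner and its successor, RF=1
# just the owner, which is what the assertions below verify.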
for ksname in ('test1rf', 'test2rf', 'test3rf'):
self.assertNotEqual(list(get_replicas(ksname, ring[0])), [])
for i, token in enumerate(ring):
self.assertEqual(set(get_replicas('test3rf', token)), set(owners))
self.assertEqual(set(get_replicas('test2rf', token)), set([owners[i], owners[(i + 1) % 3]]))
self.assertEqual(set(get_replicas('test1rf', token)), set([owners[i]]))
cluster.shutdown()
class TokenMetadataTest(unittest.TestCase):
"""
Test of TokenMap creation and other behavior.
"""
@local
def test_token(self):
expected_node_count = len(get_cluster().nodes)
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
tmap = cluster.metadata.token_map
self.assertTrue(issubclass(tmap.token_class, Token))
self.assertEqual(expected_node_count, len(tmap.ring))
cluster.shutdown()
class KeyspaceAlterMetadata(unittest.TestCase):
"""
Test verifies that table metadata is preserved on keyspace alter
"""
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
name = self._testMethodName.lower()
crt_ks = '''
CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = true''' % name
self.session.execute(crt_ks)
def tearDown(self):
name = self._testMethodName.lower()
self.session.execute('DROP KEYSPACE %s' % name)
self.cluster.shutdown()
def test_keyspace_alter(self):
"""
Table info is preserved upon keyspace alter:
Create table
Verify schema
Alter ks
Verify that table metadata is still present
PYTHON-173
"""
name = self._testMethodName.lower()
self.session.execute('CREATE TABLE %s.d (d INT PRIMARY KEY)' % name)
original_keyspace_meta = self.cluster.metadata.keyspaces[name]
self.assertEqual(original_keyspace_meta.durable_writes, True)
self.assertEqual(len(original_keyspace_meta.tables), 1)
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % name)
new_keyspace_meta = self.cluster.metadata.keyspaces[name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertEqual(new_keyspace_meta.durable_writes, False)
class IndexMapTests(unittest.TestCase):
keyspace_name = 'index_map_tests'
@property
def table_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.session = cls.cluster.connect()
try:
if cls.keyspace_name in cls.cluster.metadata.keyspaces:
cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name)
cls.session.execute(
"""
CREATE KEYSPACE %s
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};
""" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
except Exception:
cls.cluster.shutdown()
raise
@classmethod
def teardown_class(cls):
try:
cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name)
finally:
cls.cluster.shutdown()
def create_basic_table(self):
self.session.execute("CREATE TABLE %s (k int PRIMARY KEY, a int)" % self.table_name)
def drop_basic_table(self):
self.session.execute("DROP TABLE %s" % self.table_name)
def test_index_updates(self):
self.create_basic_table()
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertNotIn('b_idx', ks_meta.indexes)
self.assertNotIn('a_idx', table_meta.indexes)
self.assertNotIn('b_idx', table_meta.indexes)
self.session.execute("CREATE INDEX a_idx ON %s (a)" % self.table_name)
self.session.execute("ALTER TABLE %s ADD b int" % self.table_name)
self.session.execute("CREATE INDEX b_idx ON %s (b)" % self.table_name)
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes['a_idx'], IndexMetadata)
self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata)
self.assertIsInstance(table_meta.indexes['a_idx'], IndexMetadata)
self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata)
# both indexes updated when index dropped
self.session.execute("DROP INDEX a_idx")
# temporarily synchronously refresh the schema metadata, until CASSANDRA-9391 is merged in
self.cluster.refresh_table_metadata(self.keyspace_name, self.table_name)
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata)
self.assertNotIn('a_idx', table_meta.indexes)
self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata)
# keyspace index updated when table dropped
self.drop_basic_table()
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotIn(self.table_name, ks_meta.tables)
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertNotIn('b_idx', ks_meta.indexes)
def test_index_follows_alter(self):
self.create_basic_table()
idx = self.table_name + '_idx'
self.session.execute("CREATE INDEX %s ON %s (a)" % (idx, self.table_name))
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata)
self.assertIsInstance(table_meta.indexes[idx], IndexMetadata)
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
old_meta = ks_meta
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIsNot(ks_meta, old_meta)
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata)
self.assertIsInstance(table_meta.indexes[idx], IndexMetadata)
self.drop_basic_table()
class FunctionTest(unittest.TestCase):
"""
Base functionality for Function and Aggregate metadata test classes
"""
def setUp(self):
"""
Tests are skipped if run with native protocol version < 4
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Function metadata requires native protocol version 4+")
@property
def function_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
if PROTOCOL_VERSION >= 4:
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.keyspace_name = cls.__name__.lower()
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
cls.keyspace_function_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].functions
cls.keyspace_aggregate_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].aggregates
@classmethod
def teardown_class(cls):
if PROTOCOL_VERSION >= 4:
cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name)
cls.cluster.shutdown()
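# Verified is a small context manager used by the function/aggregate tests below: on enter it
# builds the expected metadata object from kwargs, executes its as_cql_query() to create the
# element, and asserts the driver's parsed metadata round-trips to the same CQL; on exit it
# drops the element and asserts it disappears from the keyspace metadata.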
class Verified(object):
def __init__(self, test_case, meta_class, element_meta, **function_kwargs):
self.test_case = test_case
self.function_kwargs = dict(function_kwargs)
self.meta_class = meta_class
self.element_meta = element_meta
def __enter__(self):
tc = self.test_case
expected_meta = self.meta_class(**self.function_kwargs)
tc.assertNotIn(expected_meta.signature, self.element_meta)
tc.session.execute(expected_meta.as_cql_query())
tc.assertIn(expected_meta.signature, self.element_meta)
generated_meta = self.element_meta[expected_meta.signature]
self.test_case.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
tc = self.test_case
tc.session.execute("DROP %s %s.%s" % (self.meta_class.__name__, tc.keyspace_name, self.signature))
tc.assertNotIn(self.signature, self.element_meta)
@property
def signature(self):
return SignatureDescriptor.format_signature(self.function_kwargs['name'],
self.function_kwargs['argument_types'])
class VerifiedFunction(Verified):
def __init__(self, test_case, **kwargs):
super(FunctionTest.VerifiedFunction, self).__init__(test_case, Function, test_case.keyspace_function_meta, **kwargs)
class VerifiedAggregate(Verified):
def __init__(self, test_case, **kwargs):
super(FunctionTest.VerifiedAggregate, self).__init__(test_case, Aggregate, test_case.keyspace_aggregate_meta, **kwargs)
class FunctionMetadata(FunctionTest):
def make_function_kwargs(self, called_on_null=True):
return {'keyspace': self.keyspace_name,
'name': self.function_name,
'argument_types': ['double', 'int'],
'argument_names': ['d', 'i'],
'return_type': 'double',
'language': 'java',
'body': 'return new Double(0.0);',
'called_on_null_input': called_on_null}
def test_functions_after_udt(self):
"""
Test to ensure functions come after UDTs in the keyspace dump
test_functions_after_udt creates a basic function, then dumps the keyspace and makes sure that
UDTs are listed before any corresponding functions in the output
Ideally we would make a function that takes a udt type, but this presently fails because C* c059a56 requires
udt to be frozen to create, but does not store meta indicating frozen
SEE https://issues.apache.org/jira/browse/CASSANDRA-9186
Maybe update this after release
kwargs = self.make_function_kwargs()
kwargs['argument_types'][0] = "frozen<%s>" % udt_name
expected_meta = Function(**kwargs)
with self.VerifiedFunction(self, **kwargs):
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result UDT's should come before any functions
@test_category function
"""
self.assertNotIn(self.function_name, self.keyspace_function_meta)
udt_name = 'udtx'
self.session.execute("CREATE TYPE %s (x int)" % udt_name)
with self.VerifiedFunction(self, **self.make_function_kwargs()):
# udts must come before functions in keyspace dump
keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
type_idx = keyspace_cql.rfind("CREATE TYPE")
func_idx = keyspace_cql.find("CREATE FUNCTION")
self.assertNotIn(-1, (type_idx, func_idx), "TYPE or FUNCTION not found in keyspace_cql: " + keyspace_cql)
self.assertGreater(func_idx, type_idx)
def test_function_same_name_diff_types(self):
"""
Test to verify that functions with different signatures are differentiated in metadata
test_function_same_name_diff_types creates two functions with the same name but slightly different
signatures, then ensures that both are surfaced separately in our metadata.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result function with the same name but different signatures should be surfaced separately
@test_category function
"""
# Create a function
kwargs = self.make_function_kwargs()
with self.VerifiedFunction(self, **kwargs):
# another function: same name, different type sig.
self.assertGreater(len(kwargs['argument_types']), 1)
self.assertGreater(len(kwargs['argument_names']), 1)
kwargs['argument_types'] = kwargs['argument_types'][:1]
kwargs['argument_names'] = kwargs['argument_names'][:1]
# Ensure they are surfaced separately
with self.VerifiedFunction(self, **kwargs):
functions = [f for f in self.keyspace_function_meta.values() if f.name == self.function_name]
self.assertEqual(len(functions), 2)
self.assertNotEqual(functions[0].argument_types, functions[1].argument_types)
def test_function_no_parameters(self):
"""
Test to verify CQL output for functions with zero parameters
Creates a function with no input parameters, verify that CQL output is correct.
@since 2.7.1
@jira_ticket PYTHON-392
@expected_result function with no parameters should generate proper CQL
@test_category function
"""
kwargs = self.make_function_kwargs()
kwargs['argument_types'] = []
kwargs['argument_names'] = []
kwargs['return_type'] = 'bigint'
kwargs['body'] = 'return System.currentTimeMillis() / 1000L;'
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name'])
def test_functions_follow_keyspace_alter(self):
"""
Test to verify that functions maintain equality after a keyspace is altered
test_functions_follow_keyspace_alter creates a function, then alters the keyspace associated with that function.
After the alter we validate that the function maintains the same metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result functions are the same after parent keyspace is altered
@test_category function
"""
# Create function
with self.VerifiedFunction(self, **self.make_function_kwargs()):
original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
# After keyspace alter ensure that we maintain function equality.
try:
new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertIs(original_keyspace_meta.functions, new_keyspace_meta.functions)
finally:
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
def test_function_cql_called_on_null(self):
"""
Test to verify that the called-on-null-input option is honored on function creation.
test_function_cql_called_on_null creates two functions: one with called_on_null_input set to true,
the other with it set to false. We then verify that the metadata constructed from those functions
correctly reflects the flag
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result functions metadata correctly reflects called_on_null_input flag.
@test_category function
"""
kwargs = self.make_function_kwargs()
kwargs['called_on_null_input'] = True
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*")
kwargs['called_on_null_input'] = False
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*")
class AggregateMetadata(FunctionTest):
@classmethod
def setup_class(cls):
if PROTOCOL_VERSION >= 4:
super(AggregateMetadata, cls).setup_class()
cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int(s int, i int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 's + i';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int_two(s int, i int, j int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 's + i + j';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION "List_As_String"(l list<text>)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS ''''' + l';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION extend_list(s list<text>, i int)
CALLED ON NULL INPUT
RETURNS list<text>
LANGUAGE java AS 'if (i != null) s.add(i.toString()); return s;';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION update_map(s map<int, int>, i int)
RETURNS NULL ON NULL INPUT
RETURNS map<int, int>
LANGUAGE java AS 's.put(new Integer(i), new Integer(i)); return s;';""")
cls.session.execute("""CREATE TABLE IF NOT EXISTS t
(k int PRIMARY KEY, v int)""")
for x in range(4):
cls.session.execute("INSERT INTO t (k,v) VALUES (%s, %s)", (x, x))
cls.session.execute("INSERT INTO t (k) VALUES (%s)", (4,))
def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_cond=None):
return {'keyspace': self.keyspace_name,
'name': self.function_name + '_aggregate',
'argument_types': ['int'],
'state_func': state_func,
'state_type': state_type,
'final_func': final_func,
'initial_condition': init_cond,
'return_type': "does not matter for creation"}
def test_return_type_meta(self):
"""
Test to verify that the return type of an aggregate is honored in the metadata
test_return_type_meta creates an aggregate then ensures the return type of the created
aggregate is correctly surfaced in the metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregate has the correct return type in the metadata
@test_category aggregate
"""
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='1')) as va:
self.assertEqual(self.keyspace_aggregate_meta[va.signature].return_type, 'int')
def test_init_cond(self):
"""
Test to verify that various initial conditions are correctly surfaced in various aggregate functions
test_init_cond creates several different types of aggregates, and given various initial conditions it verifies that
they correctly impact the aggregate's execution
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result initial conditions are correctly evaluated as part of the aggregates
@test_category aggregate
"""
# This is required until the java driver bundled with C* is updated to support v4
c = Cluster(protocol_version=3)
s = c.connect(self.keyspace_name)
encoder = Encoder()
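# Encoder.cql_encode_all_types renders Python values (ints, lists, maps) as CQL literal text,
# which is the form the INITCOND clause of CREATE AGGREGATE expects.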
expected_values = range(4)
# int32
for init_cond in (-1, 0, 1):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=cql_init)) as va:
sum_res = s.execute("SELECT %s(v) AS sum FROM t" % va.function_kwargs['name'])[0].sum
self.assertEqual(sum_res, int(init_cond) + sum(expected_values))
# list<text>
for init_cond in ([], ['1', '2']):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>', init_cond=cql_init)) as va:
list_res = s.execute("SELECT %s(v) AS list_res FROM t" % va.function_kwargs['name'])[0].list_res
self.assertListEqual(list_res[:len(init_cond)], init_cond)
self.assertEqual(set(i for i in list_res[len(init_cond):]),
set(str(i) for i in expected_values))
# map<int,int>
expected_map_values = dict((i, i) for i in expected_values)
expected_key_set = set(expected_values)
for init_cond in ({}, {1: 2, 3: 4}, {5: 5}):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map<int, int>', init_cond=cql_init)) as va:
map_res = s.execute("SELECT %s(v) AS map_res FROM t" % va.function_kwargs['name'])[0].map_res
self.assertDictContainsSubset(expected_map_values, map_res)
init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set)
self.assertDictContainsSubset(init_not_updated, map_res)
c.shutdown()
def test_aggregates_after_functions(self):
"""
Test to verify that aggregates are listed after function in metadata
test_aggregates_after_functions creates an aggregate, and then verifies that it is listed
after any function creations when the keyspace dump is performed
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates are declared after any functions
@test_category aggregate
"""
# aggregates must come after functions in keyspace dump
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>')):
keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
func_idx = keyspace_cql.find("CREATE FUNCTION")
aggregate_idx = keyspace_cql.rfind("CREATE AGGREGATE")
self.assertNotIn(-1, (aggregate_idx, func_idx), "AGGREGATE or FUNCTION not found in keyspace_cql: " + keyspace_cql)
self.assertGreater(aggregate_idx, func_idx)
def test_same_name_diff_types(self):
"""
Test to verify that aggregates with different signatures are differentiated in metadata
test_same_name_diff_types creates two aggregates with the same name but slightly different
signatures, then ensures that both are surfaced separately in our metadata.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates with the same name but different signatures should be surfaced separately
@test_category function
"""
kwargs = self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')
with self.VerifiedAggregate(self, **kwargs):
kwargs['state_func'] = 'sum_int_two'
kwargs['argument_types'] = ['int', 'int']
with self.VerifiedAggregate(self, **kwargs):
aggregates = [a for a in self.keyspace_aggregate_meta.values() if a.name == kwargs['name']]
self.assertEqual(len(aggregates), 2)
self.assertNotEqual(aggregates[0].argument_types, aggregates[1].argument_types)
def test_aggregates_follow_keyspace_alter(self):
"""
Test to verify that aggregates maintain equality after a keyspace is altered
test_aggregates_follow_keyspace_alter creates an aggregate, then alters the keyspace associated with that
aggregate. After the alter we validate that the aggregate maintains the same metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates are the same after parent keyspace is altered
@test_category function
"""
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')):
original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
try:
new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertIs(original_keyspace_meta.aggregates, new_keyspace_meta.aggregates)
finally:
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
def test_cql_optional_params(self):
"""
Test to verify that the initial_condition and final_func parameters are correctly honored
test_cql_optional_params creates various aggregates with different combinations of initial_condition,
and final_func parameters set. It then ensures they are correctly honored.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result initial_condition and final_func parameters are honored correctly
@test_category function
"""
kwargs = self.make_aggregate_kwargs('extend_list', 'list<text>')
encoder = Encoder()
# no initial condition, final func
self.assertIsNone(kwargs['initial_condition'])
self.assertIsNone(kwargs['final_func'])
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertIsNone(meta.initial_condition)
self.assertIsNone(meta.final_func)
cql = meta.as_cql_query()
self.assertEqual(cql.find('INITCOND'), -1)
self.assertEqual(cql.find('FINALFUNC'), -1)
# initial condition, no final func
kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
self.assertIsNone(meta.final_func)
cql = meta.as_cql_query()
search_string = "INITCOND %s" % kwargs['initial_condition']
self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))
self.assertEqual(cql.find('FINALFUNC'), -1)
# no initial condition, final func
kwargs['initial_condition'] = None
kwargs['final_func'] = 'List_As_String'
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertIsNone(meta.initial_condition)
self.assertEqual(meta.final_func, kwargs['final_func'])
cql = meta.as_cql_query()
self.assertEqual(cql.find('INITCOND'), -1)
search_string = 'FINALFUNC "%s"' % kwargs['final_func']
self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))
# both
kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
kwargs['final_func'] = 'List_As_String'
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
self.assertEqual(meta.final_func, kwargs['final_func'])
cql = meta.as_cql_query()
init_cond_idx = cql.find("INITCOND %s" % kwargs['initial_condition'])
final_func_idx = cql.find('FINALFUNC "%s"' % kwargs['final_func'])
self.assertNotIn(-1, (init_cond_idx, final_func_idx))
self.assertGreater(init_cond_idx, final_func_idx)
class BadMetaTest(unittest.TestCase):
"""
Test behavior when metadata has unexpected form
Verify that new cluster/session can still connect, and the CQL output indicates the exception with a warning.
PYTHON-370
"""
class BadMetaException(Exception):
pass
@property
def function_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.keyspace_name = cls.__name__.lower()
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
connection = cls.cluster.control_connection._connection
cls.parser_class = get_schema_parser(connection, CASSANDRA_VERSION.base_version, timeout=20).__class__
cls.cluster.control_connection.reconnect = Mock()
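# The parser's _build_* methods are patched in the tests below to raise BadMetaException,
# simulating malformed schema rows; reconnect is mocked out (an assumption about intent: to
# keep the control connection from reacting to those induced failures during the tests).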
@classmethod
def teardown_class(cls):
drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster)
def test_bad_keyspace(self):
with patch.object(self.parser_class, '_build_keyspace_metadata_internal', side_effect=self.BadMetaException):
self.cluster.refresh_keyspace_metadata(self.keyspace_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_table(self):
self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name)
with patch.object(self.parser_class, '_build_column_metadata', side_effect=self.BadMetaException):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_index(self):
self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name)
self.session.execute('CREATE INDEX ON %s(v)' % self.function_name)
with patch.object(self.parser_class, '_build_index_metadata', side_effect=self.BadMetaException):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
@greaterthancass20
def test_bad_user_type(self):
self.session.execute('CREATE TYPE %s (i int, d double)' % self.function_name)
with patch.object(self.parser_class, '_build_user_type', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
@greaterthancass21
def test_bad_user_function(self):
self.session.execute("""CREATE FUNCTION IF NOT EXISTS %s (key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""" % self.function_name)
with patch.object(self.parser_class, '_build_function', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
@greaterthancass21
def test_bad_user_aggregate(self):
self.session.execute("""CREATE FUNCTION IF NOT EXISTS sum_int (key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""")
self.session.execute("""CREATE AGGREGATE %s(int)
SFUNC sum_int
STYPE int
INITCOND 0""" % self.function_name)
with patch.object(self.parser_class, '_build_aggregate', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
class DynamicCompositeTypeTest(BasicSharedKeyspaceUnitTestCase):
def test_dct_alias(self):
"""
Tests to make sure DCT's have correct string formatting
        Constructs a DCT and checks the generated format to ensure it matches what is expected
@since 3.6.0
@jira_ticket PYTHON-579
@expected_result DCT subtypes should always have fully qualified names
@test_category metadata
"""
self.session.execute("CREATE TABLE {0}.{1} ("
"k int PRIMARY KEY,"
"c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)',"
"c2 Text)".format(self.ks_name, self.function_table_name))
dct_table = self.cluster.metadata.keyspaces.get(self.ks_name).tables.get(self.function_table_name)
        # Format can vary slightly between versions; strip out whitespace for consistency's sake
self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(s=>org.apache.cassandra.db.marshal.UTF8Type,i=>org.apache.cassandra.db.marshal.Int32Type)'" in dct_table.as_cql_query().replace(" ", ""))
@greaterthanorequalcass30
class MaterializedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase):
def setUp(self):
self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name))
def tearDown(self):
self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name))
self.session.execute("DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
def test_materialized_view_metadata_creation(self):
"""
test for materialized view metadata creation
test_materialized_view_metadata_creation tests that materialized view metadata properly created implicitly in
both keyspace and table metadata under "views". It creates a simple base table and then creates a view based
on that table. It then checks that the materialized view metadata is contained in the keyspace and table
metadata. Finally, it checks that the keyspace_name and the base_table_name in the view metadata is properly set.
@since 3.0.0
@jira_ticket PYTHON-371
        @expected_result Materialized view metadata in both the ks and table should be created when a new view is created.
@test_category metadata
"""
self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertEqual(self.keyspace_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].keyspace_name)
self.assertEqual(self.function_table_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].base_table_name)
def test_materialized_view_metadata_alter(self):
"""
test for materialized view metadata alteration
test_materialized_view_metadata_alter tests that materialized view metadata is properly updated implicitly in the
table metadata once that view is updated. It creates a simple base table and then creates a view based
        on that table. It then alters that materialized view and checks that the materialized view metadata is altered in
the table metadata.
@since 3.0.0
@jira_ticket PYTHON-371
        @expected_result Materialized view metadata should be updated when the view is altered.
@test_category metadata
"""
self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"] )
self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.keyspace_name))
self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"])
def test_materialized_view_metadata_drop(self):
"""
test for materialized view metadata dropping
test_materialized_view_metadata_drop tests that materialized view metadata is properly removed implicitly in
both keyspace and table metadata once that view is dropped. It creates a simple base table and then creates a view
        based on that table. It then drops that materialized view and checks that the materialized view metadata is removed
from the keyspace and table metadata.
@since 3.0.0
@jira_ticket PYTHON-371
        @expected_result Materialized view metadata in both the ks and table should be removed when the view is dropped.
@test_category metadata
"""
self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name))
self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name))
@greaterthanorequalcass30
class MaterializedViewMetadataTestComplex(BasicSegregatedKeyspaceUnitTestCase):
def test_create_view_metadata(self):
"""
test to ensure that materialized view metadata is properly constructed
test_create_view_metadata tests that materialized views metadata is properly constructed. It runs a simple
query to construct a materialized view, then proceeds to inspect the metadata associated with that MV.
        Columns are inspected to ensure that all are of the proper type, and in the proper order.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be constructed appropriately.
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score INT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
self.session.execute(create_mv)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['monthlyhigh']
self.assertIsNotNone(score_table.views["monthlyhigh"])
        self.assertEqual(len(score_table.views), 1)
# Make sure user is a partition key, and not null
self.assertEqual(len(score_table.partition_key), 1)
self.assertIsNotNone(score_table.columns['user'])
self.assertTrue(score_table.columns['user'], score_table.partition_key[0])
# Validate clustering keys
self.assertEqual(len(score_table.clustering_key), 4)
self.assertIsNotNone(score_table.columns['game'])
self.assertTrue(score_table.columns['game'], score_table.clustering_key[0])
self.assertIsNotNone(score_table.columns['year'])
self.assertTrue(score_table.columns['year'], score_table.clustering_key[1])
self.assertIsNotNone(score_table.columns['month'])
self.assertTrue(score_table.columns['month'], score_table.clustering_key[2])
self.assertIsNotNone(score_table.columns['day'])
self.assertTrue(score_table.columns['day'], score_table.clustering_key[3])
self.assertIsNotNone(score_table.columns['score'])
# Validate basic mv information
self.assertEqual(mv.keyspace_name, self.keyspace_name)
self.assertEqual(mv.name, "monthlyhigh")
self.assertEqual(mv.base_table_name, "scores")
self.assertFalse(mv.include_all_columns)
        # Validate that all columns are present and correct
mv_columns = list(mv.columns.values())
self.assertEqual(len(mv_columns), 6)
game_column = mv_columns[0]
self.assertIsNotNone(game_column)
self.assertEqual(game_column.name, 'game')
self.assertEqual(game_column, mv.partition_key[0])
year_column = mv_columns[1]
self.assertIsNotNone(year_column)
self.assertEqual(year_column.name, 'year')
self.assertEqual(year_column, mv.partition_key[1])
month_column = mv_columns[2]
self.assertIsNotNone(month_column)
self.assertEqual(month_column.name, 'month')
self.assertEqual(month_column, mv.partition_key[2])
def compare_columns(a, b, name):
self.assertEqual(a.name, name)
self.assertEqual(a.name, b.name)
self.assertEqual(a.table, b.table)
self.assertEqual(a.cql_type, b.cql_type)
self.assertEqual(a.is_static, b.is_static)
self.assertEqual(a.is_reversed, b.is_reversed)
score_column = mv_columns[3]
compare_columns(score_column, mv.clustering_key[0], 'score')
user_column = mv_columns[4]
compare_columns(user_column, mv.clustering_key[1], 'user')
day_column = mv_columns[5]
compare_columns(day_column, mv.clustering_key[2], 'day')
def test_base_table_column_addition_mv(self):
"""
        test to ensure that materialized view metadata is properly updated when base columns are added
test_create_view_metadata tests that materialized views metadata is properly updated when columns are added to
the base table.
@since 3.0.0
@jira_ticket PYTHON-419
@expected_result Materialized view metadata should be updated correctly
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score TEXT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS
SELECT * FROM {0}.scores
WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL
PRIMARY KEY (game, score, user, year, month, day)
WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name)
self.session.execute(create_mv)
self.session.execute(create_mv_alltime)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
self.assertIsNotNone(score_table.views["monthlyhigh"])
self.assertIsNotNone(score_table.views["alltimehigh"])
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)
insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format((self.keyspace_name))
self.session.execute(insert_fouls)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
self.assertIn("fouls", score_table.columns)
# This is a workaround for mv notifications being separate from base table schema responses.
        # This may be fixed with future protocol changes
for i in range(10):
mv_alltime = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"]
if("fouls" in mv_alltime.columns):
break
time.sleep(.2)
self.assertIn("fouls", mv_alltime.columns)
        mv_alltime_fouls_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls']
        self.assertEqual(mv_alltime_fouls_column.cql_type, 'int')
@lessthancass30
def test_base_table_type_alter_mv(self):
"""
test to ensure that materialized view metadata is properly updated when a type in the base table
is updated.
test_create_view_metadata tests that materialized views metadata is properly updated when the type of base table
column is changed.
@since 3.0.0
@jira_ticket CASSANDRA-10424
@expected_result Materialized view metadata should be updated correctly
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score TEXT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
self.session.execute(create_mv)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)
alter_scores = """ALTER TABLE {0}.scores ALTER score TYPE blob""".format((self.keyspace_name))
self.session.execute(alter_scores)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)
score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score']
self.assertEqual(score_column.cql_type, 'blob')
# until CASSANDRA-9920+CASSANDRA-10500 MV updates are only available later with an async event
for i in range(10):
score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score']
if "blob" == score_mv_column.cql_type:
break
time.sleep(.2)
self.assertEqual(score_mv_column.cql_type, 'blob')
def test_metadata_with_quoted_identifiers(self):
"""
test to ensure that materialized view metadata is properly constructed when quoted identifiers are used
test_metadata_with_quoted_identifiers tests that materialized views metadata is properly constructed.
It runs a simple query to construct a materialized view, then proceeds to inspect the metadata associated with
that MV. The caveat here is that the tables and the materialized view both have quoted identifiers
        Columns are inspected to ensure that all are of the proper type, and in the proper order.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be constructed appropriately even with quoted identifiers.
@test_category metadata
"""
create_table = """CREATE TABLE {0}.t1 (
"theKey" int,
"the;Clustering" int,
"the Value" int,
PRIMARY KEY ("theKey", "the;Clustering"))""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.mv1 AS
SELECT "theKey", "the;Clustering", "the Value"
FROM {0}.t1
WHERE "theKey" IS NOT NULL AND "the;Clustering" IS NOT NULL AND "the Value" IS NOT NULL
PRIMARY KEY ("theKey", "the;Clustering")""".format(self.keyspace_name)
self.session.execute(create_mv)
t1_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['t1']
mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIsNotNone(t1_table.views["mv1"])
        self.assertEqual(len(t1_table.views), 1)
# Validate partition key, and not null
self.assertEqual(len(t1_table.partition_key), 1)
self.assertIsNotNone(t1_table.columns['theKey'])
self.assertTrue(t1_table.columns['theKey'], t1_table.partition_key[0])
# Validate clustering key column
self.assertEqual(len(t1_table.clustering_key), 1)
self.assertIsNotNone(t1_table.columns['the;Clustering'])
self.assertTrue(t1_table.columns['the;Clustering'], t1_table.clustering_key[0])
# Validate regular column
self.assertIsNotNone(t1_table.columns['the Value'])
# Validate basic mv information
self.assertEqual(mv.keyspace_name, self.keyspace_name)
self.assertEqual(mv.name, "mv1")
self.assertEqual(mv.base_table_name, "t1")
self.assertFalse(mv.include_all_columns)
        # Validate that all columns are present and correct
mv_columns = list(mv.columns.values())
self.assertEqual(len(mv_columns), 3)
theKey_column = mv_columns[0]
self.assertIsNotNone(theKey_column)
self.assertEqual(theKey_column.name, 'theKey')
self.assertEqual(theKey_column, mv.partition_key[0])
cluster_column = mv_columns[1]
self.assertIsNotNone(cluster_column)
self.assertEqual(cluster_column.name, 'the;Clustering')
self.assertEqual(cluster_column.name, mv.clustering_key[0].name)
self.assertEqual(cluster_column.table, mv.clustering_key[0].table)
self.assertEqual(cluster_column.is_static, mv.clustering_key[0].is_static)
self.assertEqual(cluster_column.is_reversed, mv.clustering_key[0].is_reversed)
value_column = mv_columns[2]
self.assertIsNotNone(value_column)
self.assertEqual(value_column.name, 'the Value')
class GroupPerHost(BasicSharedKeyspaceUnitTestCase):
@classmethod
def setUpClass(cls):
cls.common_setup(rf=1, create_class_table=True)
cls.table_two_pk = "table_with_two_pk"
cls.session.execute(
'''
CREATE TABLE {0}.{1} (
k_one int,
k_two int,
v int,
PRIMARY KEY ((k_one, k_two))
)'''.format(cls.ks_name, cls.table_two_pk)
)
def test_group_keys_by_host(self):
"""
Test to ensure group_keys_by_host functions as expected. It is tried
with a table with a single field for the partition key and a table
with two fields for the partition key
@since 3.13
@jira_ticket PYTHON-647
@expected_result group_keys_by_host return the expected value
@test_category metadata
"""
stmt = """SELECT * FROM {}.{}
WHERE k_one = ? AND k_two = ? """.format(self.ks_name, self.table_two_pk)
keys = ((1, 2), (2, 2), (2, 3), (3, 4))
self._assert_group_keys_by_host(keys, self.table_two_pk, stmt)
stmt = """SELECT * FROM {}.{}
WHERE k = ? """.format(self.ks_name, self.ks_name)
keys = ((1, ), (2, ), (2, ), (3, ))
self._assert_group_keys_by_host(keys, self.ks_name, stmt)
def _assert_group_keys_by_host(self, keys, table_name, stmt):
keys_per_host = group_keys_by_replica(self.session, self.ks_name, table_name, keys)
self.assertNotIn(NO_VALID_REPLICA, keys_per_host)
prepared_stmt = self.session.prepare(stmt)
for key in keys:
routing_key = prepared_stmt.bind(key).routing_key
hosts = self.cluster.metadata.get_replicas(self.ks_name, routing_key)
self.assertEqual(1, len(hosts)) # RF is 1 for this keyspace
self.assertIn(key, keys_per_host[hosts[0]])
|
py
|
1a5843ca28f0b22fbdfdc8c13d870131010daafa
|
# Download images and labels related to the validation/test set in the dataset
import os
import cv2
import shutil
import argparse
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('--inp', type = str, help = 'Input path.')
parser.add_argument('--out', type = str, help = 'Output path.')
parser.add_argument('--label', type = str, help = 'Image labels.')
opt = parser.parse_args()
print(opt)
file = open(opt.label)
valid_set = list()
for line in file.readlines():
valid_set.append(line.split('/')[-1].split('\n')[0])
n_images = 0
os.makedirs(opt.out, exist_ok=True)
for subdir, dirs, files in os.walk(opt.inp):
for file in sorted(files):
if file.endswith(('.jpg')) and file in valid_set:
            img = Image.open(opt.inp + os.sep + file)
img = img.convert('RGB')
img.save(opt.out + os.sep + file)
shutil.copy(opt.inp + os.sep + str(file).split('.')[0] + '.txt', opt.out)
n_images += 1
print(n_images, 'images.')
|
py
|
1a5843fae36318c592afe2040220ab273ae3b9d4
|
import sys
import a
print(sys, a, b)
|
py
|
1a584435e88ccf4039d2a00d178707cb960f27ea
|
import random
import sys
from datetime import datetime
import torch
import numpy as np
import os
import logging
import torch.utils.data as data
import json
def seed_all_rng(seed=None):
"""
Set the random seed for the RNG in torch, numpy and python.
Args:
seed (int): if None, will use a strong random seed.
"""
if seed is None:
seed = (
os.getpid()
+ int(datetime.now().strftime("%S%f"))
+ int.from_bytes(os.urandom(2), "big")
)
logger = logging.getLogger(__name__)
logger.info("Using a generated random seed {}".format(seed))
np.random.seed(seed)
torch.set_rng_state(torch.manual_seed(seed).get_state())
random.seed(seed)
def worker_init_reset_seed(worker_id):
seed_all_rng(np.random.randint(2 ** 31) + worker_id)
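# Illustrative sketch (added for clarity, not used elsewhere in this module): how
# worker_init_reset_seed is typically attached to a DataLoader so that every worker
# process gets its own seed. The toy tensor dataset below is an assumption.
def _example_seeded_loader(batch_size=2, num_workers=2):
    toy_dataset = data.TensorDataset(torch.zeros(8, 3))
    return data.DataLoader(
        toy_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        worker_init_fn=worker_init_reset_seed,  # re-seeds torch/numpy/random per worker
    )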
class Label(object):
def __init__(self, gt_bboxes, gt_classes):
self.gt_classes = gt_classes
self.gt_bboxes = gt_bboxes
def __len__(self):
if isinstance(self.gt_classes, list):
return len(self.gt_classes)
elif isinstance(self.gt_classes, torch.Tensor):
return list(self.gt_classes.size())[0]
elif type(self.gt_classes) is np.ndarray:
return self.gt_classes.shape[0]
else:
return 0
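# Tiny illustration (not referenced elsewhere): Label wraps ground-truth boxes and
# classes and reports how many objects it holds. The arrays below are assumptions.
def _example_label():
    boxes = np.zeros((3, 4), dtype=np.float32)
    classes = np.array([1, 2, 1])
    return len(Label(boxes, classes))  # -> 3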
# class AspectRatioGroupedDataset(object):
# """
# Batch data that have similar aspect ratio together.
# In this implementation, images whose aspect ratio < (or >) 1 will
# be batched together.
#
# It assumes the underlying dataset produces dicts with "width" and "height" keys.
# It will then produce a list of original dicts with length = batch_size,
# all with similar aspect ratios.
# """
#
# def __init__(self, dataset):
# """
# Args:
# dataset: an iterable. Each element must be a dict with keys
# "width" and "height", which will be used to batch data.
# batch_size (int):
# """
# self.dataset = dataset
# self.batch_size = dataset.batch_size
# self._buckets = [[] for _ in range(2)]
# # Hard-coded two aspect ratio groups: w > h and w < h.
# # Can add support for more aspect ratio groups, but doesn't seem useful
#
# def __iter__(self):
# for d in self.dataset:
# _, h, w = list(d["image"].size())
# bucket_id = 0 if w > h else 1
# bucket = self._buckets[bucket_id]
# bucket.append(d)
# if len(bucket) == self.batch_size:
# yield bucket[:]
# del bucket[:]
class AspectRatioGroupedDataset(object):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: an iterable. Each element must be a dict with keys
"width" and "height", which will be used to batch data.
batch_size (int):
"""
self.dataset = dataset
self.batch_size = batch_size
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
bucket = []
for d in self.dataset:
bucket.append(d)
if len(bucket) == self.batch_size:
yield bucket[:]
bucket = []
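# Illustration only (not referenced by the class above): wrap any iterable of sample
# dicts to obtain fixed-size batches. The toy samples below are assumptions.
def _example_grouped_batches(batch_size=4):
    toy_samples = [{"image": torch.zeros(3, 4, 4)} for _ in range(8)]
    grouped = AspectRatioGroupedDataset(toy_samples, batch_size)
    return [len(batch) for batch in grouped]  # -> [4, 4]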
"""
Enables writing json with numpy arrays to file
"""
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self,obj)
"""
Class will hold the average dimension for a class, regressed value is the residual
"""
class ClassAverages:
def __init__(self, classes=[]):
self.dimension_map = {}
self.filename = os.path.abspath(os.path.dirname(__file__)) + '/class_averages.txt'
if len(classes) == 0: # eval mode
self.load_items_from_file()
for detection_class in classes:
class_ = detection_class.lower()
if class_ in self.dimension_map.keys():
continue
self.dimension_map[class_] = {}
self.dimension_map[class_]['count'] = 0
self.dimension_map[class_]['total'] = np.zeros(3, dtype=np.double)
def add_item(self, class_, dimension):
class_ = class_.lower()
self.dimension_map[class_]['count'] += 1
self.dimension_map[class_]['total'] += dimension
# self.dimension_map[class_]['total'] /= self.dimension_map[class_]['count']
def get_item(self, class_):
class_ = class_.lower()
return self.dimension_map[class_]['total'] / self.dimension_map[class_]['count']
def dump_to_file(self):
f = open(self.filename, "w")
f.write(json.dumps(self.dimension_map, cls=NumpyEncoder))
f.close()
def load_items_from_file(self):
f = open(self.filename, 'r')
dimension_map = json.load(f)
for class_ in dimension_map:
dimension_map[class_]['total'] = np.asarray(dimension_map[class_]['total'])
self.dimension_map = dimension_map
def recognized_class(self, class_):
return class_.lower() in self.dimension_map
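# Usage sketch (illustration only; the class name and dimensions are assumptions):
# accumulate per-class dimensions during training and query the running average.
def _example_class_averages():
    averages = ClassAverages(classes=["Car"])
    averages.add_item("Car", np.array([1.5, 1.6, 3.9]))
    averages.add_item("Car", np.array([1.4, 1.7, 4.1]))
    return averages.get_item("Car")  # element-wise mean of the added dimensions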
|
py
|
1a58447e0e35b759f39fcce0ae1438c4be8f2578
|
import setuptools
# Reads the content of your README.md into a variable to be used in the setup below
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='jv_toolbox', # should match the package folder
packages=['jv_toolbox'], # should match the package folder
version='0.0.1', # important for updates
license='MIT', # should match your chosen license
description='Testing installation of Package',
long_description=long_description, # loads your README.md
long_description_content_type="text/markdown", # README.md is of type 'markdown'
author='Jonathan Vlk',
author_email='[email protected]',
url='https://github.com/jgvlk/toolbox_project',
project_urls = { # Optional
"Bug Tracker": "https://github.com/jgvlk/toolbox_project/issues"
},
install_requires=['requests'], # list all packages that your package uses
keywords=["pypi", "jv_toolbox", "tutorial"], #descriptive meta-data
classifiers=[ # https://pypi.org/classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Documentation',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
download_url="https://github.com/jgvlk/toolbox_project/archive/refs/tags/0.0.1.tar.gz",
)
|
py
|
1a584503d7cb7a927e83b69281f2dc33ca92fb75
|
"""Brownian bridge."""
import numpy as np
from stochastic.processes.continuous.brownian_motion import BrownianMotion
from stochastic.utils.validation import check_numeric
class BrownianBridge(BrownianMotion):
"""Brownian bridge.
.. image:: _static/brownian_bridge.png
:scale: 50%
A Brownian bridge is a Brownian motion with a conditional value on the
right endpoint of the process.
:param float b: the right endpoint value of the Brownian bridge at time t
:param float t: the right hand endpoint of the time interval :math:`[0,t]`
for the process
:param numpy.random.Generator rng: a custom random number generator
"""
def __init__(self, b=0, t=1, rng=None):
super().__init__(drift=0, scale=1, t=t, rng=rng)
self.b = b
def __str__(self):
return "Brownian bridge from 0 to {b} on [0, {t}]".format(t=str(self.t), b=str(self.b))
def __repr__(self):
return "BrownianBridge(b={b}, t={t})".format(t=str(self.t), b=str(self.b))
@property
def b(self):
"""Right endpoint value."""
return self._b
@b.setter
def b(self, value):
check_numeric(value, "Time end")
self._b = value
def _sample_brownian_bridge(self, n, b=None):
"""Generate a realization of a Brownian bridge."""
if b is None:
b = self.b
bm = self._sample_brownian_motion(n)
return bm + self.times(n) * (b - bm[-1]) / self.t
def _sample_brownian_bridge_at(self, times, b=None):
"""Generate a realization of a Brownian bridge at times."""
if b is None:
b = self.b
bm = self._sample_brownian_motion_at(times)
return bm + np.array(times) * (b - bm[-1]) / times[-1]
def sample(self, n):
"""Generate a realization.
:param int n: the number of increments to generate
"""
return self._sample_brownian_bridge(n)
def sample_at(self, times, b=None):
"""Generate a realization using specified times.
:param times: a vector of increasing time values at which to generate
the realization
:param float b: the right endpoint value for :py:attr:`times` [-1]
"""
return self._sample_brownian_bridge_at(times, b)
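# Minimal usage sketch (illustration only, not part of the published API): draw one
# bridge path pinned to ``b`` at the right endpoint ``t``.
def _example_bridge_path(n=500):
    bridge = BrownianBridge(b=1, t=1)
    times = bridge.times(n)  # equally spaced sample times on [0, t]
    path = bridge.sample(n)  # realization with path[0] == 0 and path[-1] == b
    return times, path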
|
py
|
1a5845704abfdd0e3332207dba6d0c67c59492cb
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import hashlib
import hmac
ONETIMEAUTH_BYTES = 10
ONETIMEAUTH_CHUNK_BYTES = 12
ONETIMEAUTH_CHUNK_DATA_LEN = 2
def sha1_hmac(secret, data):
return hmac.new(secret, data, hashlib.sha1).digest()
def onetimeauth_verify(_hash, data, key):
return _hash == sha1_hmac(key, data)[:ONETIMEAUTH_BYTES]
def onetimeauth_gen(data, key):
return sha1_hmac(key, data)[:ONETIMEAUTH_BYTES]
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
            v4addr = list(map(lambda x: ('%02X' % ord(x)), v4addr))
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 0x01
ADDRTYPE_IPV6 = 0x04
ADDRTYPE_HOST = 0x03
ADDRTYPE_AUTH = 0x10
ADDRTYPE_MASK = 0xF
def pack_addr(address):
address_str = to_str(address)
address = to_bytes(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
# add ss header
def add_header(address, port, data=b''):
_data = b''
_data = pack_addr(address) + struct.pack('>H', port) + data
return _data
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype & ADDRTYPE_MASK == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 4 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
        if addr == "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
ip >>= 1
prefix_size += 1
logging.warn("You did't specify CIDR routing prefix size for %s, "
"implicit treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
|
py
|
1a5846203d5d27e4dbdc5add98fa902469e8e4f5
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cassandra.graph import Vertex, Edge
from tests.integration.advanced.graph import (
validate_classic_vertex, validate_classic_edge, validate_generic_vertex_result_type,
validate_classic_edge_properties, validate_line_edge,
validate_generic_edge_result_type, validate_path_result_type)
from tests.integration import requiredse, DSE_VERSION
from tests.integration.advanced import use_single_node_with_graph
from tests.integration.advanced.graph import GraphTestConfiguration
from tests.integration.advanced.graph.fluent import (
BaseExplicitExecutionTest, _AbstractTraversalTest, _validate_prop)
def setup_module():
if DSE_VERSION:
dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}}
use_single_node_with_graph(dse_options=dse_options)
@requiredse
@GraphTestConfiguration.generate_tests(traversal=True)
class ExplicitExecutionTest(BaseExplicitExecutionTest, _AbstractTraversalTest):
"""
This test class will execute all tests of the AbstractTraversalTestClass using Explicit execution
All queries will be run by converting them to byte code, and calling execute graph explicitly with a generated ep.
"""
@staticmethod
def fetch_key_from_prop(property):
return property.label
def _validate_classic_vertex(self, g, vertex):
validate_classic_vertex(self, vertex)
def _validate_generic_vertex_result_type(self, g, vertex):
validate_generic_vertex_result_type(self, vertex)
def _validate_classic_edge_properties(self, g, edge):
validate_classic_edge_properties(self, edge)
def _validate_classic_edge(self, g, edge):
validate_classic_edge(self, edge)
def _validate_line_edge(self, g, edge):
validate_line_edge(self, edge)
def _validate_generic_edge_result_type(self, edge):
validate_generic_edge_result_type(self, edge)
def _validate_type(self, g, vertex):
for key in vertex.properties:
value = vertex.properties[key][0].value
_validate_prop(key, value, self)
def _validate_path_result_type(self, g, path_obj):
# This pre-processing is due to a change in TinkerPop
# properties are not returned automatically anymore
# with some queries.
for obj in path_obj.objects:
if not obj.properties:
props = []
if isinstance(obj, Edge):
obj.properties = {
p.key: p.value
for p in self.fetch_edge_props(g, obj)
}
elif isinstance(obj, Vertex):
obj.properties = {
p.label: p.value
for p in self.fetch_vertex_props(g, obj)
}
validate_path_result_type(self, path_obj)
def _validate_meta_property(self, g, vertex):
self.assertEqual(len(vertex.properties), 1)
self.assertEqual(len(vertex.properties['key']), 1)
p = vertex.properties['key'][0]
self.assertEqual(p.label, 'key')
self.assertEqual(p.value, 'meta_prop')
self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'})
|
py
|
1a5847398be7f1c00104dab0220c69afab1a9d42
|
from builtins import range
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, "../utils"))
import tf_util
from structural_losses.tf_nndistance import nn_distance
from structural_losses.tf_approxmatch import approx_match
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(
point_cloud, is_training, num_output_points, bottleneck_size, bn_decay=None
):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
input_image = tf.expand_dims(point_cloud, -1)
# Point functions (MLP implemented as conv2d)
net = tf_util.conv2d(
input_image,
64,
[1, 3],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv1",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
64,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv2",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
64,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv3",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
128,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv4",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
bottleneck_size,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv5",
bn_decay=bn_decay,
)
net = tf_util.max_pool2d(net, [num_point, 1], padding="VALID", scope="maxpool")
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(
net, 256, bn=True, is_training=is_training, scope="fc11b", bn_decay=bn_decay
)
net = tf_util.fully_connected(
net, 256, bn=True, is_training=is_training, scope="fc12b", bn_decay=bn_decay
)
net = tf_util.fully_connected(
net, 256, bn=True, is_training=is_training, scope="fc13b", bn_decay=bn_decay
)
net = tf_util.fully_connected(
net,
3 * num_output_points,
bn=True,
is_training=is_training,
scope="fc14b",
bn_decay=bn_decay,
activation_fn=None,
)
out_point_cloud = tf.reshape(net, [batch_size, -1, 3])
return out_point_cloud
def calc_distances(p0, points):
return ((p0 - points) ** 2).sum(axis=1)
def fps_from_given_pc(pts, k, given_pc):
farthest_pts = np.zeros((k, 3))
t = np.size(given_pc) // 3
farthest_pts[0:t] = given_pc
distances = calc_distances(farthest_pts[0], pts)
for i in range(1, t):
distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))
for i in range(t, k):
farthest_pts[i] = pts[np.argmax(distances)]
distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))
return farthest_pts
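# Usage sketch (illustration only): complete a single seed point to k points with
# farthest point sampling. The random cloud below is an assumption for the example.
def example_fps_from_seed(k=64):
    pts = np.random.rand(1024, 3)
    seed = pts[:1]  # (1, 3) seed point
    return fps_from_given_pc(pts, k, seed)  # (k, 3) sampled points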
def unique(arr):
_, idx = np.unique(arr, return_index=True)
return arr[np.sort(idx)]
def nn_matching(full_pc, idx, k, complete_fps=True):
batch_size = np.size(full_pc, 0)
out_pc = np.zeros((full_pc.shape[0], k, 3))
for ii in range(0, batch_size):
best_idx = idx[ii]
if complete_fps:
best_idx = unique(best_idx)
out_pc[ii] = fps_from_given_pc(full_pc[ii], k, full_pc[ii][best_idx])
else:
out_pc[ii] = full_pc[ii][best_idx]
return out_pc[:, 0:k, :]
def emd_matching(full_pc, gen_pc, sess):
batch_size = np.size(full_pc, 0)
k = np.size(gen_pc, 1)
out_pc = np.zeros_like(gen_pc)
match_mat_tensor = approx_match(
tf.convert_to_tensor(full_pc), tf.convert_to_tensor(gen_pc)
)
pc1_match_idx_tensor = tf.cast(tf.argmax(match_mat_tensor, axis=2), dtype=tf.int32)
pc1_match_idx = pc1_match_idx_tensor.eval(session=sess)
for ii in range(0, batch_size):
best_idx = unique(pc1_match_idx[ii])
out_pc[ii] = fps_from_given_pc(full_pc[ii], k, full_pc[ii][best_idx])
return out_pc
def get_nn_indices(ref_pc, samp_pc):
_, idx, _, _ = nn_distance(samp_pc, ref_pc)
return idx
def get_simplification_loss(ref_pc, samp_pc, pc_size, gamma=1, delta=0):
cost_p1_p2, _, cost_p2_p1, _ = nn_distance(samp_pc, ref_pc)
max_cost = tf.reduce_max(cost_p1_p2, axis=1)
max_cost = tf.reduce_mean(max_cost)
cost_p1_p2 = tf.reduce_mean(cost_p1_p2)
cost_p2_p1 = tf.reduce_mean(cost_p2_p1)
loss = cost_p1_p2 + max_cost + (gamma + delta * pc_size) * cost_p2_p1
tf.summary.scalar("cost_p1_p2", cost_p1_p2)
tf.summary.scalar("cost_p2_p1", cost_p2_p1)
tf.summary.scalar("max_cost", max_cost)
return loss
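# Sketch only (batch size, point counts and bottleneck size below are illustrative
# assumptions, not tuned values): wire the sampler network to the simplification loss.
def example_build_sampler_graph(batch_size=32, num_point=1024, num_out_points=64):
    pointclouds_pl, _ = placeholder_inputs(batch_size, num_point)
    is_training_pl = tf.placeholder(tf.bool, shape=())
    sampled_pc = get_model(
        pointclouds_pl, is_training_pl, num_out_points, bottleneck_size=128
    )
    loss = get_simplification_loss(pointclouds_pl, sampled_pc, num_out_points)
    return pointclouds_pl, is_training_pl, sampled_pc, loss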
|
py
|
1a5847f815f4a7c0daa32be5301e7053fb2e7bcf
|
# Main microservice script
# Retrieves, configures and connects all of the
# components of the microservice
import os
from flask import Flask
from flask_cors import CORS
from .config import config, config_logging
def create_app(args=None):
# create and configure the app
app = Flask(__name__, static_url_path='', static_folder='static', instance_relative_config=True)
# add user provided configurations for the
if args:
app.config.update(
HOST=args["host"],
PORT=args["port"],
DATABASE_NAME = args["database_name"],
DATABASE_USER = args["database_user"],
DATABASE_PASSWORD = args["database_password"]
)
# set the service environment
SERVICE_ENV = args["env"] if args else 'development'
# setup the app configuration
if SERVICE_ENV == 'production':
app.config.from_object(config.ProductionConfig)
elif SERVICE_ENV == 'development':
app.config.from_object(config.DevelopmentConfig)
elif SERVICE_ENV == 'testing':
app.config.from_object(config.TestingConfig)
# setup the cors configurations
if app.config['CORS']['origins']:
CORS(app, origins=app.config['CORS']['origins'])
# add error handlers
from .routes import error_handlers
error_handlers.register(app)
# create context: components are using app.config
with app.app_context():
# add logger configuration
config_logging.init_app(app)
# add index routes
from .routes import index
app.register_blueprint(index.bp)
# add embedding routes
from .routes import service
app.register_blueprint(service.bp)
# TODO: log start of the service
# return the app
return app
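# Illustrative helper (not used by the service itself): build the app with explicit
# arguments for local development. All values below are placeholder assumptions.
def create_dev_app():
    return create_app({
        "host": "127.0.0.1",
        "port": 5000,
        "env": "development",
        "database_name": "dev_db",
        "database_user": "dev_user",
        "database_password": "dev_password",
    })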
|
py
|
1a584856cbd5abf860508ae63b3b0284150c4c00
|
"""
sentry.views.exception
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.utils.translation import ugettext as _
from .base import View
__all__ = ('Exception',)
class Exception(View):
verbose_name = _('Exception')
verbose_name_plural = _('Exceptions')
def should_store(self, event):
return 'sentry.interfaces.Exception' in event.interfaces
|
py
|
1a5848c0c63b5c43ab54431a537bffcd372d95db
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""
Routines for reconstructing structures based on recorded manipulations
"""
from .. import query
from meta import Meta
def ReconStructHeap(EA):
#
# For the current instruction, find all heap references
#
Traces = query.GetTraces(EA)
if not Traces:
print "No heap traces for %x" % EA
return
#
# Get at the metadata
#
Metadata = [Meta(Trace['Address']) for Trace in Traces]
#
# Ensure homogoneity of:
# - Allocation type
# - Allocation size
# - Allocation offset
# - Allocation trace
#
HeapMeta = tuple(M for M in Metadata if M.Heap)
Sizes = set(M.Size for M in HeapMeta)
Offsets = set(M.Offset for M in HeapMeta)
Traces = set(M.Trace for M in HeapMeta)
    if len(HeapMeta) != len(Metadata):
print "Non-Fatal: Not all interactions are heap metadata!"
if len(Sizes) != 1:
print "Fatal: Multiple sizes %r" % Sizes
if len(Offsets) != 1:
print "Fatal: Multiple offsets %r" % Offsets
if len(Traces) != 1:
print "Fatal: Multiple allocation stacks"
|
py
|
1a5848cd7896bd3119de9960a82abe7a3470488e
|
import os
import warnings
import numpy as np
import pytorch_lightning as pl
import toml
import torch
import torch.nn.functional as F
import wandb
from pytorch_lightning.callbacks import ModelCheckpoint
from torch import nn
from torch import optim
from torchvision import models
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.models.segmentation.deeplabv3 import DeepLabHead, DeepLabV3
from torchvision.models.segmentation.fcn import FCNHead, FCN
from data.data_palm import get_palm_loaders
from models.resnet_unet import UNetWithResnet50Encoder
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.multiprocessing.set_sharing_strategy("file_system")
warnings.filterwarnings("ignore", category=UserWarning)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# set to False if not using wandb
WANDB = True
if WANDB:
from pytorch_lightning.loggers import WandbLogger
CHECKPOINT_PATH = None
CHECKPOINTS_BASE_PATH = toml.load("paths.toml")["CHECKPOINTS_BASE_PATH"]
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "supervised_baseline/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_inner_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_inner_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_outer_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_outer_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_outer_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_snps_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_snps_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_snps_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_risk_scores_gen_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_risk_scores_gen_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_risk_scores_gen_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_burden_scores_gen_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_burden_scores_gen_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_burden_scores_gen_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "barlow_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "byol_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "simsiam_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "simclr_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "nnclr_r50_proj128/epoch_99-step_170399.ckpt"
train_pct = 0.6
val_pct = 0.8 - train_pct
loader_param = {
"batch_size": 4,
"size": 448,
"joint_mask": True,
"train_pct": train_pct,
"val_pct": val_pct,
}
accumulate_grad_batches = 16
n_classes = 2
epochs = 50
warmup_epochs = 10 # if set to 0, fine-tune in all epochs
lr = 1e-3
dice_weight = 0.8
bce_weight = 0.2
seg_model_name = "unet" # "fcn" or "deeplabv3" or "unet"
basemodel = models.resnet50
pretrained_imagenet = False
set_scheduler = "none" # "none" or "steplr" or "onecycle" or "reduceplat"
# optimizer = "sgd"
# optimizer_dict = dict(weight_decay=5e-4, momentum=0.9, nesterov=True)
optimizer = "adam"
optimizer_dict = dict(weight_decay=1e-5)
pl.seed_everything(42, workers=True)
def dice(y, y_pred):
intersection = np.sum(y_pred * y) * 2.0
return intersection / (np.sum(y_pred) + np.sum(y))
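# Sanity sketch (illustration only): the dice of two identical binary masks is 1.0.
def _example_dice():
    mask = np.array([0, 1, 1, 0], dtype=np.float32)
    return dice(mask, mask)  # -> 1.0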
def load_from_state_dict_supervised(model, state_dict):
"""Loads the model weights from the state dictionary."""
# step 1: filter state dict
model_keys_prefixes = []
for okey, oitem in model.state_dict().items():
model_keys_prefixes.append(okey.split(".")[0])
new_state_dict = {}
index = 0
for key, item in state_dict.items():
# remove the "model." prefix from the state dict key
all_key_parts = [model_keys_prefixes[index]]
all_key_parts.extend(key.split(".")[2:])
index += 1
new_key = ".".join(all_key_parts)
if new_key in model.state_dict() and "fc" not in new_key:
new_state_dict[new_key] = item
# step 2: load from checkpoint
model.load_state_dict(new_state_dict, strict=False)
def load_from_state_dict_gen_img(model, state_dict):
"""Loads the model weights from the state dictionary."""
# step 1: filter state dict
model_keys_prefixes = []
for okey, oitem in model.state_dict().items():
model_keys_prefixes.append(okey.split(".")[0])
new_state_dict = {}
index = 0
for key, item in state_dict.items():
if (
key.startswith("imaging_model")
or key.startswith("model.imaging_model")
or key.startswith("models.0.imaging_model")
):
# remove the "model." prefix from the state dict key
all_key_parts = [model_keys_prefixes[index]]
if key.startswith("imaging_model"):
all_key_parts.extend(key.split(".")[2:])
elif key.startswith("model.imaging_model"):
all_key_parts.extend(key.split(".")[3:])
else:
all_key_parts.extend(key.split(".")[4:])
index += 1
new_key = ".".join(all_key_parts)
if new_key in model.state_dict():
new_state_dict[new_key] = item
# step 2: load from checkpoint
model.load_state_dict(new_state_dict, strict=False)
def load_from_state_dict_img_only(model, state_dict):
"""Loads the model weights from the state dictionary."""
# step 1: filter state dict
model_keys_prefixes = []
for okey, oitem in model.state_dict().items():
model_keys_prefixes.append(okey.split(".")[0])
new_state_dict = {}
index = 0
for key, item in state_dict.items():
if (
(
key.startswith("resnet_simclr")
or key.startswith("resnet_simsiam")
or key.startswith("resnet_barlow_twins")
or key.startswith("resnet_byol")
or key.startswith("resnet_nnclr")
)
and "projection" not in key
and "prediction" not in key
and "momentum" not in key
):
# remove the "model." prefix from the state dict key
all_key_parts = [model_keys_prefixes[index]]
all_key_parts.extend(key.split(".")[3:])
index += 1
new_key = ".".join(all_key_parts)
if new_key in model.state_dict():
new_state_dict[new_key] = item
# step 2: load from checkpoint
model.load_state_dict(new_state_dict, strict=False)
class Model(pl.LightningModule):
def __init__(
self,
n_output,
loss_fct,
base_model=models.resnet50,
seg_model_name="fcn", # can be "fcn" or "deeplabv3" or "unet"
pretrained=True,
lr=1e-3,
total_steps=0,
set_scheduler="none",
opt_method="adam",
opt_param=dict(),
):
super().__init__()
self.lr = lr
self.total_steps = total_steps
self.loss_fct = loss_fct
self.set_scheduler = set_scheduler
if CHECKPOINT_PATH is None:
backbone = base_model(pretrained=pretrained)
else:
backbone = base_model(pretrained=pretrained)
state_dict = torch.load(CHECKPOINT_PATH, map_location=DEVICE)
if (
"simclr" in CHECKPOINT_PATH
or "byol" in CHECKPOINT_PATH
or "barlow" in CHECKPOINT_PATH
or "simsiam" in CHECKPOINT_PATH
or "nnclr" in CHECKPOINT_PATH
):
load_from_state_dict_img_only(backbone, state_dict["state_dict"])
elif "supervised" in CHECKPOINT_PATH:
if "state_dict" in state_dict:
load_from_state_dict_supervised(backbone, state_dict["state_dict"])
else:
load_from_state_dict_supervised(backbone, state_dict)
else:
if "state_dict" in state_dict:
load_from_state_dict_gen_img(backbone, state_dict["state_dict"])
else:
load_from_state_dict_gen_img(backbone, state_dict)
if warmup_epochs > 0 and CHECKPOINT_PATH is not None:
for param in backbone.parameters():
param.requires_grad = False
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
out_layer = "layer4"
out_inplanes = 2048
return_layers = {out_layer: "out"}
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model_map = {
"deeplabv3": (DeepLabHead, DeepLabV3),
"fcn": (FCNHead, FCN),
}
classifier = model_map[seg_model_name][0](out_inplanes, n_output)
base_model = model_map[seg_model_name][1]
self.model = base_model(backbone, classifier, aux_classifier=None)
else:
self.model = UNetWithResnet50Encoder(backbone, n_classes=n_output)
self.opt_method = opt_method
self.opt_param = opt_param
self.labels = []
self.preds = []
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
if self.opt_method == "adam":
optimizer = optim.Adam(self.parameters(), lr=self.lr, **self.opt_param)
elif self.opt_method == "sgd":
optimizer = optim.SGD(self.parameters(), lr=self.lr, **self.opt_param)
else:
raise NotImplementedError(
f"optimization method {self.opt_method} not set up"
)
if self.set_scheduler == "none":
return optimizer
elif self.set_scheduler == "steplr":
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
elif self.set_scheduler == "onecycle":
scheduler = optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.lr,
total_steps=self.total_steps,
)
elif self.set_scheduler == "reduceplat":
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
            return {
                "optimizer": optimizer,
                "lr_scheduler": scheduler,
                "monitor": "valid_loss",
            }
return [optimizer], [scheduler]
def on_train_epoch_start(self) -> None:
if warmup_epochs > 0 and self.current_epoch == warmup_epochs:
if CHECKPOINT_PATH is not None:
for param in self.parameters():
param.requires_grad = True
self.trainer.optimizers[0] = optim.Adam(
self.parameters(), lr=self.lr / 10, **self.opt_param
)
def training_step(self, batch, idx):
x, y = batch
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
y_hat = self(x)["out"]
else:
y_hat = self(x)
loss = self.loss_fct(y_hat, y)
self.log("train_loss", loss, on_epoch=True)
return loss
def validation_step(self, batch, idx):
x, y = batch
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
y_hat = self(x)["out"]
else:
y_hat = self(x)
loss = self.loss_fct(y_hat, y)
y_np = y.detach().cpu().numpy()
y_hat_np = F.sigmoid(y_hat).detach().cpu().numpy()
self.store_predictions_labels(y_np, y_hat_np)
self.log("valid_loss", loss, on_epoch=True, prog_bar=True)
if idx == 0:
self.display_batch_imgs(x, y_hat_np, y_np, title="val images")
return loss
def test_step(self, batch, idx):
x, y = batch
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
y_hat = self(x)["out"]
else:
y_hat = self(x)
loss = self.loss_fct(y_hat, y)
y_np = y.detach().cpu().numpy()
y_hat_np = F.sigmoid(y_hat).detach().cpu().numpy()
self.store_predictions_labels(y_np, y_hat_np)
self.log("test_loss", loss, on_epoch=True, prog_bar=True)
self.display_batch_imgs(x, y_hat_np, y_np, title="test images")
return loss
def on_validation_epoch_end(self) -> None:
y = np.concatenate(self.labels).ravel()
y_hat = np.concatenate(self.preds).ravel()
self.log(
"valid_dice",
dice(y, y_hat),
)
self.labels = []
self.preds = []
def on_test_epoch_end(self) -> None:
y = np.concatenate(self.labels).ravel()
y_hat = np.concatenate(self.preds).ravel()
self.log(
"test_dice",
dice(y, y_hat),
)
self.labels = []
self.preds = []
def store_predictions_labels(self, y, y_hat):
self.labels.append(y)
self.preds.append(y_hat)
def display_batch_imgs(self, x, y_hat_np, y_np, title="val images"):
mask_list = []
for original_image, true_mask, prediction_mask in zip(x, y_np, y_hat_np):
mask_list.append(
wandb.Image(
original_image.cpu(),
masks={
"prediction": {
"mask_data": np.argmax(prediction_mask, axis=0),
"class_labels": {0: "background", 1: "foreground"},
},
"ground truth": {
"mask_data": np.argmax(true_mask, axis=0),
"class_labels": {0: "background", 1: "foreground"},
},
},
)
)
self.logger.experiment.log({title: mask_list})
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
# comment out if your model contains a sigmoid or equivalent activation layer
inputs = F.sigmoid(inputs)
# flatten label and prediction tensors
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
return 1 - dice
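# Minimal sanity-check sketch for the DiceLoss above (hypothetical helper, not called
# anywhere in this script; it assumes `torch` is already imported near the top of the
# file). On near-perfect logits the loss should approach 0, and on inverted logits it
# should approach 1, up to the smoothing term.
def _dice_loss_sanity_check():
    loss_fn = DiceLoss()
    target = torch.cat([torch.ones(1, 32), torch.zeros(1, 32)], dim=1)
    good_logits = target * 20.0 - 10.0   # +10 where target is 1, -10 where it is 0
    bad_logits = -good_logits            # the exact opposite prediction
    assert loss_fn(good_logits, target) < 0.05
    assert loss_fn(bad_logits, target) > 0.9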
loaders = get_palm_loaders(**loader_param)
tl, vl, ttl = loaders
bce_fn = torch.nn.BCEWithLogitsLoss()
dice_fn = DiceLoss()
def loss_fn(y_pred, y_true):
bce = bce_fn(y_pred, y_true)
dice = dice_fn(y_pred, y_true)
return bce_weight * bce + dice_weight * dice
use_sch = set_scheduler != "none"
total_steps = epochs * len(tl) if use_sch else 0
model = (
Model(
n_classes,
loss_fct=loss_fn,
base_model=basemodel,
lr=lr,
total_steps=total_steps,
pretrained=pretrained_imagenet,
set_scheduler=set_scheduler,
opt_method=optimizer,
opt_param=optimizer_dict,
seg_model_name=seg_model_name,
)
.cuda()
.train()
)
logger = None
if WANDB:
logger = WandbLogger(project="PALM_myopia_segmentation")
params = {
"epochs": epochs,
"train_pct": train_pct,
"lr": lr,
"scheduler": set_scheduler,
"base_model": basemodel.__name__,
"img_size": tl.dataset[0][0].shape[-1],
"bs": tl.batch_size,
"accumulate_grad_batches": accumulate_grad_batches,
"seg_model_name": seg_model_name,
}
logger.log_hyperparams(params)
trainer = pl.Trainer(
gpus=1,
deterministic=True,
max_epochs=epochs,
logger=logger if WANDB else True,
accumulate_grad_batches=accumulate_grad_batches,
callbacks=[
ModelCheckpoint(
monitor="valid_loss",
filename="model-{epoch:02d}-{valid_dice:.2f}",
save_top_k=1,
),
],
)
trainer.validate(model, dataloaders=vl)
trainer.fit(model, tl, vl)
result = trainer.test(dataloaders=ttl, ckpt_path="best")
|
py
|
1a584956d3a6d92aad9616c23f95aa8c634aa6f5
|
frase = 'Curso em Vídeo Python'
frase2 = ' Curso em Vídeo Python '
#SLICING
f = 'SLICING'
print(f'{f:-^90}')
print(frase) #print the object.
print(frase[3]) #only character #3.
print(frase[3:12]) #from character #3 up to #12.
print(frase[:13]) #no start defined, up to #13.
print(frase[13:]) #from character #13 to an unspecified end.
print(frase[0:15]) #from character #0 up to #15.
print(frase[:15]) #no start defined, up to #15.
print(frase[::2]) #no start or end defined, stepping 2 by 2.
#ANALYSIS
a = 'ANALYSIS'
print(f'{a:-^90}')
len(frase) #character count, starting from 0.
len(frase2) #spaces count as characters.
print(len(frase)) #show the counted value on screen.
print(frase.count('o')) #count how many 'o' the object 'frase' contains.
print(frase.find('deo')) #position of the character where the substring 'deo' starts.
print(frase.find('Android')) #substring does not exist in the object, so find returns -1.
print('Curso' in frase) #Is 'Curso' in frase? True.
#TRANSFORMATION
n = 'TRANSFORMATION'
print(f'{n:-^90}')
print(frase.replace('Python', 'Android')) #Replaces Python with Android
print(frase.upper()) #Keeps the characters already in uppercase and converts the rest to uppercase.
print(frase.lower()) #Keeps the characters already in lowercase and converts the rest to lowercase.
print(frase.capitalize()) #First character uppercase, the rest lowercase.
print(frase.title()) #Every word starts with an uppercase letter.
print(frase.strip()) #Removes unnecessary spaces from the right and the left.
print(frase2.rstrip()) #removes unnecessary spaces from the right.
print(frase2.lstrip()) #removes unnecessary spaces from the left.
#SPLITTING
d = 'SPLITTING'
dividido = frase.split()
print(f'{d:-^90}')
print(frase.split()) #Splits the string into a list with one item per word.
print(dividido) #new object with 'frase' already split.
print(dividido[0]) #item '0' of the resulting list.
print(dividido[2]) #item '2' of the resulting list.
print(dividido[2][3]) #character '3' of item '2' of the resulting list.
#JOINING
j = 'JOINING'
print(f'{j:-^90}')
print('-'.join(frase)) #Joins all the characters of the string, separating them with '-'.
#COMBINING FUNCTIONS ON OBJECTS
c = 'COMBINING FUNCTIONS ON OBJECTS'
print(f'{c:-^90}')
print(frase.upper().count('O')) #Counts 'O' after converting the string to uppercase.
print(frase.lower().find('vídeo')) #Finds 'vídeo' after converting the string to lowercase.
|
py
|
1a58497125bef73eeb58cbfb98b2cdbf2930e8c4
|
"""Base Factory module."""
import factory
# from factory import alchemy
# from faker import Faker as RealFaker
# from faker.providers import internet, file, person, lorem, BaseProvider
from ultron8.api.db.u_sqlite.session import db_session
# # pylint: disable=invalid-name
# # class PackNameProvider(BaseProvider):
# # def pack_name = (
# # "ultron8", "nimrod", "jarvis", "friday", "eva", "adam"
# # )
# pack_name = (
# "ultron8", "nimrod", "jarvis", "friday", "eva", "adam"
# )
# pack_name_list = [
# "ultron8", "nimrod", "jarvis", "friday", "eva", "adam"
# ]
# fake = RealFaker()
# fake.add_provider(internet)
# fake.add_provider(file)
# fake.add_provider(person)
# fake.add_provider(lorem)
# # fake.add_provider(PackNameProvider)
# NOTE: https://factoryboy.readthedocs.io/en/latest/reference.html?highlight=abstract#factory.FactoryOptions.abstract
class BaseFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
# This attribute indicates that the Factory subclass should not be used to generate objects, but instead provides some extra defaults.
abstract = True
# SQLAlchemy session to use to communicate with the database when creating an object through this SQLAlchemyModelFactory.
sqlalchemy_session = db_session
# Control the action taken by sqlalchemy session at the end of a create call.
sqlalchemy_session_persistence = "commit"
|
py
|
1a584a07ae78a44be6868506c59a065b8e5508ca
|
############################################################################## Setup
"""
1D Bayesian Optimization Test:
(1) Generate 1D objective.
(2) Initialize with data.
(3) Test predictions, variance estimation, and sampling.
(4) Run single iteration of each acquisition function.
"""
# Imports
import numpy as np
import pandas as pd
from edbo.bro import BO_express
from edbo.pd_utils import to_torch, torch_to_numpy
import matplotlib.pyplot as plt
import random
############################################################################## Test Functions
# Objective
def random_result(*kwargs):
"""Random objective."""
return round(random.random(),3) * 100
# Test a precomputed objective
def BO_pred(acq_func, plot=False, return_='pred', append=False, init='rand'):
# Define reaction space and auto-encode
n_ligands = random.sample([3,4,5,6,7,8], 1)[0]
ligands = pd.read_csv('ligands.csv').sample(n_ligands).values.flatten()
bases = ['DBU', 'MTBD', 'potassium carbonate', 'potassium phosphate', 'potassium tert-butoxide']
reaction_components={'aryl_halide':['chlorobenzene','iodobenzene','bromobenzene'],
'base':bases,
'solvent':['THF', 'Toluene', 'DMSO', 'DMAc'],
'ligand':ligands,
'concentration':[0.1, 0.2, 0.3],
'temperature': [20, 30, 40]
}
encoding={
'aryl_halide':'resolve',
'base':'resolve',
'solvent':'resolve',
'ligand':'mordred',
'concentration':'numeric',
'temperature':'numeric'}
    # Instantiate BO class
bo = BO_express(reaction_components=reaction_components,
encoding=encoding,
acquisition_function=acq_func,
init_method=init,
batch_size=random.sample(range(30),1)[0],
computational_objective=random_result,
target='yield')
bo.init_sample(append=True)
bo.run(append=append)
bo.save()
bo = BO_express()
bo.load()
# Check prediction
if return_ == 'pred':
try:
bo.model.predict(to_torch(bo.obj.domain)) # torch.tensor
bo.model.predict(bo.obj.domain.values) # numpy.array
bo.model.predict(list(bo.obj.domain.values)) # list
bo.model.predict(bo.obj.domain) # pandas.DataFrame
except:
return False
return True
    # Check predictive posterior variance
    elif return_ == 'var':
        try:
            bo.model.variance(to_torch(bo.obj.domain)) # torch.tensor
            bo.model.variance(bo.obj.domain.values) # numpy.array
            bo.model.variance(list(bo.obj.domain.values)) # list
            bo.model.variance(bo.obj.domain) # pandas.DataFrame
except:
return False
return True
# Make sure sampling works with tensors, arrays, lists, and DataFrames
elif return_ == 'sample':
try:
bo.model.sample_posterior(to_torch(bo.obj.domain)) # torch.tensor
bo.model.sample_posterior(bo.obj.domain.values) # numpy.array
bo.model.sample_posterior(list(bo.obj.domain.values)) # list
bo.model.sample_posterior(bo.obj.domain) # pandas.DataFrame
return True
except:
return False
# Plot model
elif return_ == 'plot':
mean = bo.obj.scaler.unstandardize(bo.model.predict(bo.obj.domain))
std = np.sqrt(bo.model.variance(bo.obj.domain)) * bo.obj.scaler.std * 2
samples = bo.obj.scaler.unstandardize(bo.model.sample_posterior(bo.obj.domain, batch_size=3))
plt.figure(1, figsize=(6,6))
# Model mean and standard deviation
plt.subplot(211)
plt.plot(range(len(mean)), mean, label='GP')
plt.fill_between(range(len(mean)), mean-std, mean+std, alpha=0.4)
# Known results and next selected point
plt.scatter(bo.obj.results_input().index.values, bo.obj.results_input()['yield'], color='black', label='known')
plt.ylabel('f(x)')
# Samples
plt.subplot(212)
for sample in samples:
plt.plot(range(len(mean)), torch_to_numpy(sample))
plt.xlabel('x')
plt.ylabel('Posterior Samples')
plt.show()
return True
elif return_ == 'simulate':
if init != 'external':
bo.init_seq.batch_size = random.sample([2,3,4,5,6,7,8,9,10],1)[0]
bo.simulate(iterations=3)
bo.plot_convergence()
bo.model.regression()
return True
############################################################################## Tests
# Test predicted mean and variance, sampling, and ploting
def test_BO_pred_mean_TS():
assert BO_pred('TS', return_='pred')
def test_BO_var():
assert BO_pred('TS', return_='var')
def test_BO_sample():
assert BO_pred('TS', return_='sample')
def test_BO_plot():
assert BO_pred('TS', return_='plot')
# Test simulations
def test_BO_simulate_TS():
assert BO_pred('TS', return_='simulate')
def test_BO_simulate_EI():
assert BO_pred('EI', return_='simulate')
|
py
|
1a584aa9848ca731559911a2b81d2bc5e69c6e18
|
"""I/O for UCSC Browser Extensible Data (BED)."""
from __future__ import absolute_import, division, print_function
from builtins import map, next
import shlex
import pandas as pd
from Bio.File import as_handle
from .util import report_bad_line
def read_bed(infile):
"""UCSC Browser Extensible Data (BED) format.
A BED file has these columns:
chromosome, start position, end position, [gene, strand, other stuff...]
Coordinate indexing is from 0.
Sets of regions are separated by "track" lines. This function stops reading
after encountering a track line other than the first one in the file.
"""
# ENH: just pd.read_table, skip 'track'
@report_bad_line
def _parse_line(line):
fields = line.split('\t', 6)
chrom, start, end = fields[:3]
gene = (fields[3].rstrip()
if len(fields) >= 4 else '-')
strand = (fields[5].rstrip()
if len(fields) >= 6 else '.')
return chrom, int(start), int(end), gene, strand
def track2track(handle):
try:
firstline = next(handle)
if firstline.startswith("browser "):
# UCSC Genome Browser feature -- ignore it
firstline = next(handle)
except StopIteration:
pass
else:
if not firstline.startswith("track"):
yield firstline
for line in handle:
if line.startswith("track"):
break
yield line
with as_handle(infile, 'rU') as handle:
rows = map(_parse_line, track2track(handle))
return pd.DataFrame.from_records(rows, columns=["chromosome", "start",
"end", "gene", "strand"])
def read_bed3(infile):
"""3-column BED format: chromosome, start, end."""
table = read_bed(infile)
return table.loc[:, ['chromosome', 'start', 'end']]
def read_bed4(infile):
"""4-column BED format: chromosome, start, end, name."""
table = read_bed(infile)
return table.loc[:, ['chromosome', 'start', 'end', 'gene']]
def read_bed6(infile):
"""6-column BED format: chromosome, start, end, name, score, strand."""
return NotImplemented
def parse_bed_track(line):
"""Parse the "name" field of a BED track definition line.
Example:
track name=146793_BastianLabv2_P2_target_region description="146793_BastianLabv2_P2_target_region"
"""
fields = shlex.split(line) # raises ValueError if line is corrupted
assert fields[0] == 'track'
for field in fields[1:]:
if '=' in field:
key, val = field.split('=', 1)
if key == 'name':
return val
raise ValueError("No name defined for this track")
def group_bed_tracks(bedfile):
"""Group the parsed rows in a BED file by track.
Yields (track_name, iterable_of_lines), much like itertools.groupby.
"""
# ENH - make this memory-efficient w/ generators or something
with as_handle(bedfile, 'r') as handle:
curr_track = 'DEFAULT'
curr_lines = []
for line in handle:
if line.startswith('track'):
if curr_lines:
yield curr_track, curr_lines
curr_lines = []
curr_track = parse_bed_track(line)
else:
curr_lines.append(line)
yield curr_track, curr_lines
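# Hypothetical sketch of group_bed_tracks on a two-track BED held in memory (the
# helper name and track name below are illustrative). Lines appearing before any
# "track" line are grouped under the placeholder name 'DEFAULT'.
def _group_bed_tracks_example():
    from io import StringIO
    text = ("chr1\t0\t10\n"
            "track name=panel_a\n"
            "chr2\t5\t15\n")
    return {name: lines for name, lines in group_bed_tracks(StringIO(text))}
    # -> {'DEFAULT': ['chr1\t0\t10\n'], 'panel_a': ['chr2\t5\t15\n']}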
# _____________________________________________________________________
def write_bed(dframe):
if len(dframe.columns) == 3:
return write_bed3(dframe)
    elif len(dframe.columns) == 4:
        return write_bed4(dframe)
else:
# Default: BED-like, keep all trailing columns
return dframe
def write_bed3(dframe):
return dframe.loc[:, ["chromosome", "start", "end"]]
def write_bed4(dframe):
dframe = dframe.copy()
if "gene" not in dframe:
dframe["gene"] = '-'
return dframe.loc[:, ["chromosome", "start", "end", "gene"]]
|
py
|
1a584afb7093c5360e5e94f409aa16e77a8d705b
|
from __future__ import absolute_import, division, print_function
from xfel.ui import settings_dir
from xfel.ui.db import db_proxy, get_run_path
import os, shutil
known_job_statuses = ["DONE", "ERR", "PEND", "RUN", "SUSP", "PSUSP", "SSUSP", "UNKWN", "EXIT", "DONE", "ZOMBI", "DELETED", "SUBMIT_FAIL", "SUBMITTED", "HOLD"]
finished_job_statuses = ["DONE", "EXIT", "DELETED", "UNKWN", "ERR", "SUBMIT_FAIL"]
class JobFactory(object):
@staticmethod
def from_job(job):
if job.task_id is None:
return IndexingJob(job.app, job.id, **job._db_dict)
task = job.app.get_task(job.task_id)
if task.type == "indexing":
return IndexingJob(job.app, job.id, **job._db_dict)
if task.type == "ensemble_refinement":
return EnsembleRefinementJob(job.app, job.id, **job._db_dict)
if task.type == "scaling":
return ScalingJob(job.app, job.id, **job._db_dict)
if task.type == "merging":
return MergingJob(job.app, job.id, **job._db_dict)
@staticmethod
def from_args(app, job_id = None, **kwargs):
return JobFactory.from_job(Job(app, job_id, **kwargs))
class Job(db_proxy):
def __init__(self, app, job_id = None, **kwargs):
db_proxy.__init__(self, app, "%s_job" % app.params.experiment_tag, id = job_id, **kwargs)
self.job_id = self.id
self._run = None
self._rungroup = None
self._trial = None
self._task = None
self._dataset = None
self._dataset_version = None
def __getattr__(self, name):
# Called only if the property cannot be found
if name in ["run", "rungroup", "trial", "task", "dataset", "dataset_version"]:
_name = "_" + name
name_id = name + "_id"
if getattr(self, _name) is None:
if name == "dataset_version":
if self.dataset_id is not None:
self._dataset_version = self.dataset.latest_version # todo bug fix: add this to get_all_jobs
elif getattr(self, name_id) is not None:
setattr(self, _name, getattr(self.app, "get_" + name)(**{name_id:self.trial_id}))
return getattr(self, _name)
elif name == "scope":
return task_scope[task_types.index(self.type)]
else:
return super(Job, self).__getattr__(name)
def __setattr__(self, name, value):
if name in ["run", "rungroup", "trial", "task", "dataset", "dataset_version"]:
setattr(self, "_"+name, value)
else:
super(Job, self).__setattr__(name, value)
def get_log_path(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run)
return os.path.join(run_path, "stdout", "log.out")
def submit(self, previous_job = None):
raise NotImplementedError("Override me!")
def delete(self, output_only=False):
raise NotImplementedError("Override me!")
def get_output_files(self):
    # Return folder and experiment and reflection table suffixes
raise NotImplementedError("Override me!")
def remove_from_db(self):
assert self.status == "DELETED"
print("Removing job %d from the db"%self.id, end=' ')
tag = self.app.params.experiment_tag
query = """DELETE job FROM `%s_job` job
WHERE job.id = %d""" % (
tag, self.id)
cursor = self.app.execute_query(query, commit=True)
print("(%d)"%cursor.rowcount)
def get_identifier_string(self):
if self.app.params.facility.name == 'lcls':
s = "%s_%s_r%04d_t%03d_rg%03d"% \
(self.app.params.facility.lcls.experiment, self.app.params.experiment_tag, int(self.run.run), self.trial.trial, self.rungroup.id)
else:
s = "%s_%s_t%03d_rg%03d"% \
(self.app.params.experiment_tag, self.run.run, self.trial.trial, self.rungroup.id)
if self.task is not None:
s += "_task%03d"%self.task.id
return s
class IndexingJob(Job):
def get_output_files(self):
run_path = str(get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run))
return os.path.join(run_path, 'out'), '_integrated.expt', '_integrated.refl'
def submit(self, previous_job = None):
import libtbx.load_env
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
dispatcher = self.app.params.dispatcher
phil_str = self.trial.target_phil_str
if phil_str is None: phil_str = ""
if self.rungroup.extra_phil_str is not None:
phil_str += "\n" + self.rungroup.extra_phil_str
from xfel.ui import load_phil_scope_from_dispatcher
if dispatcher == "cxi.xtc_process":
image_format = 'pickle'
else:
orig_phil_scope = load_phil_scope_from_dispatcher(dispatcher)
if os.path.isfile(dispatcher):
dispatcher = 'libtbx.python ' + dispatcher
from iotbx.phil import parse
if self.rungroup.two_theta_low is not None or self.rungroup.two_theta_high is not None:
override_str = """
radial_average {
enable = True
show_plots = False
verbose = False
output_bins = False
}
"""
phil_scope = orig_phil_scope.fetch(parse(override_str))
else:
phil_scope = orig_phil_scope
trial_params = phil_scope.fetch(parse(phil_str)).extract()
image_format = self.rungroup.format
if image_format == 'cbf':
if "rayonix" in self.rungroup.detector_address.lower():
mode = "rayonix"
elif "cspad" in self.rungroup.detector_address.lower():
mode = "cspad"
elif "jungfrau" in self.rungroup.detector_address.lower():
mode = "jungfrau"
else:
mode = "other"
if hasattr(trial_params, 'format'):
trial_params.format.file_format = image_format
trial_params.format.cbf.mode = mode
if self.rungroup.calib_dir is not None or self.rungroup.config_str is not None or dispatcher == 'cxi.xtc_process' or image_format == 'pickle':
config_path = os.path.join(configs_dir, identifier_string + ".cfg")
else:
config_path = None
if hasattr(trial_params.dispatch, 'process_percent'):
trial_params.dispatch.process_percent = self.trial.process_percent
    # Dictionary for formatting the submit phil and, if used, the labelit cfg file
d = dict(
# Generally for the LABELIT backend or image pickles
address = self.rungroup.detector_address,
default_calib_dir = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0"),
dark_avg_path = self.rungroup.dark_avg_path,
dark_stddev_path = self.rungroup.dark_stddev_path,
untrusted_pixel_mask_path = self.rungroup.untrusted_pixel_mask_path,
detz_parameter = self.rungroup.detz_parameter,
gain_map_path = self.rungroup.gain_map_path,
gain_mask_level = self.rungroup.gain_mask_level,
beamx = self.rungroup.beamx,
beamy = self.rungroup.beamy,
energy = self.rungroup.energy,
binning = self.rungroup.binning,
two_theta_low = self.rungroup.two_theta_low,
two_theta_high = self.rungroup.two_theta_high,
# Generally for job submission
dry_run = self.app.params.dry_run,
dispatcher = dispatcher,
cfg = config_path,
experiment = self.app.params.facility.lcls.experiment, # LCLS specific parameter
run_num = self.run.run,
output_dir = self.app.params.output_folder,
use_ffb = self.app.params.facility.lcls.use_ffb, # LCLS specific parameter
# Generally for both
trial = self.trial.trial,
rungroup = self.rungroup.rungroup_id,
experiment_tag = self.app.params.experiment_tag,
calib_dir = self.rungroup.calib_dir,
nproc = self.app.params.mp.nproc,
nproc_per_node = self.app.params.mp.nproc_per_node,
queue = self.app.params.mp.queue or None,
env_script = self.app.params.mp.env_script[0] if self.app.params.mp.env_script is not None and len(self.app.params.mp.env_script) > 0 and len(self.app.params.mp.env_script[0]) > 0 else None,
method = self.app.params.mp.method,
htcondor_executable_path = self.app.params.mp.htcondor.executable_path,
target = target_phil_path,
host = self.app.params.db.host,
dbname = self.app.params.db.name,
user = self.app.params.db.user,
port = self.app.params.db.port,
# always use mpi for 'lcls'
use_mpi = self.app.params.mp.method != 'local' or (self.app.params.mp.method == 'local' and self.app.params.facility.name == 'lcls')
)
if self.app.params.db.password is not None and len(self.app.params.db.password) == 0:
d['password'] = None
else:
d['password'] = self.app.params.db.password
phil = open(target_phil_path, "w")
if dispatcher == 'cxi.xtc_process':
phil.write(phil_str)
else:
extra_scope = None
if hasattr(trial_params, 'format'):
if image_format == "cbf":
trial_params.input.address = self.rungroup.detector_address
trial_params.format.cbf.detz_offset = self.rungroup.detz_parameter
trial_params.format.cbf.override_energy = self.rungroup.energy
trial_params.format.cbf.invalid_pixel_mask = self.rungroup.untrusted_pixel_mask_path
if mode == 'cspad':
trial_params.format.cbf.cspad.gain_mask_value = self.rungroup.gain_mask_level
elif mode == 'rayonix':
trial_params.format.cbf.rayonix.bin_size = self.rungroup.binning
trial_params.format.cbf.rayonix.override_beam_x = self.rungroup.beamx
trial_params.format.cbf.rayonix.override_beam_y = self.rungroup.beamy
if trial_params.input.known_orientations_folder is not None:
trial_params.input.known_orientations_folder = trial_params.input.known_orientations_folder.format(run=self.run.run)
else:
if trial_params.spotfinder.lookup.mask is None:
trial_params.spotfinder.lookup.mask = self.rungroup.untrusted_pixel_mask_path
if trial_params.integration.lookup.mask is None:
trial_params.integration.lookup.mask = self.rungroup.untrusted_pixel_mask_path
if self.app.params.facility.name == 'lcls':
locator_path = os.path.join(configs_dir, identifier_string + ".loc")
locator = open(locator_path, 'w')
locator.write("experiment=%s\n"%self.app.params.facility.lcls.experiment) # LCLS specific parameter
locator.write("run=%s\n"%self.run.run)
locator.write("detector_address=%s\n"%self.rungroup.detector_address)
if self.rungroup.wavelength_offset:
locator.write("wavelength_offset=%s\n"%self.rungroup.wavelength_offset)
if self.app.params.facility.lcls.use_ffb:
locator.write("use_ffb=True\n")
if image_format == "cbf":
if mode == 'rayonix':
from xfel.cxi.cspad_ana import rayonix_tbx
pixel_size = rayonix_tbx.get_rayonix_pixel_size(self.rungroup.binning)
extra_scope = parse("geometry { detector { panel { origin = (%f, %f, %f) } } }"%(-self.rungroup.beamx * pixel_size,
self.rungroup.beamy * pixel_size,
-self.rungroup.detz_parameter))
locator.write("rayonix.bin_size=%s\n"%self.rungroup.binning)
elif mode == 'cspad':
locator.write("cspad.detz_offset=%s\n"%self.rungroup.detz_parameter)
locator.close()
d['locator'] = locator_path
else:
d['locator'] = None
if self.rungroup.two_theta_low is not None or self.rungroup.two_theta_high is not None:
try:
trial_params.radial_average.two_theta_low = self.rungroup.two_theta_low
trial_params.radial_average.two_theta_high = self.rungroup.two_theta_high
except AttributeError:
pass # not all dispatchers support radial averaging
working_phil = phil_scope.format(python_object=trial_params)
if extra_scope:
working_phil = working_phil.fetch(extra_scope)
diff_phil = orig_phil_scope.fetch_diff(source=working_phil)
phil.write(diff_phil.as_str())
phil.close()
if config_path is not None:
if dispatcher != 'cxi.xtc_process':
        d['untrusted_pixel_mask_path'] = None # Don't pass a pixel mask to mod_image_dict as it will
                                              # be used during dials processing directly
config_str = "[psana]\n"
if self.rungroup.calib_dir is not None:
config_str += "calib-dir=%s\n"%self.rungroup.calib_dir
modules = []
if self.rungroup.config_str is not None:
for line in self.rungroup.config_str.split("\n"):
if line.startswith('['):
modules.append(line.lstrip('[').rstrip(']'))
if dispatcher == 'cxi.xtc_process':
modules.insert(0, 'my_ana_pkg.mod_radial_average')
modules.extend(['my_ana_pkg.mod_hitfind:index','my_ana_pkg.mod_dump:index'])
elif image_format == 'pickle':
modules.insert(0, 'my_ana_pkg.mod_radial_average')
modules.extend(['my_ana_pkg.mod_image_dict'])
if self.app.params.facility.lcls.dump_shots:
modules.insert(0, 'my_ana_pkg.mod_dump:shot')
if len(modules) > 0:
config_str += "modules = %s\n"%(" ".join(modules))
if self.rungroup.config_str is not None:
config_str += self.rungroup.config_str + "\n"
if dispatcher == 'cxi.xtc_process' or image_format == 'pickle':
d['address'] = d['address'].replace('.','-').replace(':','|') # old style address
if dispatcher == 'cxi.xtc_process':
template = open(os.path.join(libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "index_all.cfg"))
elif image_format == 'pickle':
template = open(os.path.join(libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "image_dict.cfg"))
for line in template.readlines():
config_str += line.format(**d)
template.close()
d['address'] = self.rungroup.detector_address
cfg = open(config_path, 'w')
cfg.write(config_str)
cfg.close()
if dispatcher != 'cxi.xtc_process':
d['untrusted_pixel_mask_path'] = self.rungroup.untrusted_pixel_mask_path
submit_phil_path = os.path.join(configs_dir, identifier_string + "_submit.phil")
submit_root = libtbx.env.find_in_repositories("xfel/ui/db/cfgs")
if dispatcher in ['cxi.xtc_process', 'cctbx.xfel.xtc_process']:
template = open(os.path.join(submit_root, "submit_xtc_process.phil"))
else:
test_root = os.path.join(submit_root, "submit_" + dispatcher + ".phil")
if os.path.exists(test_root):
template = open(test_root)
else:
if hasattr(trial_params, 'format'):
template = open(os.path.join(submit_root, "submit_xtc_process.phil"))
else:
template = open(os.path.join(submit_root, "submit_xfel_process.phil"))
phil = open(submit_phil_path, "w")
if dispatcher == 'cxi.xtc_process':
d['target'] = None # any target phil will be in mod_hitfind
for line in template.readlines():
phil.write(line.format(**d))
d['target'] = target_phil_path
template.close()
phil.close()
from xfel.command_line.cxi_mpi_submit import Script as submit_script
args = [submit_phil_path]
if self.app.params.facility.name not in ['lcls']:
args.append(self.run.path)
return submit_script().run(args)
def delete(self, output_only=False):
if self.status not in finished_job_statuses:
print("Job is not finished (status = %s)"%self.status)
return
if self.status == "DELETED":
return
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
# Have to be careful to delete from the tables in the right order
tag = self.app.params.experiment_tag
def delete_and_commit(query):
cursor = self.app.execute_query(query, commit=True)
print("(%d)"%cursor.rowcount)
print("Deleting cell_bin entries", end=' ')
query = """DELETE cell_bin FROM `%s_cell_bin` cell_bin
JOIN `%s_crystal` crystal ON crystal.id = cell_bin.crystal_id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
ids = {}
for item in "crystal", "beam", "detector":
print("Listing %s ids"%item, end=' ')
query = """SELECT %s.id FROM `%s_%s` %s
JOIN `%s_experiment` expr ON expr.%s_id = %s.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
item, tag, item, item, tag, item, item, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
ids[item] = ",".join(item_ids)
if len(self.trial.isoforms) == 0:
print("Listing bin entries", end=' ')
query = """SELECT bin.id FROM `%s_bin` bin
JOIN `%s_cell` cell ON bin.cell_id = cell.id
JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
AND cell.trial_id is NULL""" % (
tag, tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
bin_ids = ",".join(item_ids)
print("Listing cell entries", end=' ')
query = """SELECT cell.id FROM `%s_cell` cell
JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
AND cell.trial_id IS NULL""" % (
tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
cell_ids = ",".join(item_ids)
print("Deleting experiment entries", end=' ')
query = """DELETE expr FROM `%s_experiment` expr
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
for item in "crystal", "beam", "detector":
if len(ids[item]) > 0:
print("Deleting %s entries"%item, end=' ')
query = """DELETE %s FROM `%s_%s` %s
WHERE %s.id IN (%s)""" % (
item, tag, item, item, item, ids[item])
delete_and_commit(query)
if len(self.trial.isoforms) == 0 and len(bin_ids) > 0:
print("Deleting bin entries", end=' ')
query = """DELETE bin FROM `%s_bin` bin
WHERE bin.id IN (%s)""" % (
tag, bin_ids)
delete_and_commit(query)
if len(self.trial.isoforms) == 0 and len(cell_ids) > 0:
print("Deleting cell entries", end=' ')
query = """DELETE cell FROM `%s_cell` cell
WHERE cell.id IN (%s)""" % (
tag, cell_ids)
delete_and_commit(query)
print("Listing imageset entries", end=' ')
query = """SELECT imgset.id FROM `%s_imageset` imgset
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
imageset_ids = ",".join(item_ids)
print("Deleting imageset_event entries", end=' ')
query = """DELETE is_e FROM `%s_imageset_event` is_e
JOIN `%s_event` evt ON evt.id = is_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
if len(imageset_ids) > 0:
print("Deleting imageset entries", end=' ')
query = """DELETE imgset FROM `%s_imageset` imgset
WHERE imgset.id IN (%s)""" % (
tag, imageset_ids)
delete_and_commit(query)
print("Deleting event entries", end=' ')
query = """DELETE evt FROM `%s_event` evt
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
self.status = "DELETED"
class EnsembleRefinementJob(Job):
def delete(self, output_only=False):
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
return os.path.join(run_path, 'combine_experiments_t%03d'%self.trial.trial, 'intermediates'), '_reintegrated.expt', '_reintegrated.refl'
def submit(self, previous_job = None):
from xfel.command_line.striping import Script
from xfel.command_line.cxi_mpi_submit import get_submission_id
from libtbx import easy_run
configs_dir = os.path.join(settings_dir, "cfgs")
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
with open(target_phil_path, 'w') as f:
if self.task.parameters:
f.write(self.task.parameters)
path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
os.mkdir(path)
arguments = """
mp.queue={}
mp.nproc={}
mp.nproc_per_node={}
mp.method={}
{}
mp.use_mpi=False
striping.results_dir={}
striping.trial={}
striping.rungroup={}
striping.run={}
{}
striping.chunk_size=3000
striping.stripe=False
striping.dry_run=True
striping.output_folder={}
reintegration.integration.lookup.mask={}
mp.local.include_mp_in_command=False
""".format(self.app.params.mp.queue if len(self.app.params.mp.queue) > 0 else None,
self.app.params.mp.nproc,
self.app.params.mp.nproc_per_node,
self.app.params.mp.method,
'\n'.join(['mp.env_script={}'.format(p) for p in self.app.params.mp.env_script if p]),
self.app.params.output_folder,
self.trial.trial,
self.rungroup.id,
self.run.run,
target_phil_path,
path,
self.rungroup.untrusted_pixel_mask_path,
).split()
commands = Script(arguments).run()
submission_ids = []
if self.app.params.mp.method == 'local':
self.status = "RUNNING"
for command in commands:
try:
result = easy_run.fully_buffered(command=command)
result.raise_if_errors()
except Exception as e:
if not "Warning: job being submitted without an AFS token." in str(e):
raise e
submission_ids.append(get_submission_id(result, self.app.params.mp.method))
if self.app.params.mp.method == 'local':
self.status = "DONE"
else:
return ",".join(submission_ids)
class ScalingJob(Job):
def delete(self, output_only=False):
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
return os.path.join(run_path, 'out'), ".expt", ".refl"
def write_submit_phil(self, submit_phil_path, target_phil_path):
import libtbx.load_env
from xfel.ui.db.task import task_types, task_dispatchers
submit_root = libtbx.env.find_in_repositories("xfel/ui/db/cfgs")
d = dict(
dry_run = self.app.params.dry_run,
dispatcher = task_dispatchers[task_types.index(self.task.type)],
run_num = self.run.run,
output_dir = self.app.params.output_folder,
trial = self.trial.trial,
rungroup = self.rungroup.rungroup_id,
task = self.task.id,
nproc = self.app.params.mp.nproc,
nproc_per_node = self.app.params.mp.nproc_per_node,
queue = self.app.params.mp.queue or None,
env_script = self.app.params.mp.env_script[0] if len(self.app.params.mp.env_script) > 0 and len(self.app.params.mp.env_script[0]) > 0 else None,
method = self.app.params.mp.method,
htcondor_executable_path = self.app.params.mp.htcondor.executable_path,
target = target_phil_path,
# always use mpi for 'lcls'
use_mpi = self.app.params.mp.method != 'local' or (self.app.params.mp.method == 'local' and self.app.params.facility.name == 'lcls')
)
with open(submit_phil_path, "w") as phil:
for line in open(os.path.join(submit_root, "submit_xfel_merge.phil")).readlines():
phil.write(line.format(**d))
def submit(self, previous_job = None):
from xfel.command_line.cxi_mpi_submit import Script as submit_script
output_path = os.path.join(get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task), 'out')
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
identifier_string = self.get_identifier_string()
submit_phil_path = os.path.join(configs_dir, identifier_string + "_submit.phil")
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
input_folder, expt_suffix, refl_suffix = previous_job.get_output_files()
with open(target_phil_path, 'w') as f:
f.write("input.path=%s\n"%input_folder)
f.write("input.experiments_suffix=%s\n"%expt_suffix)
f.write("input.reflections_suffix=%s\n"%refl_suffix)
f.write("output.output_dir=%s\n"%output_path)
f.write("output.prefix=%s_%d\n"%(self.task.type, self.task.id))
f.write(self.task.parameters)
self.write_submit_phil(submit_phil_path, target_phil_path)
args = [submit_phil_path]
if self.app.params.facility.name not in ['lcls']:
args.append(self.run.path)
return submit_script().run(args)
class MergingJob(Job):
def get_global_path(self):
return self.dataset_version.output_path()
def get_log_path(self):
return self.get_global_path()
def get_identifier_string(self):
return "%s_%s%03d_v%03d"%(self.dataset.name, self.task.type, self.task.id, self.dataset_version.version)
def delete(self, output_only=False):
job_folder = self.get_global_path()
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
path = self.get_global_path()
return path, ".expt", ".refl"
def submit(self, previous_job = None):
from xfel.command_line.cxi_mpi_submit import do_submit
output_path = self.get_global_path()
if not os.path.exists(output_path):
os.makedirs(output_path)
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(output_path, identifier_string + "_params.phil")
with open(target_phil_path, 'w') as f:
expt_suffix = refl_suffix = None
for job in self.dataset_version.jobs:
input_folder, _expt_suffix, _refl_suffix = job.get_output_files()
if expt_suffix is None: expt_suffix = _expt_suffix
else: assert expt_suffix == _expt_suffix
if refl_suffix is None: refl_suffix = _refl_suffix
else: assert refl_suffix == _refl_suffix
f.write("input.path=%s\n"%input_folder)
f.write("input.experiments_suffix=%s\n"%expt_suffix)
f.write("input.reflections_suffix=%s\n"%refl_suffix)
f.write("output.output_dir=%s\n"%output_path)
f.write("output.prefix=%s_v%03d\n"%(self.dataset.name, self.dataset_version.version))
f.write(self.task.parameters)
command = "cctbx.xfel.merge %s"%target_phil_path
submit_path = os.path.join(output_path, "submit.sh")
return do_submit(command, submit_path, output_path, self.app.params.mp, identifier_string)
# Support classes and functions for job submission
class _job(object):
"""Used to represent a job that may not have been submitted into the cluster or database yet"""
def __init__(self, trial, rungroup, run, task=None, dataset=None):
self.trial = trial
self.rungroup = rungroup
self.run = run
self.task = task
self.dataset = dataset
def __eq__(self, other):
ret = True
check = ['trial', 'rungroup', 'run', 'task']
if getattr(self, 'task') and self.task.scope == 'global':
check.append('dataset')
for subitem_name in check:
subitem = getattr(self, subitem_name)
other_subitem_id = getattr(other, subitem_name + '_id')
if subitem is None:
ret = ret and other_subitem_id is None
else:
ret = ret and subitem.id == other_subitem_id
return ret
def submit_all_jobs(app):
submitted_jobs = app.get_all_jobs()
if app.params.mp.method == 'local': # only run one job at a time
for job in submitted_jobs:
if job.status in ['RUN', 'UNKWN', 'SUBMITTED']: return
runs = app.get_all_runs()
trials = app.get_all_trials(only_active = True)
needed_jobs = []
for trial in trials:
for rungroup in trial.rungroups:
assert rungroup.active
for run in rungroup.runs:
needed_jobs.append(_job(trial, rungroup, run))
for job in needed_jobs:
if job in submitted_jobs:
continue
print("Submitting job: trial %d, rungroup %d, run %s"%(job.trial.trial, job.rungroup.id, job.run.run))
j = JobFactory.from_args(app,
trial_id = job.trial.id,
rungroup_id = job.rungroup.id,
run_id = job.run.id,
status = "SUBMITTED")
j.trial = job.trial; j.rungroup = job.rungroup; j.run = job.run
try:
j.submission_id = j.submit()
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
if app.params.mp.method == 'local': # only run one job at a time
return
datasets = app.get_all_datasets()
for dataset_idx, dataset in enumerate(datasets):
if not dataset.active: continue
# one of the tasks will have a trial, otherwise we don't know where to save the data
trial = None
for task in dataset.tasks:
if task.trial is not None:
if trial is None:
trial = task.trial
else:
assert trial.id == task.trial.id, "Found multiple trials, don't know where to save the results"
assert trial, "No trial found in task list, don't know where to save the results"
trial_tags_ids = [t.id for t in trial.tags]
dataset_tags = [t for t in dataset.tags if t.id in trial_tags_ids]
runs_rungroups = []
for rungroup in trial.rungroups:
for run in rungroup.runs:
run_tags_ids = [t.id for t in run.tags]
if dataset.tag_operator == "union":
if any([t.id in run_tags_ids for t in dataset_tags]):
runs_rungroups.append((run, rungroup))
elif dataset.tag_operator == "intersection":
if all([t.id in run_tags_ids for t in dataset_tags]):
runs_rungroups.append((run, rungroup))
else:
assert False
# Datasets always start with indexing
global_tasks = {}
for run, rungroup in runs_rungroups:
submit_next_task = False
last_task_status = ""
tasks = dataset.tasks
previous_job = None
for task_idx, task in enumerate(tasks):
if task.scope == 'global':
if previous_job.status in ["DONE", "EXIT"]:
key = (dataset_idx, task_idx)
if key not in global_tasks:
global_tasks[key] = []
global_tasks[key].append(previous_job)
continue
if task.type == 'indexing':
job = _job(trial, rungroup, run)
else:
job = _job(trial, rungroup, run, task)
try:
submitted_job = submitted_jobs[submitted_jobs.index(job)]
except ValueError:
if not submit_next_task:
print("Warning, expected to find submitted %s job: trial %d, rungroup %d, run %s, task %d"% \
(task.type, trial.trial, rungroup.id, run.run, task.id))
break
else:
if not task_idx+1 < len(tasks): break # no more tasks to do after this one
next_task = tasks[task_idx+1]
if submitted_job.status not in finished_job_statuses or submitted_job.status == "UNKWN":
print ("Task %s waiting on job %d (%s) for trial %d, rungroup %d, run %s, task %d" % \
(next_task.type, submitted_job.id, submitted_job.status, trial.trial, rungroup.id, run.run, next_task.id))
break
if submitted_job.status not in ["DONE", "EXIT"]:
print ("Task %s cannot start due to unexpected status for job %d (%s) for trial %d, rungroup %d, run %s, task %d" % \
(next_task.type, submitted_job.id, submitted_job.status, trial.trial, rungroup.id, run.run, next_task.id))
break
submit_next_task = True
previous_job = submitted_job
continue
print("Submitting %s job: trial %d, rungroup %d, run %s, task %d"% \
(task.type, trial.trial, rungroup.id, run.run, task.id))
j = JobFactory.from_args(app,
trial_id = trial.id,
rungroup_id = rungroup.id,
run_id = run.id,
task_id = task.id,
status = "SUBMITTED")
j.trial = job.trial; j.rungroup = job.rungroup; j.run = job.run; j.task = job.task
try:
j.submission_id = j.submit(previous_job)
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
previous_job = j
if app.params.mp.method == 'local': # only run one job at a time
return
break # job submitted so don't look for more in this run for this dataset
for global_task in global_tasks:
dataset = datasets[global_task[0]]
task = dataset.tasks[global_task[1]]
latest_version = dataset.latest_version
if latest_version is None:
next_version = 0
else:
latest_version_jobs = latest_version.jobs
      latest_version_job_ids = [j.id for j in latest_version_jobs if j.task_id != task.id]
      new_jobs = [j for j in global_tasks[global_task] if j.id not in latest_version_job_ids]
if not new_jobs: continue
next_version = latest_version.version + 1
latest_version = app.create_dataset_version(dataset_id = dataset.id, version=next_version)
for job in global_tasks[global_task]:
latest_version.add_job(job)
j = JobFactory.from_args(app,
task_id = task.id,
dataset_id = dataset.id,
status = "SUBMITTED")
j.task = task; j.dataset = dataset; j.dataset_version = latest_version
try:
j.submission_id = j.submit()
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
latest_version.add_job(j)
if app.params.mp.method == 'local': # only run one job at a time
return
|
py
|
1a584b9e255d5921f1cc7b8c26bb70b3e6ec7050
|
"""
Defines models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0.01)
class GradientReversalFunction(Function):
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_=1):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_)
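# Hypothetical sanity-check sketch for GradientReversal (not used by the models below):
# the forward pass is the identity, while the backward pass multiplies incoming
# gradients by -lambda_, so with lambda_=2 the gradient of y.sum() w.r.t. x is -2.
def _gradient_reversal_check():
    grl = GradientReversal(lambda_=2.0)
    x = torch.ones(3, requires_grad=True)
    y = grl(x)
    y.sum().backward()
    assert torch.allclose(y, x)                              # identity forward
    assert torch.allclose(x.grad, torch.full_like(x, -2.0))  # reversed, scaled gradient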
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Model(nn.Module):
def __init__(self, opt):
super(Model, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.acoustic_feature_dim = opt.acoustic_feature_dim
self.visual_feature_dim = opt.visual_feature_dim
self.lexical_feature_dim = opt.lexical_feature_dim
self.conv_width_v = opt.conv_width_v
self.conv_width_a = opt.conv_width_a
self.kernel_size_v = opt.kernel_size_v
self.kernel_size_a = opt.kernel_size_a
self.max_pool_width = opt.max_pool_width
self.rnn_layer_num_v = opt.rnn_layer_num_v
self.rnn_layer_num_a = opt.rnn_layer_num_a
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.dropout_rate = opt.dropout_rate
self.conv1d_v1 = nn.Conv1d( in_channels=opt.visual_feature_dim,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v2 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v3 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_a1 = nn.Conv1d( in_channels=opt.acoustic_feature_dim,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a2 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a3 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.maxpool = nn.MaxPool1d(self.max_pool_width)
self.gru_v = nn.GRU(input_size=self.conv_width_v,
num_layers=self.rnn_layer_num_v,
hidden_size=self.rnn_width,
batch_first=True)
self.gru_a = nn.GRU(input_size=self.conv_width_a,
num_layers=self.rnn_layer_num_a,
hidden_size=self.rnn_width,
batch_first=True)
self.linear_l = nn.Linear(self.lexical_feature_dim, self.linear_width_l)
self.batchnorm_v = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_a = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_l = nn.BatchNorm1d(self.linear_width_l)
self.dropout = nn.Dropout(self.dropout_rate)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 3)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward_v(self, x_v):
x = x_v
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_v1(x)))
x = self.relu(self.maxpool(self.conv1d_v2(x)))
x = self.relu(self.maxpool(self.conv1d_v3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_v(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_v(self.dropout(x))
return x
def forward_a(self, x_a):
x = x_a
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_a1(x)))
x = self.relu(self.maxpool(self.conv1d_a2(x)))
x = self.relu(self.maxpool(self.conv1d_a3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_a(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_a(self.dropout(x))
return x
def forward_l(self, x_l):
x = x_l
x = self.relu(self.linear_l(x))
x = self.batchnorm_l(self.dropout(x))
return x
def encoder(self, x_v, x_a, x_l):
if self.visual_modality:
x_v = self.forward_v(x_v)
if self.acoustic_modality:
x_a = self.forward_a(x_a)
if self.lexical_modality:
x_l = self.forward_l(x_l)
if self.visual_modality:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_v, x_a, x_l), 1)
else:
x = torch.cat((x_v, x_a), 1)
else:
if self.lexical_modality:
x = torch.cat((x_v, x_l), 1)
else:
x = x_v
else:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_a, x_l), 1)
else:
x = x_a
else:
x = x_l
return x
def recognizer(self, x):
x = self.relu(self.linear_1(x))
x = self.softmax(self.linear_2(x))
return x
def forward(self, x_v, x_a, x_l):
x = self.encoder(x_v, x_a, x_l)
x = self.recognizer(x)
return x
class DomainDiscriminator(nn.Module):
def __init__(self, opt):
super(DomainDiscriminator, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.grl = GradientReversal(opt.domain_weight)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 2)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.grl(x)
x = self.relu(self.linear_1(x))
x = self.softmax(self.linear_2(x))
return x
class SpeakerDiscriminator(nn.Module):
def __init__(self, opt):
super(SpeakerDiscriminator, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.grl = GradientReversal(opt.subject_weight)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 22)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.grl(x)
x = self.relu(self.linear_1(x))
x = self.softmax(self.linear_2(x))
return x
|
py
|
1a584ddd0a6e2310cfac1925c63c42278f665e0b
|
from .color_enhanced import color_enhanced_filter
|
py
|
1a584ed4447584abea427dd7157cdffb598db27d
|
import os
from pathlib import Path
from typing import Any, Dict, Union
from unittest.mock import Mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import CPUAccelerator
from pytorch_lightning.plugins import SingleDevicePlugin
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
def test_unsupported_precision_plugins():
"""Test error messages are raised for unsupported precision plugins with CPU."""
trainer = Mock()
accelerator = CPUAccelerator(
training_type_plugin=SingleDevicePlugin(torch.device("cpu")), precision_plugin=MixedPrecisionPlugin()
)
with pytest.raises(MisconfigurationException, match=r"AMP \+ CPU is not supported"):
accelerator.setup(trainer=trainer)
@pytest.mark.parametrize("delay_dispatch", [True, False])
def test_plugin_setup_optimizers_in_pre_dispatch(tmpdir, delay_dispatch):
"""
Test when using a custom training type plugin that delays setup optimizers,
we do not call setup optimizers till ``pre_dispatch``.
"""
class TestModel(BoringModel):
def on_fit_start(self):
if delay_dispatch:
# Ensure we haven't setup optimizers if we've delayed dispatch
assert len(self.trainer.optimizers) == 0
else:
assert len(self.trainer.optimizers) > 0
def on_fit_end(self):
assert len(self.trainer.optimizers) > 0
class CustomPlugin(SingleDevicePlugin):
@property
def setup_optimizers_in_pre_dispatch(self) -> bool:
return delay_dispatch
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=CustomPlugin(device=torch.device("cpu")))
trainer.fit(model)
def test_accelerator_on_reset_dataloader_hooks(tmpdir):
"""
Ensure data-loader hooks are called using an Accelerator.
"""
class CustomAccelerator(CPUAccelerator):
train_count: int = 0
val_count: int = 0
test_count: int = 0
predict_count: int = 0
def on_reset_train_dataloader(self, dataloader):
self.train_count += 1
assert self.lightning_module.trainer.training
return super().on_reset_train_dataloader(dataloader)
def on_reset_val_dataloader(self, dataloader):
self.val_count += 1
assert self.lightning_module.trainer.training or self.lightning_module.trainer.validating
return super().on_reset_val_dataloader(dataloader)
def on_reset_test_dataloader(self, dataloader):
self.test_count += 1
assert self.lightning_module.trainer.testing
return super().on_reset_test_dataloader(dataloader)
def on_reset_predict_dataloader(self, dataloader):
self.predict_count += 1
assert self.lightning_module.trainer.predicting
return super().on_reset_predict_dataloader(dataloader)
model = BoringModel()
accelerator = CustomAccelerator(PrecisionPlugin(), SingleDevicePlugin(device=torch.device("cpu")))
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator=accelerator)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, dataloaders=model.test_dataloader())
# assert that all loader hooks were called
assert accelerator.train_count == 1
assert accelerator.val_count == 1 # only called once during the entire session
assert accelerator.test_count == 1
assert accelerator.predict_count == 1
accelerator = CustomAccelerator(PrecisionPlugin(), SingleDevicePlugin(device=torch.device("cpu")))
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator=accelerator)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
# assert val/test/predict loader hooks were called
assert accelerator.val_count == 1
assert accelerator.test_count == 1
assert accelerator.predict_count == 1
def test_plugin_on_reset_dataloader_hooks(tmpdir):
"""
Ensure data-loader hooks are called using a Plugin.
"""
class CustomPlugin(SingleDevicePlugin):
train_count: int = 0
val_count: int = 0
test_count: int = 0
predict_count: int = 0
def on_reset_train_dataloader(self, dataloader):
self.train_count += 1
assert self.lightning_module.trainer.training
return super().on_reset_train_dataloader(dataloader)
def on_reset_val_dataloader(self, dataloader):
self.val_count += 1
assert self.lightning_module.trainer.training or self.lightning_module.trainer.validating
return super().on_reset_val_dataloader(dataloader)
def on_reset_test_dataloader(self, dataloader):
self.test_count += 1
assert self.lightning_module.trainer.testing
return super().on_reset_test_dataloader(dataloader)
def on_reset_predict_dataloader(self, dataloader):
self.predict_count += 1
assert self.lightning_module.trainer.predicting
return super().on_reset_predict_dataloader(dataloader)
plugin = CustomPlugin(device=torch.device("cpu"))
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=plugin)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, dataloaders=model.test_dataloader())
# assert that all loader hooks were called
assert plugin.train_count == 1
assert plugin.val_count == 1 # only called once during the entire session
assert plugin.test_count == 1
assert plugin.predict_count == 1
plugin = CustomPlugin(device=torch.device("cpu"))
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=plugin)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
# assert val/test/predict loader hooks were called
assert plugin.val_count == 1
assert plugin.test_count == 1
assert plugin.predict_count == 1
def test_restore_checkpoint_after_pre_dispatch_default():
"""
Assert default for restore_checkpoint_after_pre_dispatch is False.
"""
plugin = SingleDevicePlugin(torch.device("cpu"))
accelerator = CPUAccelerator(training_type_plugin=plugin, precision_plugin=PrecisionPlugin())
assert not accelerator.restore_checkpoint_after_pre_dispatch
assert not plugin.restore_checkpoint_after_pre_dispatch
@pytest.mark.parametrize("restore_after_pre_dispatch", [True, False])
def test_restore_checkpoint_after_pre_dispatch(tmpdir, restore_after_pre_dispatch):
"""
Test to ensure that if restore_checkpoint_after_pre_dispatch is True, then we only load the state after
pre-dispatch is called.
"""
class TestPlugin(SingleDevicePlugin):
predispatched_called = False
def pre_dispatch(self) -> None:
super().pre_dispatch()
self.predispatched_called = True
@property
def restore_checkpoint_after_pre_dispatch(self) -> bool:
return restore_after_pre_dispatch
def load_checkpoint_file(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:
assert self.predispatched_called == restore_after_pre_dispatch
return super().load_checkpoint_file(checkpoint_path)
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path)
plugin = TestPlugin(torch.device("cpu"))
accelerator = CPUAccelerator(training_type_plugin=plugin, precision_plugin=PrecisionPlugin())
assert accelerator.restore_checkpoint_after_pre_dispatch == restore_after_pre_dispatch
assert plugin.restore_checkpoint_after_pre_dispatch == restore_after_pre_dispatch
trainer = Trainer(
default_root_dir=tmpdir, accelerator=accelerator, fast_dev_run=True, resume_from_checkpoint=checkpoint_path
)
trainer.fit(model)
for func in (trainer.test, trainer.validate, trainer.predict):
accelerator.training_type_plugin.predispatched_called = False
func(model, ckpt_path=checkpoint_path)
|
py
|
1a585057187b3276baf7751749a80b6b3e35884d
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, GreyCube Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestFluteType(unittest.TestCase):
pass
|
py
|
1a58505d756f629871cf60f95dbc92015e987058
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
import pecan
from pecan.configuration import set_config
from pecan.testing import load_test_app
from oslo_config import cfg
from oslo_config import fixture as fixture_config
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from dcmanager.api import api_config
from dcmanager.common import config
from dcmanager.tests import base
config.register_options()
OPT_GROUP_NAME = 'keystone_authtoken'
cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
def fake_delete_response(self, context):
resp = jsonutils.dumps(context.to_dict())
return resp
class DCManagerApiTest(base.DCManagerTestCase):
def setUp(self):
super(DCManagerApiTest, self).setUp()
self.addCleanup(set_config, {}, overwrite=True)
api_config.test_init()
self.CONF = self.useFixture(fixture_config.Config()).conf
# self.setup_messaging(self.CONF)
self.CONF.set_override('auth_strategy', 'noauth')
self.app = self._make_app()
def _make_app(self, enable_acl=False):
self.config = {
'app': {
'root': 'dcmanager.api.controllers.root.RootController',
'modules': ['dcmanager.api'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
'__force_dict__': True
}
},
}
return load_test_app(self.config)
def tearDown(self):
super(DCManagerApiTest, self).tearDown()
pecan.set_config({}, overwrite=True)
class TestRootController(DCManagerApiTest):
"""Test version listing on root URI."""
def test_get(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
json_body = jsonutils.loads(response.body)
versions = json_body.get('versions')
self.assertEqual(1, len(versions))
def _test_method_returns_405(self, method):
api_method = getattr(self.app, method)
response = api_method('/', expect_errors=True)
self.assertEqual(response.status_int, 405)
def test_post(self):
self._test_method_returns_405('post')
def test_put(self):
self._test_method_returns_405('put')
def test_patch(self):
self._test_method_returns_405('patch')
def test_delete(self):
self._test_method_returns_405('delete')
def test_head(self):
self._test_method_returns_405('head')
class TestErrors(DCManagerApiTest):
def setUp(self):
super(TestErrors, self).setUp()
cfg.CONF.set_override('admin_tenant', 'fake_tenant_id',
group='cache')
def test_404(self):
response = self.app.get('/assert_called_once', expect_errors=True)
self.assertEqual(response.status_int, 404)
def test_bad_method(self):
fake_tenant = uuidutils.generate_uuid()
fake_url = '/v1.0/%s/bad_method' % fake_tenant
response = self.app.patch(fake_url,
expect_errors=True)
self.assertEqual(response.status_int, 404)
class TestRequestID(DCManagerApiTest):
def test_request_id(self):
response = self.app.get('/')
self.assertIn('x-openstack-request-id', response.headers)
self.assertTrue(
response.headers['x-openstack-request-id'].startswith('req-'))
id_part = response.headers['x-openstack-request-id'].split('req-')[1]
self.assertTrue(uuidutils.is_uuid_like(id_part))
class TestKeystoneAuth(DCManagerApiTest):
def setUp(self):
super(DCManagerApiTest, self).setUp()
self.addCleanup(set_config, {}, overwrite=True)
api_config.test_init()
self.CONF = self.useFixture(fixture_config.Config()).conf
cfg.CONF.set_override('auth_strategy', 'keystone')
self.app = self._make_app()
def test_auth_not_enforced_for_root(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
|
py
|
1a58515fd95b20094d771f94f5e32e900c9b14e5
|
""" Hyperparameters for Large Scale Data Collection (LSDC) """
import os.path
from visual_mpc.policy.cem_controllers.variants.ensemble_vidpred import CEM_Controller_Ensemble_Vidpred
from visual_mpc.agent.benchmarking_agent import BenchmarkAgent
from visual_mpc.envs.mujoco_env.cartgripper_env.autograsp_env import AutograspCartgripperEnv
import numpy as np
BASE_DIR = '/'.join(str.split(__file__, '/')[:-1])
current_dir = os.path.dirname(os.path.realpath(__file__))
env_params = {
'num_objects': 1,
'object_mass': 0.5,
'friction': 1.0,
'finger_sensors': True,
'minlen': 0.03,
'maxlen': 0.06,
'object_object_mindist': 0.15,
'cube_objects': True,
'autograsp': {'zthresh': -0.06, 'touchthresh': 0.0, 'reopen': True}
}
agent = {
'type': BenchmarkAgent,
'env': (AutograspCartgripperEnv, env_params),
'T': 30,
'image_height' : 48,
'image_width' : 64,
'data_save_dir': BASE_DIR,
'make_final_gif_pointoverlay': True,
'record': BASE_DIR + '/record/',
'num_load_steps': 16,
'start_goal_confs': os.environ['VMPC_DATA_DIR'] + '/ensemble_lifting_tasks',
'current_dir': current_dir
}
policy = {
'verbose':True,
'initial_std': 0.04, # std dev. in xy
'initial_std_lift': 0.6, # std dev. in xy
'initial_std_rot': np.pi / 32,
'type': CEM_Controller_Ensemble_Vidpred,
'rejection_sampling': False,
'replan_interval': 10,
'num_samples': [800, 400],
}
config = {
'current_dir': current_dir,
'save_data': True,
'save_raw_images': True,
'start_index':0,
'end_index': 88,
'agent': agent,
'policy': policy,
}
|
py
|
1a5851b489aeee96cc3d275df45d9be2b13a8517
|
import json, os
import math, copy, time
import numpy as np
from collections import defaultdict
import pandas as pd
from utils import *
import torch  # explicit import for the tensor construction below (math is already imported above)
from tqdm import tqdm
import seaborn as sb
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import dill
from functools import partial
import multiprocessing as mp
class Graph():
def __init__(self):
super(Graph, self).__init__()
'''
node_forward and node_bacward are only used when building the data;
afterwards they are transformed into node_feature (a DataFrame).
node_forward: name -> node_id
node_bacward: node_id -> feature_dict
node_feature: a DataFrame containing all features
'''
self.node_forward = defaultdict(lambda: {})
self.node_bacward = defaultdict(lambda: [])
self.node_feature = defaultdict(lambda: [])
'''
edge_list: index the adjacency matrix (time) by
<target_type, source_type, relation_type, target_id, source_id>
'''
self.edge_list = defaultdict( #target_type
lambda: defaultdict( #source_type
lambda: defaultdict( #relation_type
lambda: defaultdict( #target_id
lambda: defaultdict( #source_id(
lambda: int # time
)))))
self.times = {}
def add_node(self, node):
nfl = self.node_forward[node['type']]
if node['id'] not in nfl:
self.node_bacward[node['type']] += [node]
ser = len(nfl)
nfl[node['id']] = ser
return ser
return nfl[node['id']]
def add_edge(self, source_node, target_node, time = None, relation_type = None, directed = True):
edge = [self.add_node(source_node), self.add_node(target_node)]
'''
Add bi-directional edges with different relation type
'''
self.edge_list[target_node['type']][source_node['type']][relation_type][edge[1]][edge[0]] = time
if directed:
self.edge_list[source_node['type']][target_node['type']]['rev_' + relation_type][edge[0]][edge[1]] = time
else:
self.edge_list[source_node['type']][target_node['type']][relation_type][edge[0]][edge[1]] = time
self.times[time] = True
def update_node(self, node):
nbl = self.node_bacward[node['type']]
ser = self.add_node(node)
for k in node:
if k not in nbl[ser]:
nbl[ser][k] = node[k]
def get_meta_graph(self):
types = self.get_types()
metas = []
for target_type in self.edge_list:
for source_type in self.edge_list[target_type]:
for r_type in self.edge_list[target_type][source_type]:
metas += [(target_type, source_type, r_type)]
return metas
def get_types(self):
return list(self.node_feature.keys())
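# Illustrative usage sketch (hypothetical node ids, types and relation name) showing how
# add_edge() populates node_forward, node_bacward and the nested edge_list described above:
#
#   g = Graph()
#   author = {'id': 'a1', 'type': 'author'}
#   paper = {'id': 'p1', 'type': 'paper'}
#   g.add_edge(author, paper, time=2015, relation_type='writes')
#   # g.node_forward['author'] == {'a1': 0}; g.node_bacward['paper'][0] is the paper dict
#   # g.edge_list['paper']['author']['writes'][0][0] == 2015
#   # g.edge_list['author']['paper']['rev_writes'][0][0] == 2015  (reverse edge, since directed=True)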
def sample_subgraph(graph, time_range, sampled_depth = 2, sampled_number = 8, inp = None, feature_extractor = feature_OAG):
'''
Sample a sub-graph based on the connections of other nodes with the currently sampled nodes.
We maintain budgets for each node type, indexed by <node_id, time>.
Currently sampled nodes are stored in layer_data.
After nodes are sampled, we construct the sampled adjacency matrix.
'''
layer_data = defaultdict( #target_type
lambda: {} # {target_id: [ser, time]}
)
budget = defaultdict( #source_type
lambda: defaultdict( #source_id
lambda: [0., 0] #[sampled_score, time]
))
new_layer_adj = defaultdict( #target_type
lambda: defaultdict( #source_type
lambda: defaultdict( #relation_type
lambda: [] #[target_id, source_id]
)))
'''
For each node being sampled, we find all of its neighbors and add their
degree counts to the budget. Note that some nodes have very large
neighborhoods (such as fields and venues); for those cases we only
consider at most `sampled_number` randomly chosen neighbors.
'''
def add_budget(te, target_id, target_time, layer_data, budget):
for source_type in te:
tes = te[source_type]
for relation_type in tes:
if relation_type == 'self' or target_id not in tes[relation_type]:
continue
adl = tes[relation_type][target_id]
if len(adl) < sampled_number:
sampled_ids = list(adl.keys())
else:
sampled_ids = np.random.choice(list(adl.keys()), sampled_number, replace = False)
for source_id in sampled_ids:
source_time = adl[source_id]
if source_time == None:
source_time = target_time
if source_time > np.max(list(time_range.keys())) or source_id in layer_data[source_type]:
continue
budget[source_type][source_id][0] += 1. / len(sampled_ids)
budget[source_type][source_id][1] = source_time
'''
First add the sampled nodes, then update the budget.
'''
for _type in inp:
for _id, _time in inp[_type]:
layer_data[_type][_id] = [len(layer_data[_type]), _time]
for _type in inp:
te = graph.edge_list[_type]
for _id, _time in inp[_type]:
add_budget(te, _id, _time, layer_data, budget)
'''
We recursively expand the sampled graph by sampled_depth.
Each time we sample a fixed number of nodes for each budget,
based on the accumulated degree.
'''
for layer in range(sampled_depth):
sts = list(budget.keys())
for source_type in sts:
te = graph.edge_list[source_type]
keys = np.array(list(budget[source_type].keys()))
if sampled_number > len(keys):
'''
Directly sample all the nodes
'''
sampled_ids = np.arange(len(keys))
else:
'''
Sample based on accumulated degree
'''
score = np.array(list(budget[source_type].values()))[:,0] ** 2
score = score / np.sum(score)
sampled_ids = np.random.choice(len(score), sampled_number, p = score, replace = False)
sampled_keys = keys[sampled_ids]
'''
First add the sampled nodes, then update the budget.
'''
for k in sampled_keys:
layer_data[source_type][k] = [len(layer_data[source_type]), budget[source_type][k][1]]
for k in sampled_keys:
add_budget(te, k, budget[source_type][k][1], layer_data, budget)
budget[source_type].pop(k)
'''
Prepare feature, time and adjacency matrix for the sampled graph
'''
feature, times, indxs, texts = feature_extractor(layer_data, graph)
edge_list = defaultdict( #target_type
lambda: defaultdict( #source_type
lambda: defaultdict( #relation_type
lambda: [] # [target_id, source_id]
)))
for _type in layer_data:
for _key in layer_data[_type]:
_ser = layer_data[_type][_key][0]
edge_list[_type][_type]['self'] += [[_ser, _ser]]
'''
Reconstruct the sampled adjacency matrix by checking whether each
link exists in the original graph.
'''
for target_type in graph.edge_list:
te = graph.edge_list[target_type]
for source_type in te:
tes = te[source_type]
for relation_type in tes:
tesr = tes[relation_type]
for target_key in layer_data[target_type]:
target_ser = layer_data[target_type][target_key][0]
if target_key not in tesr:
continue
tesrt = tesr[target_key]
for source_key in layer_data[source_type]:
source_ser = layer_data[source_type][source_key][0]
'''
Check whether each link (target_id, source_id) exists in the original adjacency matrix
'''
if source_key in tesrt:
edge_list[target_type][source_type][relation_type] += [[target_ser, source_ser]]
return feature, times, edge_list, indxs, texts
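# Illustrative call sketch (hypothetical seed ids/times; assumes `graph` is a populated Graph and
# `time_range` is a dict keyed by the timestamps to include, like graph.times). `inp` maps each
# seed node type to a list of (node_id, time) pairs, matching the `for _id, _time in inp[_type]` loops:
#
#   seed = {'paper': [(0, 2015), (1, 2016)]}
#   feature, times, edge_list, indxs, texts = sample_subgraph(
#       graph, time_range, sampled_depth=2, sampled_number=8, inp=seed)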
def to_torch(feature, time, edge_list, graph):
'''
Transform a sampled sub-graph into PyTorch tensors.
node_dict: {node_type: <node_number, node_type_ID>}; node_number is used to trace the nodes back to the original graph.
edge_dict: {edge_type: edge_type_ID}
'''
node_dict = {}
node_feature = []
node_type = []
node_time = []
edge_index = []
edge_type = []
edge_time = []
node_num = 0
types = graph.get_types()
for t in types:
node_dict[t] = [node_num, len(node_dict)]
node_num += len(feature[t])
for t in types:
node_feature += list(feature[t])
node_time += list(time[t])
node_type += [node_dict[t][1] for _ in range(len(feature[t]))]
edge_dict = {e[2]: i for i, e in enumerate(graph.get_meta_graph())}
edge_dict['self'] = len(edge_dict)
for target_type in edge_list:
for source_type in edge_list[target_type]:
for relation_type in edge_list[target_type][source_type]:
for ii, (ti, si) in enumerate(edge_list[target_type][source_type][relation_type]):
tid, sid = ti + node_dict[target_type][0], si + node_dict[source_type][0]
edge_index += [[sid, tid]]
edge_type += [edge_dict[relation_type]]
'''
Our times range from 1900 to 2020, so the largest span is 120.
'''
edge_time += [node_time[tid] - node_time[sid] + 120]
node_feature = torch.FloatTensor(node_feature)
node_type = torch.LongTensor(node_type)
edge_time = torch.LongTensor(edge_time)
edge_index = torch.LongTensor(edge_index).t()
edge_type = torch.LongTensor(edge_type)
return node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict
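# Illustrative chaining sketch (hypothetical `seed`, continuing the example above): the sampled
# python-side structures are converted into tensors ready for a PyTorch model:
#
#   feature, times, edge_list, indxs, texts = sample_subgraph(graph, time_range, inp=seed)
#   node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict = \
#       to_torch(feature, times, edge_list, graph)
#   # edge_index has shape [2, num_edges]; edge_time stores target_time - source_time + 120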
|
py
|
1a5851dfc9d3a50468a1d37540895f5be1bac42d
|
import datetime
from app import app, init_app, db
from app.models import User
# Call `init_app()` in `__init__.py` here
# We declare and create the global `app` instance in `__init__.py`
# This allows us to access it, and to use decorators like `@app.context_processor`, from other modules,
# which would not be the case if we created `app` in this module
init_app()
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User}
@app.context_processor
def inject_today_date():
return {'today_date': datetime.date.today()}
try:
from myconfig import MyConfig
except ImportError:
MyConfig = {}
SERVER_HOST = getattr(MyConfig, 'SERVER_HOST', 'closed')
if __name__ == '__main__':
if SERVER_HOST == 'closed':
app.run(debug=True, use_debugger=False, use_reloader=False, passthrough_errors=True)
else:
app.run(host='0.0.0.0', debug=False, use_debugger=False, use_reloader=False, passthrough_errors=True)
|
py
|
1a5852292507537867ab96a0c97225ebdae3b90d
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
import socket
from unittest import TestCase
from mock.mock import patch
import unittest
class TestHDP23StackAdvisor(TestCase):
def setUp(self):
import imp
self.maxDiff = None
if 'util' in dir(unittest): unittest.util._MAX_LENGTH=2000
self.testDirectory = os.path.dirname(os.path.abspath(__file__))
stackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
hdp206StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
hdp21StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py')
hdp22StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.2/services/stack_advisor.py')
hdp23StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.3/services/stack_advisor.py')
hdp23StackAdvisorClassName = 'HDP23StackAdvisor'
with open(stackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp206StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp21StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp21StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp22StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp22StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp23StackAdvisorPath, 'rb') as fp:
stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp23StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
clazz = getattr(stack_advisor_impl, hdp23StackAdvisorClassName)
self.stackAdvisor = clazz()
# substitute method in the instance
self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
def load_json(self, filename):
file = os.path.join(self.testDirectory, filename)
with open(file, 'rb') as f:
data = json.load(f)
return data
def prepareHosts(self, hostsNames):
hosts = { "items": [] }
for hostName in hostsNames:
nextHost = {"Hosts":{"host_name" : hostName}}
hosts["items"].append(nextHost)
return hosts
@patch('__builtin__.open')
@patch('os.path.exists')
def get_system_min_uid_magic(self, exists_mock, open_mock):
class MagicFile(object):
def read(self):
return """
#test line UID_MIN 200
UID_MIN 500
"""
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __enter__(self):
return self
exists_mock.return_value = True
open_mock.return_value = MagicFile()
return self.get_system_min_uid_real()
def fqdn_mock_result(value=None):
return 'c6401.ambari.apache.org' if value is None else value
@patch('socket.getfqdn', side_effect=fqdn_mock_result)
def test_getComponentLayoutValidations_sparkts_no_hive(self, socket_mock):
""" Test SparkTS is picked when Hive is not installed """
hosts = self.load_json("sparkts-host.json")
services = self.load_json("services-sparkts.json")
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item for sublist in componentsListList for item in sublist]
sparkTS = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SPARK_THRIFTSERVER"]
hiveMetaStore = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HIVE_METASTORE"]
self.assertEquals(len(sparkTS), 1)
self.assertEquals(len(hiveMetaStore), 0)
validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
expected = {'component-name': 'SPARK_THRIFTSERVER', 'message': 'Spark Thrift Server requires HIVE_METASTORE to be present in the cluster.', 'type': 'host-component', 'level': 'ERROR'}
self.assertEquals(validations[0], expected)
@patch('socket.getfqdn', side_effect=fqdn_mock_result)
def test_getComponentLayoutValidations_sparkts_with_hive(self, socket_mock):
""" Test SparkTS is picked when Hive is installed """
hosts = self.load_json("sparkts-host.json")
services = self.load_json("services-sparkts-hive.json")
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item for sublist in componentsListList for item in sublist]
sparkTS = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SPARK_THRIFTSERVER"]
hiveMetaStore = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HIVE_METASTORE"]
self.assertEquals(len(sparkTS), 1)
self.assertEquals(len(hiveMetaStore), 1)
validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
self.assertEquals(len(validations), 0)
def test_recommendHDFSConfigurations(self):
configurations = {
"hdfs-site": {
"properties": {
"dfs.namenode.inode.attributes.provider.class": "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer",
}
},
"ranger-hdfs-plugin-properties": {
"properties": {
"ranger-hdfs-plugin-enabled": "No"
}
}
}
clusterData = {
"totalAvailableRam": 2048,
"hBaseInstalled": True,
"hbaseRam": 112,
"reservedRam": 128
}
hosts = {
"items": [
{
"Hosts": {
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
}]}
services = {
"services":
[
{
"StackServices": {
"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components": [
]
}
],
"Versions": {
"stack_version": "2.3"
},
"configurations": configurations,
"ambari-server-properties": {"ambari-server.user":"ambari_user"}
}
# Test with Ranger HDFS plugin disabled
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hdfs-site']['property_attributes']['dfs.namenode.inode.attributes.provider.class'], {'delete': 'true'}, "Test with Ranger HDFS plugin is disabled")
# Test with Ranger HDFS plugin is enabled
configurations['hdfs-site']['properties'] = {}
configurations['hdfs-site']['property_attributes'] = {}
services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hdfs-site']['properties']['dfs.namenode.inode.attributes.provider.class'], 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer', "Test with Ranger HDFS plugin is enabled")
def test_recommendYARNConfigurations(self):
configurations = {}
servicesList = ["YARN"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
],
"public_host_name" : "c6401.ambari.apache.org",
"host_name" : "c6401.ambari.apache.org"
}
}
]
}
services = {
"context" : {
"call_type" : "recommendConfigurations"
},
"services" : [ {
"StackServices":{
"service_name": "YARN",
},
"Versions": {
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "NODEMANAGER",
"hostnames": ["c6401.ambari.apache.org"]
}
}
]
}
],
"configurations": {
"yarn-site": {
"properties": {
"yarn.authorization-provider": "org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer"
}
},
"ranger-yarn-plugin-properties": {
"properties": {
"ranger-yarn-plugin-enabled": "No"
}
}
}
}
clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
# Test with Ranger YARN plugin disabled
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['yarn-site']['property_attributes']['yarn.authorization-provider'], {'delete': 'true'}, "Test with Ranger HDFS plugin is disabled")
# Test with Ranger YARN plugin is enabled
configurations['yarn-site']['properties'] = {}
configurations['yarn-site']['property_attributes'] = {}
services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['yarn-site']['properties']['yarn.authorization-provider'], 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer', "Test with Ranger YARN plugin enabled")
def test_recommendKAFKAConfigurations(self):
configurations = {}
clusterData = {
"totalAvailableRam": 2048,
"hBaseInstalled": True,
"hbaseRam": 112,
"reservedRam": 128
}
services = {
"services":
[
{
"StackServices": {
"service_name" : "KAFKA",
"service_version" : "2.6.0.2.2"
}
},
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0.2.3"
}
},
{
"StackServices": {
"service_name": "AMBARI_METRICS"
},
"components": [{
"StackServiceComponents": {
"component_name": "METRICS_COLLECTOR",
"hostnames": ["host1"]
}
}, {
"StackServiceComponents": {
"component_name": "METRICS_MONITOR",
"hostnames": ["host1"]
}
}]
},
{
"StackServices": {
"service_name": "ZOOKEEPER"
},
"components": [{
"StackServiceComponents": {
"component_name": "ZOOKEEPER_SERVER",
"hostnames": ["host1"]
}
}]
}
],
"Versions": {
"stack_version": "2.3"
},
"configurations": {
"core-site": {
"properties": {}
},
"cluster-env": {
"properties": {
"security_enabled" : "true"
},
"property_attributes": {}
},
"kafka-broker": {
"properties": {
"authorizer.class.name" : "kafka.security.auth.SimpleAclAuthorizer"
},
"property_attributes": {}
},
"ranger-kafka-plugin-properties": {
"properties": {
"ranger-kafka-plugin-enabled": "No",
"zookeeper.connect": ""
}
},
"kafka-log4j": {
"properties": {
"content": "kafka.logs.dir=logs"
}
},
"zoo.cfg" : {
"properties": {
"clientPort": "2181"
}
}
}
}
# Test authorizer.class.name with Ranger Kafka plugin disabled in non-kerberos environment
services['configurations']['cluster-env']['properties']['security_enabled'] = "false"
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['property_attributes']['authorizer.class.name'], {'delete': 'true'}, "Test authorizer.class.name with Ranger Kafka plugin is disabled in non-kerberos environment")
# Test authorizer.class.name with Ranger Kafka plugin disabled in kerberos environment
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
services['configurations']['kafka-broker']['properties']['security.inter.broker.protocol'] = 'PLAINTEXTSASL'
services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer'
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'kafka.security.auth.SimpleAclAuthorizer' , "Test authorizer.class.name with Ranger Kafka plugin disabled in kerberos environment")
# Advise 'PLAINTEXTSASL' for secure cluster by default
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEqual(configurations['kafka-broker']['properties']['security.inter.broker.protocol'], 'PLAINTEXTSASL')
# Secure security.inter.broker.protocol values should be retained by stack advisor
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
for proto in ('PLAINTEXTSASL', 'SASL_PLAINTEXT', 'SASL_SSL'):
services['configurations']['kafka-broker']['properties']['security.inter.broker.protocol'] = proto
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEqual(configurations['kafka-broker']['properties']['security.inter.broker.protocol'], proto)
# Test authorizer.class.name with Ranger Kafka plugin enabled in non-kerberos environment
services['configurations']['cluster-env']['properties']['security_enabled'] = "false"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
del services['configurations']['kafka-broker']['properties']['security.inter.broker.protocol']
services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'kafka.security.auth.SimpleAclAuthorizer'
services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer', "Test authorizer.class.name with Ranger Kafka plugin enabled in non-kerberos environment")
services['configurations']['cluster-env']['properties']['security_enabled'] = "false"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
services['configurations']['kafka-broker']['properties']['security.inter.broker.protocol'] = 'PLAINTEXTSASL'
services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'kafka.security.auth.SimpleAclAuthorizer'
services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer', "Test authorizer.class.name with Ranger Kafka plugin enabled in non-kerberos environment")
self.assertEquals(configurations['ranger-kafka-plugin-properties']['properties']['zookeeper.connect'], 'host1:2181')
self.assertTrue('security.inter.broker.protocol' not in configurations['kafka-broker']['properties'])
# Test kafka-log4j content when Ranger plugin for Kafka is enabled
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
log4jContent = services['configurations']['kafka-log4j']['properties']['content']
newRangerLog4content = "\nlog4j.appender.rangerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.rangerAppender.DatePattern='.'yyyy-MM-dd-HH\n" \
"log4j.appender.rangerAppender.File=${kafka.logs.dir}/ranger_kafka.log\nlog4j.appender.rangerAppender.layout" \
"=org.apache.log4j.PatternLayout\nlog4j.appender.rangerAppender.layout.ConversionPattern=%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n\n" \
"log4j.logger.org.apache.ranger=INFO, rangerAppender"
expectedLog4jContent = log4jContent + newRangerLog4content
self.assertEquals(configurations['kafka-log4j']['properties']['content'], expectedLog4jContent, "Test kafka-log4j content when Ranger plugin for Kafka is enabled")
# Test kafka.metrics.reporters when AMBARI_METRICS is present in services
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEqual(configurations['kafka-broker']['properties']['kafka.metrics.reporters'],
'org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter')
def test_recommendHBASEConfigurations(self):
configurations = {}
clusterData = {
"totalAvailableRam": 2048,
"hBaseInstalled": True,
"hbaseRam": 112,
"reservedRam": 128
}
expected = {
"hbase-site": {
"properties": {
"hbase.bucketcache.size": "92160",
"hbase.bucketcache.percentage.in.combinedcache": "1.0000",
"hbase.regionserver.global.memstore.size": "0.4",
"hfile.block.cache.size": "0.4",
"hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint",
"hbase.coprocessor.master.classes": "",
"hbase.coprocessor.regionserver.classes": "",
"hbase.region.server.rpc.scheduler.factory.class": "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory",
'hbase.regionserver.wal.codec': 'org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec',
"hbase.bucketcache.ioengine": "offheap",
"phoenix.functions.allowUserDefinedFunctions": "true"
},
"property_attributes": {
"hbase.coprocessor.regionserver.classes": {
"delete": "true"
},
"hbase.bucketcache.percentage.in.combinedcache": {
"delete": "true"
}
}
},
"hbase-env": {
"properties": {
"hbase_master_heapsize": "1024",
"hbase_max_direct_memory_size": "94208",
"hbase_regionserver_heapsize": "20480"
}
}
}
services = {
"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"DATANODE",
"custom_commands":[
],
"display_name":"DataNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"0+",
"component_category":"SLAVE",
"component_name":"JOURNALNODE",
"custom_commands":[
],
"display_name":"JournalNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
"Dependencies":{
"component_name":"HDFS_CLIENT",
"dependent_component_name":"JOURNALNODE",
"dependent_service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2"
}
}
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1-2",
"component_category":"MASTER",
"component_name":"NAMENODE",
"custom_commands":[
"DECOMMISSION",
"REBALANCEHDFS"
],
"display_name":"NameNode",
"is_client":"false",
"is_master":"true",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host2"
]
},
"dependencies":[
]
},
],
}],
"Versions": {
"stack_version": "2.3"
},
"configurations": {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "2048"
}
},
"hbase-env": {
"properties": {
"phoenix_sql_enabled": "true"
}
},
"hbase-site": {
"properties": {
"hbase.coprocessor.regionserver.classes": ""
}
}
}
}
# Test
self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
# Test
clusterData['hbaseRam'] = '4'
expected["hbase-site"]["property_attributes"]["hbase.bucketcache.size"] = {"delete": "true"}
expected["hbase-site"]["property_attributes"]["hbase.bucketcache.ioengine"] = {"delete": "true"}
expected["hbase-site"]["property_attributes"]["hbase.bucketcache.percentage.in.combinedcache"] = {"delete": "true"}
expected["hbase-env"]["property_attributes"] = {"hbase_max_direct_memory_size" : {"delete": "true"}}
expected["hbase-env"]["properties"]["hbase_master_heapsize"] = "1024"
expected["hbase-env"]["properties"]["hbase_regionserver_heapsize"] = "4096"
self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
# Test - default recommendations should have certain configs deleted. HAS TO BE LAST TEST.
services["configurations"] = {"hbase-site": {"properties": {"phoenix.functions.allowUserDefinedFunctions": '', "hbase.rpc.controllerfactory.class": '', "hbase.region.server.rpc.scheduler.factory.class": ''}}}
configurations = {}
self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['hbase-site']['property_attributes']['phoenix.functions.allowUserDefinedFunctions'], {'delete': 'true'})
self.assertEquals(configurations['hbase-site']['property_attributes']['hbase.rpc.controllerfactory.class'], {'delete': 'true'})
self.assertEquals(configurations['hbase-site']['property_attributes']['hbase.region.server.rpc.scheduler.factory.class'], {'delete': 'true'})
self.assertEquals(configurations['hbase-site']['properties']['hbase.regionserver.wal.codec'], "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec")
def test_recommendHiveConfigurations(self):
self.maxDiff = None
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192",
},
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'yarn-site': {
'properties': {
'yarn.scheduler.minimum-allocation-mb': '256',
'yarn.scheduler.maximum-allocation-mb': '8192'
}
},
'hive-env': {
'properties': {
'hive_exec_orc_storage_strategy': 'SPEED',
'hive_security_authorization': 'None',
'hive_timeline_logging_enabled': 'true',
'hive_txn_acid': 'off',
'hive.atlas.hook': 'false'
}
},
'hive-site': {
'properties': {
'hive.server2.enable.doAs': 'true',
'hive.server2.tez.default.queues': "queue1,queue2",
'hive.server2.tez.initialize.default.sessions': 'false',
'hive.server2.tez.sessions.per.default.queue': '1',
'hive.auto.convert.join.noconditionaltask.size': '214748364',
'hive.compactor.initiator.on': 'false',
'hive.compactor.worker.threads': '0',
'hive.compute.query.using.stats': 'true',
'hive.exec.dynamic.partition.mode': 'strict',
'hive.exec.failure.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.orc.compression.strategy': 'SPEED',
'hive.exec.orc.default.compress': 'ZLIB',
'hive.exec.orc.default.stripe.size': '67108864',
'hive.exec.orc.encoding.strategy': 'SPEED',
'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.pre.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.reducers.bytes.per.reducer': '67108864',
'hive.execution.engine': 'mr',
'hive.optimize.index.filter': 'true',
'hive.optimize.sort.dynamic.partition': 'false',
'hive.prewarm.enabled': 'false',
'hive.prewarm.numcontainers': '3',
'hive.security.authorization.enabled': 'false',
'hive.server2.use.SSL': 'false',
'hive.stats.fetch.column.stats': 'true',
'hive.stats.fetch.partition.stats': 'true',
'hive.support.concurrency': 'false',
'hive.tez.auto.reducer.parallelism': 'true',
'hive.tez.container.size': '768',
'hive.tez.dynamic.partition.pruning': 'true',
'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
'hive.vectorized.execution.enabled': 'true',
'hive.vectorized.execution.reduce.enabled': 'false',
'hive.security.metastore.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider',
'hive.security.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory'
},
'property_attributes': {
'hive.auto.convert.join.noconditionaltask.size': {'maximum': '644245094'},
'hive.server2.authentication.pam.services': {'delete': 'true'},
'hive.server2.custom.authentication.class': {'delete': 'true'},
'hive.server2.authentication.kerberos.principal': {'delete': 'true'},
'hive.server2.authentication.kerberos.keytab': {'delete': 'true'},
'hive.server2.authentication.ldap.url': {'delete': 'true'},
'hive.server2.tez.default.queues': {
'entries': [{'value': 'queue1', 'label': 'queue1 queue'}, {'value': 'queue2', 'label': 'queue2 queue'}]
},
'atlas.cluster.name': {'delete': 'true'},
'atlas.rest.address': {'delete': 'true'},
'datanucleus.rdbms.datastoreAdapterClassName': {'delete': 'true'},
'hive.tez.container.size': {'maximum': '8192', 'minimum': '256'}
}
},
'hiveserver2-site': {
'properties': {
},
'property_attributes': {
'hive.security.authorization.manager': {'delete': 'true'},
'hive.security.authenticator.manager': {'delete': 'true'}
}
},
'webhcat-site': {
'properties': {
'templeton.hadoop.queue.name': 'queue2'
}
}
}
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
"StackServices": {
"service_name": "YARN",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.2"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "APP_TIMELINE_SERVER",
"display_name": "App Timeline Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "SLAVE",
"component_name": "NODEMANAGER",
"display_name": "NodeManager",
"is_client": "false",
"is_master": "false",
"hostnames": [
"c6403.ambari.apache.org"
]
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1-2",
"component_category": "MASTER",
"component_name": "RESOURCEMANAGER",
"display_name": "ResourceManager",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "CLIENT",
"component_name": "YARN_CLIENT",
"display_name": "YARN Client",
"is_client": "true",
"is_master": "false",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": {
"capacity-scheduler": {
"properties": {
"capacity-scheduler" :"yarn.scheduler.capacity.root.queues=queue1,queue2"
}
},
"hive-env": {
"properties": {
"hive.atlas.hook": "false"
}
},
"hive-site": {
"properties": {
"hive.server2.authentication": "none",
"hive.server2.authentication.ldap.url": "",
"hive.server2.authentication.ldap.baseDN": "",
"hive.server2.authentication.kerberos.keytab": "",
"hive.server2.authentication.kerberos.principal": "",
"hive.server2.authentication.pam.services": "",
"hive.server2.custom.authentication.class": "",
"hive.cbo.enable": "true"
}
},
"hiveserver2-site": {
"properties": {
"hive.security.authorization.manager": "",
"hive.security.authenticator.manager": ""
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6402.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6402.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6402.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6403.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6403.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6403.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.7
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.8
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.8_44'}
expected['hive-site']['properties']['hive.tez.java.opts'] = "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps"
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.9
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.9.2_44'}
expected['hive-site']['properties']['hive.tez.java.opts'] = "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps"
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_recommendHiveConfigurations_with_atlas(self):
self.maxDiff = None
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192",
},
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'yarn-site': {
'properties': {
'yarn.scheduler.minimum-allocation-mb': '256',
'yarn.scheduler.maximum-allocation-mb': '8192'
}
},
'hive-env': {
'properties': {
'hive_exec_orc_storage_strategy': 'SPEED',
'hive_security_authorization': 'None',
'hive_timeline_logging_enabled': 'true',
'hive_txn_acid': 'off',
'hive.atlas.hook': 'true'
}
},
'hive-site': {
'properties': {
'hive.server2.enable.doAs': 'true',
'hive.server2.tez.default.queues': "queue1,queue2",
'hive.server2.tez.initialize.default.sessions': 'false',
'hive.server2.tez.sessions.per.default.queue': '1',
'hive.auto.convert.join.noconditionaltask.size': '214748364',
'hive.compactor.initiator.on': 'false',
'hive.compactor.worker.threads': '0',
'hive.compute.query.using.stats': 'true',
'hive.exec.dynamic.partition.mode': 'strict',
'hive.exec.failure.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.orc.compression.strategy': 'SPEED',
'hive.exec.orc.default.compress': 'ZLIB',
'hive.exec.orc.default.stripe.size': '67108864',
'hive.exec.orc.encoding.strategy': 'SPEED',
'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook,org.apache.atlas.hive.hook.HiveHook',
'hive.exec.pre.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.reducers.bytes.per.reducer': '67108864',
'hive.execution.engine': 'mr',
'hive.optimize.index.filter': 'true',
'hive.optimize.sort.dynamic.partition': 'false',
'hive.prewarm.enabled': 'false',
'hive.prewarm.numcontainers': '3',
'hive.security.authorization.enabled': 'false',
'hive.server2.use.SSL': 'false',
'hive.stats.fetch.column.stats': 'true',
'hive.stats.fetch.partition.stats': 'true',
'hive.support.concurrency': 'false',
'hive.tez.auto.reducer.parallelism': 'true',
'hive.tez.container.size': '768',
'hive.tez.dynamic.partition.pruning': 'true',
'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
'hive.vectorized.execution.enabled': 'true',
'hive.vectorized.execution.reduce.enabled': 'false',
'hive.security.metastore.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider',
'hive.security.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory'
},
'property_attributes': {
'hive.auto.convert.join.noconditionaltask.size': {'maximum': '644245094'},
'hive.tez.container.size': {'maximum': '8192', 'minimum': '256'},
'hive.server2.authentication.pam.services': {'delete': 'true'},
'hive.server2.custom.authentication.class': {'delete': 'true'},
'hive.server2.authentication.kerberos.principal': {'delete': 'true'},
'hive.server2.authentication.kerberos.keytab': {'delete': 'true'},
'hive.server2.authentication.ldap.url': {'delete': 'true'},
'hive.server2.tez.default.queues': {
'entries': [{'value': 'queue1', 'label': 'queue1 queue'}, {'value': 'queue2', 'label': 'queue2 queue'}]
},
'atlas.cluster.name': {'delete': 'true'},
'atlas.rest.address': {'delete': 'true'},
'datanucleus.rdbms.datastoreAdapterClassName': {'delete': 'true'}
}
},
'hiveserver2-site': {
'properties': {
},
'property_attributes': {
'hive.security.authorization.manager': {'delete': 'true'},
'hive.security.authenticator.manager': {'delete': 'true'}
}
},
'webhcat-site': {
'properties': {
'templeton.hadoop.queue.name': 'queue2'
}
}
}
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
"StackServices": {
"service_name": "YARN",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.2"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "APP_TIMELINE_SERVER",
"display_name": "App Timeline Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "SLAVE",
"component_name": "NODEMANAGER",
"display_name": "NodeManager",
"is_client": "false",
"is_master": "false",
"hostnames": [
"c6403.ambari.apache.org"
]
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1-2",
"component_category": "MASTER",
"component_name": "RESOURCEMANAGER",
"display_name": "ResourceManager",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "CLIENT",
"component_name": "YARN_CLIENT",
"display_name": "YARN Client",
"is_client": "true",
"is_master": "false",
"hostnames": []
},
"dependencies": []
}
]
},
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
"StackServices": {
"service_name": "ATLAS",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "ATLAS_SERVER",
"display_name": "Atlas Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
}
]
}
],
"configurations": {
"capacity-scheduler": {
"properties": {
"capacity-scheduler" :"yarn.scheduler.capacity.root.queues=queue1,queue2"
}
},
"hive-env": {
"properties": {
"hive.atlas.hook": "false"
}
},
"hive-site": {
"properties": {
"hive.server2.authentication": "none",
"hive.server2.authentication.ldap.url": "",
"hive.server2.authentication.ldap.baseDN": "",
"hive.server2.authentication.kerberos.keytab": "",
"hive.server2.authentication.kerberos.principal": "",
"hive.server2.authentication.pam.services": "",
"hive.server2.custom.authentication.class": "",
"hive.cbo.enable": "true"
}
},
"hiveserver2-site": {
"properties": {
"hive.security.authorization.manager": "",
"hive.security.authenticator.manager": ""
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6402.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6402.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6402.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6403.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6403.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6403.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
@patch('os.path.exists')
@patch('os.path.isdir')
@patch('os.listdir')
def test_recommendTezConfigurations(self, os_listdir_mock, os_isdir_mock, os_exists_mock):
os_exists_mock.return_value = True
os_isdir_mock.return_value = True
os_listdir_mock.return_value = ['TEZ{0.7.0.2.3.0.0-2155}']
self.maxDiff = None
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192",
},
},
"capacity-scheduler": {
"properties": {
"yarn.scheduler.capacity.root.queues": "queue1,queue2"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
"capacity-scheduler": {
"properties": {
"yarn.scheduler.capacity.root.queues": "queue1,queue2"
}
},
"tez-site": {
"properties": {
"tez.task.resource.memory.mb": "768",
"tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.runtime.io.sort.mb": "202",
"tez.session.am.dag.submit.timeout.secs": "600",
"tez.runtime.unordered.output.buffer.size-mb": "57",
"tez.am.resource.memory.mb": "4000",
"tez.queue.name": "queue2",
}
},
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192"
}
}
}
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
"StackServices": {
"service_name": "YARN",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.2"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "APP_TIMELINE_SERVER",
"display_name": "App Timeline Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "SLAVE",
"component_name": "NODEMANAGER",
"display_name": "NodeManager",
"is_client": "false",
"is_master": "false",
"hostnames": [
"c6403.ambari.apache.org"
]
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1-2",
"component_category": "MASTER",
"component_name": "RESOURCEMANAGER",
"display_name": "ResourceManager",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "CLIENT",
"component_name": "YARN_CLIENT",
"display_name": "YARN Client",
"is_client": "true",
"is_master": "false",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": configurations,
"changed-configurations": [ ],
"ambari-server-properties": {}
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6402.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6402.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6402.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6403.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6403.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6403.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
server_host = socket.getfqdn()
for host in hosts["items"]:
if server_host == host["Hosts"]["host_name"]:
server_host = host["Hosts"]["public_host_name"]
tez_ui_url = "http://" + server_host + ":8080/#/main/view/TEZ/tez_cluster_instance"
# Test JDK1.7
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.8
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.8_44'}
expected['tez-site']['properties']['tez.am.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.task.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.9
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.9.2_44'}
expected['tez-site']['properties']['tez.am.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.task.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_validateHiveConfigurations(self):
properties = {"hive_security_authorization": "None",
"hive.exec.orc.default.stripe.size": "8388608",
'hive.tez.container.size': '2048',
'hive.tez.java.opts': '-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.auto.convert.join.noconditionaltask.size': '1100000000'}
recommendedDefaults = {'hive.tez.container.size': '1024',
'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.auto.convert.join.noconditionaltask.size': '1000000000'}
configurations = {
"hive-site": {
"properties": {"hive.security.authorization.enabled": "true", 'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true'}
},
"hive-env": {
"properties": {"hive_security_authorization": "None"}
}
}
services = {
"services": []
}
# Test for 'ranger-hive-plugin-properties' not being in configs
res_expected = []
res = self.stackAdvisor.validateHiveConfigurations(properties, recommendedDefaults, configurations, services, {})
self.assertEquals(res, res_expected)
# This test intentionally calls all validate methods with
# incorrect parameters (empty configs)
def test_noRiskyDictLookups(self):
properties = {}
recommendedDefaults = {}
configurations = {"core-site": {"properties": {}}}
services = {
"services": [],
"Versions": {
"stack_name": "HDP",
"stack_version": "2.3"
},
"configurations": configurations
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"disk_info" : [
{
"available" : "4564632",
"used" : "5230344",
"percent" : "54%",
"size" : "10319160",
"type" : "ext4",
"mountpoint" : "/"
},
{
"available" : "1832436",
"used" : "0",
"percent" : "0%",
"size" : "1832436",
"type" : "tmpfs",
"mountpoint" : "/dev/shm"
}
],
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
def return_c6401_hostname(services, service_name, component_name):
return ["c6401.ambari.apache.org"]
self.stackAdvisor.getComponentHostNames = return_c6401_hostname
validators = self.stackAdvisor.getServiceConfigurationValidators()
# Setting up empty configs and services info
for serviceName, validator in validators.items():
services["services"].extend([{"StackServices": {"service_name": serviceName},
"components": []}])
for siteName in validator.keys():
configurations[siteName] = {"properties": {}}
# Emulate enabled RANGER
services["services"].extend([{"StackServices": {"service_name": "RANGER"},
"components": []}])
configurations["ranger-hbase-plugin-properties"] = {
"ranger-hbase-plugin-enabled": "Yes"
}
exceptionThrown = False
try:
recommendations = self.stackAdvisor.recommendConfigurations(services, hosts)
except Exception as e:
exceptionThrown = True
self.assertTrue(exceptionThrown)
pass
def test_recommendRangerConfigurations(self):
clusterData = {}
# Recommend for not existing DB_FLAVOR and http enabled, HDP-2.3
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0.2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
{
"href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX",
"StackServices": {
"service_name": "KNOX",
"service_version": "0.9.0.2.3",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX/components/KNOX_GATEWAY",
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1+",
"component_category": "MASTER",
"component_name": "KNOX_GATEWAY",
"display_name": "Knox Gateway",
"is_client": "false",
"is_master": "true",
"hostnames": ["c6401.ambari.apache.org"]
},
"dependencies": []
}
]
}
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
"ranger.sso.providerurl": "",
}
}
},
"ambari-server-properties": {
"ambari.ldap.isConfigured" : "true",
"authentication.ldap.bindAnonymously" : "false",
"authentication.ldap.baseDn" : "dc=apache,dc=org",
"authentication.ldap.groupNamingAttr" : "cn",
"authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:389",
"authentication.ldap.userObjectClass" : "posixAccount",
"authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:389",
"authentication.ldap.usernameAttribute" : "uid",
"authentication.ldap.dnAttribute" : "dn",
"authentication.ldap.useSSL" : "false",
"authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
"authentication.ldap.groupMembershipAttr" : "memberUid",
"authentication.ldap.groupObjectClass" : "posixGroup",
"authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
}
}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:7777'
}
},
'ranger-ugsync-site': {
'properties': {
'ranger.usersync.group.objectclass': 'posixGroup',
'ranger.usersync.group.nameattribute': 'cn',
'ranger.usersync.group.memberattributename': 'memberUid',
'ranger.usersync.ldap.binddn': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
'ranger.usersync.ldap.user.nameattribute': 'uid',
'ranger.usersync.ldap.user.objectclass': 'posixAccount',
'ranger.usersync.ldap.url': 'ldap://c6403.ambari.apache.org:389',
'ranger.usersync.ldap.searchBase': 'dc=apache,dc=org'
}
},
'ranger-admin-site': {
'properties': {
"ranger.audit.solr.zookeepers": "NONE",
"ranger.audit.source.type": "solr",
"ranger.sso.providerurl": "https://c6401.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso"
}
},
'ranger-env': {
'properties': {
'ranger-storm-plugin-enabled': 'No',
}
},
'ranger-knox-security': {'properties': {}}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
# Recommend ranger.audit.solr.zookeepers when solrCloud is disabled
services['configurations']['ranger-env'] = {
"properties": {
"is_solrCloud_enabled": "false"
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations['ranger-admin-site']['properties']['ranger.audit.solr.zookeepers'], 'NONE')
def test_recommendRangerKMSConfigurations(self):
clusterData = {}
services = {
"ambari-server-properties": {
"ambari-server.user": "root"
},
"Versions": {
"stack_version" : "2.3",
},
"services": [
{
"StackServices": {
"service_name": "RANGER_KMS",
"service_version": "0.5.0.2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_KMS_SERVER",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": {
"kms-env": {
"properties": {
"kms_user": "kmsname"
}
},
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://host1:8020"
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-properties': {
'properties': {
'DB_FLAVOR': 'ORACLE',
'db_host' : 'c6401.ambari.apache.org:1521:XE',
'db_name' : "XE"
}
},
'cluster-env': {
'properties': {
'security_enabled': 'false'
}
}
},
"forced-configurations": []
}
expected = {
'kms-properties': {
'properties': {}
},
'dbks-site': {
'properties': {
"ranger.ks.jpa.jdbc.driver" : "oracle.jdbc.driver.OracleDriver",
"ranger.ks.jpa.jdbc.url" : "jdbc:oracle:thin:@c6401.ambari.apache.org:1521:XE"
}
},
'core-site': {
'properties': {
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-site': {
'properties': {
},
'property_attributes': {
'hadoop.kms.proxyuser.HTTP.hosts': {'delete': 'true'},
'hadoop.kms.proxyuser.HTTP.users': {'delete': 'true'},
'hadoop.kms.proxyuser.root.hosts': {'delete': 'true'},
'hadoop.kms.proxyuser.root.users': {'delete': 'true'}
}
}
}
# non kerberized cluster. There should be no proxyuser configs
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
# kerberized cluster
services['services'].append({
"StackServices": {
"service_name": "KERBEROS"
}
})
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
services['configurations']['cluster-env']['properties']['ambari_principal_name'] = "[email protected]"
expected = {
'kms-properties': {
'properties': {}
},
'dbks-site': {
'properties': {
"ranger.ks.jpa.jdbc.driver" : "oracle.jdbc.driver.OracleDriver",
"ranger.ks.jpa.jdbc.url" : "jdbc:oracle:thin:@c6401.ambari.apache.org:1521:XE"
}
},
'core-site': {
'properties': {
'hadoop.proxyuser.kmsname.groups': '*'
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-site': {
'properties': {
'hadoop.kms.proxyuser.HTTP.hosts': '*',
'hadoop.kms.proxyuser.HTTP.users': '*',
'hadoop.kms.proxyuser.ambari-cl1.hosts': '*',
'hadoop.kms.proxyuser.ambari-cl1.users': '*'
}
}
}
# on kerberized cluster property should be recommended
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
recommendedConfigurations = {}
services['changed-configurations'] = [
{
'type': 'kms-env',
'name': 'kms_user',
'old_value': 'kmsname'
}
]
services['configurations']['kms-env']['properties']['kms_user'] = 'kmsnew'
expected['core-site'] = {
'properties': {
'hadoop.proxyuser.kmsnew.groups': '*'
},
'property_attributes': {
'hadoop.proxyuser.kmsname.groups': {
'delete': 'true'
}
}
}
# kms_user was changed, old property should be removed
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
def test_recommendStormConfigurations(self):
self.maxDiff = None
configurations = {
"storm-site": {
"properties": {
"storm.topology.submission.notifier.plugin.class": "foo"
}
},
"ranger-storm-plugin-properties": {
"properties": {
"ranger-storm-plugin-enabled": "No"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'storm-site': {
'properties': {
'storm.topology.submission.notifier.plugin.class': 'foo,org.apache.atlas.storm.hook.StormAtlasHook',
},
"property_attributes":{
'nimbus.authorizer': {'delete':'true'}
}
},
"ranger-storm-plugin-properties": {
"properties": {
"ranger-storm-plugin-enabled": "No"
}
},
"storm-env": {
"properties": {
"storm.atlas.hook": "true"
}
}
}
services = {
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
"StackServices": {
"service_name": "ATLAS",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "ATLAS_SERVER",
"display_name": "Atlas Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": {
"storm-site": {
"properties": {
"storm.topology.submission.notifier.plugin.class": "foo"
},
"property-attributes":{}
},
"ranger-storm-plugin-properties": {
"properties": {
"ranger-storm-plugin-enabled": "No"
}
},
"storm-env": {
"properties": {
"storm.atlas.hook": "false"
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
services["services"] = []
services["configurations"]["storm-site"]["properties"]["storm.topology.submission.notifier.plugin.class"] = "org.apache.atlas.storm.hook.StormAtlasHook"
self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(True, "storm.topology.submission.notifier.plugin.class" in configurations["storm-site"]["property_attributes"])
def test_recommendSqoopConfigurations(self):
self.maxDiff = None
configurations = {
"sqoop-site": {
"properties": {
"sqoop.job.data.publish.class": "foo"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'sqoop-site': {
'properties': {
'sqoop.job.data.publish.class': 'org.apache.atlas.sqoop.hook.SqoopHook',
}
},
'sqoop-env': {
'properties': {
'sqoop.atlas.hook': 'true'
}
}
}
services = {
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
"StackServices": {
"service_name": "ATLAS",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "ATLAS_SERVER",
"display_name": "Atlas Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": {
"sqoop-site": {
"properties": {
"sqoop.job.data.publish.class": "foo"
}
},
"sqoop-env": {
"properties": {
"sqoop.atlas.hook": "false"
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendSqoopConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
self.stackAdvisor.recommendSqoopConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_validateRangerConfigurationsEnv(self):
properties = {
"ranger-kafka-plugin-enabled": "Yes",
}
recommendedDefaults = {
"ranger-kafka-plugin-enabled": "No",
}
configurations = {
"cluster-env": {
"properties": {
"security_enabled": "false",
}
}
}
services = {
"services":
[
{
"StackServices": {
"service_name" : "RANGER"
}
}
],
"configurations": {
"cluster-env": {
"properties": {
"security_enabled" : "false"
},
"property_attributes": {}
}
}
}
# Test with ranger plugin enabled, validation fails
res_expected = [{'config-type': 'ranger-env', 'message': 'Ranger Kafka plugin should not be enabled in non-kerberos environment.', 'type': 'configuration', 'config-name': 'ranger-kafka-plugin-enabled', 'level': 'WARN'}]
res = self.stackAdvisor.validateRangerConfigurationsEnv(properties, recommendedDefaults, configurations, services, {})
self.assertEquals(res, res_expected)
# Test for security_enabled is true
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
configurations['cluster-env']['properties']['security_enabled'] = "true"
res_expected = []
res = self.stackAdvisor.validateRangerConfigurationsEnv(properties, recommendedDefaults, configurations, services, {})
self.assertEquals(res, res_expected)
|
py
|
1a5853408502cae33a124fb57e3e44e9ee9f6eb8
|
#!/usr/bin/env python
import sys
import os
import platform
import subprocess
def check_for_executable(exe_name, args=['--version']):
try:
cmd = [exe_name]
cmd.extend(args)
subprocess.check_output(cmd)
return True
except Exception:
return False
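# Illustrative usage (added comment, not in the original script): check_for_executable
# simply runs "<exe> --version" (or custom args) and reports whether it exited cleanly,
# e.g. check_for_executable('cmake') is True only when "cmake --version" succeeds.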
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--clean',
help='remove build directory before build',
action='store_true',
dest='clean')
parser.add_argument(
'-t', '--tests', help='run tests', action='store_true', dest='run_tests')
parser.add_argument(
'-v', help='verbose', action='store_true', dest='verbose')
parser.add_argument(
'-o', '--output',
help='output dir (relative to source dir)',
default='build',
dest='out_dir')
parser.add_argument(
'-c', '--config',
help='config (Debug or Release)',
default='Debug',
dest='config')
parser.add_argument(
'--sanitizers',
help='Run tests with address and undefined behaviour sanitizer if available',
default=False,
dest='sanitizers')
if platform.system() == "Windows":
parser.add_argument(
'--win32',
help='Build 32-bit libraries',
action='store_true',
dest='win32')
args = parser.parse_args()
args.platform = platform.system()
src_dir = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
if args.clean:
subprocess.check_call('rm -rf {}'.format(args.out_dir).split())
cmake_invocation = ['cmake', '.', '-B{}'.format(args.out_dir)]
if args.platform == 'Windows':
if args.win32:
cmake_invocation.extend(['-G', 'Visual Studio 15 2017'])
else:
cmake_invocation.extend(['-G', 'Visual Studio 15 2017 Win64'])
else:
if check_for_executable('ninja'):
cmake_invocation.extend(['-GNinja'])
cmake_invocation.append('-DCMAKE_BUILD_TYPE={}'.format(args.config))
if args.verbose:
cmake_invocation.append('-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON')
if args.sanitizers:
cmake_invocation.append('-DENABLE_SANITIZERS:BOOL=ON')
subprocess.check_call(cmake_invocation, cwd=src_dir)
subprocess.check_call(
'cmake --build ./{}'.format(args.out_dir).split(), cwd=src_dir)
if args.run_tests:
rc = subprocess.call(
'ctest . --output-on-failure -C {}'.format(args.config).split(),
cwd=os.path.join(src_dir, args.out_dir))
if rc != 0:
sys.exit(1)
if __name__ == '__main__':
main()
|
py
|
1a58543a9a1fbf37ea48db6919059a574263d670
|
import os
import textwrap
import warnings
from xml.dom import minidom
from conans.client.tools import msvs_toolset
from conans.errors import ConanException
from conans.util.files import save, load
class MSBuildToolchain(object):
filename = "conantoolchain.props"
def __init__(self, conanfile):
self._conanfile = conanfile
self.preprocessor_definitions = {}
self.configuration = conanfile.settings.build_type
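    # Illustrative example (comment added for clarity, values are hypothetical): with
    # build_type=Release and arch=x86_64, _name_condition() returns
    #   name      -> "_release_x64"
    #   condition -> "'$(Configuration)' == 'Release' And '$(Platform)' == 'x64'"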
def _name_condition(self, settings):
props = [("Configuration", self.configuration),
# FIXME: This probably requires mapping ARM architectures
("Platform", {'x86': 'Win32',
'x86_64': 'x64'}.get(settings.get_safe("arch")))]
name = "".join("_%s" % v for _, v in props if v is not None)
condition = " And ".join("'$(%s)' == '%s'" % (k, v) for k, v in props if v is not None)
return name.lower(), condition
def write_toolchain_files(self):
# Warning
msg = ("\n*****************************************************************\n"
"******************************************************************\n"
"'write_toolchain_files()' has been deprecated and moved.\n"
"It will be removed in next Conan release.\n"
"Use 'generate()' method instead.\n"
"********************************************************************\n"
"********************************************************************\n")
from conans.client.output import Color, ConanOutput
ConanOutput(self._conanfile.output._stream,
color=self._conanfile.output._color).writeln(msg, front=Color.BRIGHT_RED)
warnings.warn(msg)
self.generate()
def generate(self):
name, condition = self._name_condition(self._conanfile.settings)
config_filename = "conantoolchain{}.props".format(name)
self._write_config_toolchain(config_filename)
self._write_main_toolchain(config_filename, condition)
def _write_config_toolchain(self, config_filename):
def format_macro(k, value):
return '%s="%s"' % (k, value) if value is not None else k
runtime = self._conanfile.settings.get_safe("compiler.runtime")
cppstd = self._conanfile.settings.get_safe("compiler.cppstd")
toolset = msvs_toolset(self._conanfile.settings)
runtime_library = {"MT": "MultiThreaded",
"MTd": "MultiThreadedDebug",
"MD": "MultiThreadedDLL",
"MDd": "MultiThreadedDebugDLL"}.get(runtime, "")
content = textwrap.dedent("""\
<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>
{};%(PreprocessorDefinitions)
</PreprocessorDefinitions>
<RuntimeLibrary>{}</RuntimeLibrary>
<LanguageStandard>{}</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<PropertyGroup Label="Configuration">
<PlatformToolset>{}</PlatformToolset>
</PropertyGroup>
</Project>
""")
preprocessor_definitions = ";".join([format_macro(k, v)
for k, v in self.preprocessor_definitions.items()])
# It is useless to set PlatformToolset in the config file, because the conditional checks it
cppstd = "stdcpp%s" % cppstd if cppstd else ""
toolset = toolset or ""
config_props = content.format(preprocessor_definitions, runtime_library, cppstd, toolset)
config_filepath = os.path.abspath(config_filename)
self._conanfile.output.info("MSBuildToolchain created %s" % config_filename)
save(config_filepath, config_props)
def _write_main_toolchain(self, config_filename, condition):
main_toolchain_path = os.path.abspath(self.filename)
if os.path.isfile(main_toolchain_path):
content = load(main_toolchain_path)
else:
content = textwrap.dedent("""\
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0"
xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="ConanPackageInfo">
<ConanPackageName>{}</ConanPackageName>
<ConanPackageVersion>{}</ConanPackageVersion>
</PropertyGroup>
</Project>
""")
conan_package_name = self._conanfile.name if self._conanfile.name else ""
conan_package_version = self._conanfile.version if self._conanfile.version else ""
content = content.format(conan_package_name, conan_package_version)
dom = minidom.parseString(content)
try:
import_group = dom.getElementsByTagName('ImportGroup')[0]
except Exception:
raise ConanException("Broken {}. Remove the file and try again".format(self.filename))
children = import_group.getElementsByTagName("Import")
for node in children:
if (config_filename == node.getAttribute("Project") and
condition == node.getAttribute("Condition")):
break # the import statement already exists
else: # create a new import statement
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', config_filename)
import_group.appendChild(import_node)
conan_toolchain = dom.toprettyxml()
conan_toolchain = "\n".join(line for line in conan_toolchain.splitlines() if line.strip())
self._conanfile.output.info("MSBuildToolchain writing {}".format(self.filename))
save(main_toolchain_path, conan_toolchain)
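# Minimal usage sketch (illustrative, not part of the original module): a conanfile
# would typically drive this generator from its generate() method, e.g.
#
#   def generate(self):
#       tc = MSBuildToolchain(self)
#       tc.preprocessor_definitions["MY_DEFINE"] = "1"   # hypothetical macro name
#       tc.generate()
#
# which writes conantoolchain.props plus a per-configuration props file imported by
# MSBuild through the matching Configuration/Platform condition.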
|
py
|
1a5854e826edc007f27cd9626e23a66ea84399e5
|
from __future__ import division, absolute_import, print_function
import sys
from numpy.testing import (TestCase, run_module_suite, assert_,
assert_array_equal)
from numpy import random
from numpy.compat import long
import numpy as np
class TestRegression(TestCase):
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits:
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) # Check for 64-bit systems
for arg in args:
assert_(np.random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
def test_permutation_longs(self):
np.random.seed(1234)
a = np.random.permutation(12)
np.random.seed(1234)
b = np.random.permutation(long(12))
assert_array_equal(a, b)
def test_randint_range(self):
# Test for ticket #1690
lmax = np.iinfo('l').max
lmin = np.iinfo('l').min
try:
random.randint(lmin, lmax)
except:
raise AssertionError
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
np.random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = np.random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
np.random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
np.random.multivariate_normal([0], [[0]], size=1)
np.random.multivariate_normal([0], [[0]], size=np.int_(1))
np.random.multivariate_normal([0], [[0]], size=np.int64(1))
if __name__ == "__main__":
run_module_suite()
|
py
|
1a58552cd2fb0af65afa75eee8b1c9882fae5f29
|
from __future__ import print_function
import ROOT,itertools,math #
from array import array #
from DataFormats.FWLite import Events, Handle
ROOT.FWLiteEnabler.enable()
#
tag='output'
##A class to keep BMTF data
###Common methods############
def fetchStubsOLD(event,ontime=False,isData=True):
phiSeg = Handle ('L1MuDTChambPhContainer')
if not isData:
event.getByLabel('simTwinMuxDigis',phiSeg)
else:
event.getByLabel('bmtfDigis',phiSeg)
if ontime:
filtered=filter(lambda x: x.bxNum()==0, phiSeg.product().getContainer())
return filtered
else:
return phiSeg.product().getContainer()
def fetchStubs(event,ontime=True):
phiSeg2 = Handle ('std::vector<L1MuKBMTCombinedStub>')
event.getByLabel('simKBmtfStubs',phiSeg2)
if ontime:
filtered=filter(lambda x: x.bxNum()==0, phiSeg2.product())
return filtered
else:
return phiSeg2.product()
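# Added note: globalBMTFPhi converts the BMTF hardware phi (48 units per 30-degree
# processor sector, 576 units per full turn, with a -15 degree offset) into a global
# phi in radians, then adds a small curvature-dependent term (5.740*K); the constant
# is taken as-is from the original script.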
def globalBMTFPhi(muon):
temp=muon.processor()*48+muon.hwPhi()
temp=temp*2*math.pi/576.0-math.pi*15.0/180.0;
if temp>math.pi:
temp=temp-2*math.pi;
K=1.0/muon.hwPt()
if muon.hwSign()>0:
K=-1.0/muon.hwPt()
return temp+5.740*K
def fetchKMTF(event,etaMax,collection):
kbmtfH = Handle ('BXVector<l1t::RegionalMuonCand>')
event.getByLabel(collection,kbmtfH)
kbmtf=kbmtfH.product()
kbmtfMuons={}
for bx in [-3,-2,-1,0,1,2,3]:
kbmtfMuons[bx]=[]
for bx in range(kbmtf.getFirstBX(),kbmtf.getLastBX()+1):
for j in range(0,kbmtf.size(bx)):
mu = kbmtf.at(bx,j)
kbmtfMuons[bx].append(mu)
# kbmtfMuons[bx]=sorted(kbmtfMuons[bx],key=lambda x: x.hwPt(),reverse=True)
return kbmtfMuons
def curvResidual(a,b):
return (a.charge()/a.pt()-b.charge()/b.pt())*b.pt()/b.charge()
def ptResidual(a,b):
return (a.pt()-b.pt())/b.pt()
def curvResidualSTA(a,b):
return (a.charge()/a.ptUnconstrained()-b.charge()/b.pt())*b.pt()/b.charge()
def deltaPhi( p1, p2):
'''Computes delta phi, handling periodic limit conditions.'''
res = p1 - p2
while res > math.pi:
res -= 2*math.pi
while res < -math.pi:
res += 2*math.pi
return res
def deltaR( *args ):
return math.sqrt( deltaR2(*args) )
def deltaR2( e1, p1, e2, p2):
de = e1 - e2
dp = deltaPhi(p1, p2)
return de*de + dp*dp
def log(event,counter,mystubs,kmtf,bmtf):
print("--------EVENT"+str(counter)+"------------")
print('RUN={run} LUMI={lumi} EVENT={event}'.format(run=event.eventAuxiliary().id().run(),lumi=event.eventAuxiliary().id().luminosityBlock(),event=event.eventAuxiliary().id().event()))
print("-----------------------------")
print("-----------------------------")
print('Stubs:')
for stub in mystubs:
print('wheel={w} sector={sc} station={st} high/low={ts} phi={phi} phiB={phiB} qual={qual} BX={BX}'.format(w=stub.whNum(),sc=stub.scNum(),st=stub.stNum(),ts=stub.Ts2Tag(),phi=stub.phi(),phiB=stub.phiB(),qual=stub.code(),BX=stub.bxNum()))
print('EMU:')
for g in bmtf :
print("EMU sector={sector} pt={pt} eta={eta} phi={phi} qual={qual} dxy={dxy} pt2={pt2} hasFineEta={HF}".format(sector=g.processor(), pt=g.hwPt(),eta=g.hwEta(),phi=g.hwPhi(),qual=g.hwQual(),dxy=g.hwDXY(),pt2=g.hwPt2(),HF=g.hwHF()))
print('DATA:')
for g in kmtf :
print("DATA sector={sector} pt={pt} eta={eta} phi={phi} qual={qual} dxy={dxy} pt2={pt2} hasFineEta={HF}".format(sector=g.processor(),pt=g.hwPt(),eta=g.hwEta(),phi=g.hwPhi(),qual=g.hwQual(),dxy=g.hwDXY(),pt2=g.hwPt2(),HF=g.hwHF()))
print("-----------------------------")
print("-----------------------------")
print("c + enter to continue")
import pdb;pdb.set_trace()
###############################
#########Histograms#############
histos={}
histos['fw']={}
histos['fw']['pt1']=ROOT.TH1D("fw_pt1","HW p_{T}",512,0,511)
histos['fw']['eta1']=ROOT.TH1D("fw_eta1","HW #eta",256,-127,128)
histos['fw']['phi1']=ROOT.TH1D("fw_phi1","HW #phi",256,-127,128)
histos['fw']['HF1']=ROOT.TH1D("fw_HF1","HW HF",256,-127,128)
histos['fw']['qual1']=ROOT.TH1D("fw_qual1","HW qual",16,0,16)
histos['fw']['dxy1']=ROOT.TH1D("fw_dxy1","HW DXY",4,0,4)
histos['fw']['ptSTA1']=ROOT.TH1D("fw_ptSTA1","HW STA PT",256,0,255)
histos['fw']['pt2']=ROOT.TH1D("fw_pt2","HW p_{T}",512,0,511)
histos['fw']['eta2']=ROOT.TH1D("fw_eta2","HW #eta",256,-127,128)
histos['fw']['phi2']=ROOT.TH1D("fw_phi2","HW #phi",256,-127,128)
histos['fw']['HF2']=ROOT.TH1D("fw_HF2","HW HF",256,-127,128)
histos['fw']['qual2']=ROOT.TH1D("fw_qual2","HW qual",16,0,16)
histos['fw']['dxy2']=ROOT.TH1D("fw_dxy2","HW DXY",4,0,4)
histos['fw']['ptSTA2']=ROOT.TH1D("fw_ptSTA2","HW STA PT",256,0,255)
histos['fw']['pt3']=ROOT.TH1D("fw_pt3","HW p_{T}",512,0,511)
histos['fw']['eta3']=ROOT.TH1D("fw_eta3","HW #eta",256,-127,128)
histos['fw']['phi3']=ROOT.TH1D("fw_phi3","HW #phi",256,-127,128)
histos['fw']['HF3']=ROOT.TH1D("fw_HF3","HW HF",256,-127,128)
histos['fw']['qual3']=ROOT.TH1D("fw_qual3","HW qual",16,0,16)
histos['fw']['dxy3']=ROOT.TH1D("fw_dxy3","HW DXY",4,0,4)
histos['fw']['ptSTA3']=ROOT.TH1D("fw_ptSTA3","HW STA PT",256,0,255)
histos['emu']={}
histos['emu']['pt1']=ROOT.TH1D("emu_pt1","HW p_{T}",512,0,511)
histos['emu']['eta1']=ROOT.TH1D("emu_eta1","HW #eta",256,-127,128)
histos['emu']['phi1']=ROOT.TH1D("emu_phi1","HW #phi",256,-127,128)
histos['emu']['HF1']=ROOT.TH1D("emu_HF1","HW HF",256,-127,128)
histos['emu']['qual1']=ROOT.TH1D("emu_qual1","HW qual",16,0,16)
histos['emu']['dxy1']=ROOT.TH1D("emu_dxy1","HW DXY",4,0,4)
histos['emu']['ptSTA1']=ROOT.TH1D("emu_ptSTA1","HW STA PT",256,0,255)
histos['emu']['pt2']=ROOT.TH1D("emu_pt2","HW p_{T}",512,0,511)
histos['emu']['eta2']=ROOT.TH1D("emu_eta2","HW #eta",256,-127,128)
histos['emu']['phi2']=ROOT.TH1D("emu_phi2","HW #phi",256,-127,128)
histos['emu']['HF2']=ROOT.TH1D("emu_HF2","HW HF",256,-127,128)
histos['emu']['qual2']=ROOT.TH1D("emu_qual2","HW qual",16,0,16)
histos['emu']['dxy2']=ROOT.TH1D("emu_dxy2","HW DXY",4,0,4)
histos['emu']['ptSTA2']=ROOT.TH1D("emu_ptSTA2","HW STA PT",256,0,255)
histos['emu']['pt3']=ROOT.TH1D("emu_pt3","HW p_{T}",512,0,511)
histos['emu']['eta3']=ROOT.TH1D("emu_eta3","HW #eta",256,-127,128)
histos['emu']['phi3']=ROOT.TH1D("emu_phi3","HW #phi",256,-127,128)
histos['emu']['HF3']=ROOT.TH1D("emu_HF3","HW HF",256,-127,128)
histos['emu']['qual3']=ROOT.TH1D("emu_qual3","HW qual",16,0,16)
histos['emu']['dxy3']=ROOT.TH1D("emu_dxy3","HW DXY",4,0,4)
histos['emu']['ptSTA3']=ROOT.TH1D("emu_ptSTA3","HW STA PT",256,0,255)
for key,histo in histos['fw'].iteritems():
histo.Sumw2()
def fill(info,mu):
if len(mu)>0:
info['pt1'].Fill(mu[0].hwPt())
info['eta1'].Fill(mu[0].hwEta())
info['phi1'].Fill(mu[0].hwPhi())
info['HF1'].Fill(mu[0].hwHF())
info['qual1'].Fill(mu[0].hwQual())
info['dxy1'].Fill(mu[0].hwDXY())
info['ptSTA1'].Fill(mu[0].hwPt2())
else:
info['pt1'].Fill(0)
info['eta1'].Fill(0)
info['phi1'].Fill(0)
info['HF1'].Fill(0)
info['qual1'].Fill(0)
info['dxy1'].Fill(0)
info['ptSTA1'].Fill(0)
if len(mu)>1:
info['pt2'].Fill(mu[1].hwPt())
info['eta2'].Fill(mu[1].hwEta())
info['phi2'].Fill(mu[1].hwPhi())
info['HF2'].Fill(mu[1].hwHF())
info['qual2'].Fill(mu[1].hwQual())
info['dxy2'].Fill(mu[1].hwDXY())
info['ptSTA2'].Fill(mu[1].hwPt2())
else:
info['pt2'].Fill(0)
info['eta2'].Fill(0)
info['phi2'].Fill(0)
info['HF2'].Fill(0)
info['qual2'].Fill(0)
info['dxy2'].Fill(0)
info['ptSTA2'].Fill(0)
if len(mu)>2:
info['pt3'].Fill(mu[2].hwPt())
info['eta3'].Fill(mu[2].hwEta())
info['phi3'].Fill(mu[2].hwPhi())
info['HF3'].Fill(mu[2].hwHF())
info['qual3'].Fill(mu[2].hwQual())
info['dxy3'].Fill(mu[2].hwDXY())
info['ptSTA3'].Fill(mu[2].hwPt2())
else:
info['pt3'].Fill(0)
info['eta3'].Fill(0)
info['phi3'].Fill(0)
info['HF3'].Fill(0)
info['qual3'].Fill(0)
info['dxy3'].Fill(0)
info['ptSTA3'].Fill(0)
##############################
BUNCHES=[0]
events=Events([tag+'.root'])
counter=-1
for event in events:
counter=counter+1
#fetch stubs
stubs=fetchStubsOLD(event,True)
unpacker=fetchKMTF(event,100.0,'bmtfDigis:kBMTF')
emulator=fetchKMTF(event,100.0,'simKBmtfDigis:BMTF')
for processor in range(0,12):
for bx in BUNCHES:
emu=filter(lambda x: x.processor()==processor,emulator[bx])
data=filter(lambda x: x.processor()==processor,unpacker[bx])
if (len(emu)+len(data))>0:
fill(histos['emu'],emu)
fill(histos['fw'],data)
# if len(emu)!=0 and len(data)==0:
# log(event,counter,stubs,data,emu)
# import pdb;pdb.set_trace()
f=ROOT.TFile("validationResults.root","RECREATE")
for key,histo in histos['fw'].iteritems():
histo.SetMarkerStyle(7)
histo.Write()
for key,histo in histos['emu'].iteritems():
histo.SetLineColor(ROOT.kRed)
histo.Write()
#make fancy plots
histonames=['pt1','eta1','phi1','HF1','qual1','dxy1','ptSTA1']
for h in histonames:
c=ROOT.TCanvas(h)
c.cd()
histos['emu'][h].Draw("HIST")
histos['emu'][h].GetXaxis().SetTitle(histos['emu'][h].GetTitle())
histos['emu'][h].GetYaxis().SetTitle("events")
histos['fw'][h].Draw("SAME")
c.SetLogy()
l=ROOT.TLegend(0.6,0.6,0.9,0.8)
l.AddEntry(histos['emu'][h],"emulator","l")
l.AddEntry(histos['fw'][h],"data","p")
l.Draw()
c.Write("plot_"+h)
f.Close()
|
py
|
1a58553bc8c90ede23a9b31c964f162193100fb1
|
#!/usr/bin/env python3
#
# Copyright (c) 2013-2019, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# // Author: Filippov Ilia
import common
import sys
import os
from pkg_resources import parse_version
print_debug = common.print_debug
error = common.error
take_lines = common.take_lines
exists = [False, False, False, False, False, False, False, False, False]
names = ["m4", "bison", "flex", "sde", "ispc", "clang", "gcc", "icc", "cmake"]
PATH_dir = os.environ["PATH"].split(os.pathsep)
for counter in PATH_dir:
for i in range(0,len(exists)):
if os.path.exists(counter + os.sep + names[i]):
exists[i] = True
print_debug("=== in PATH: ===\n", False, "")
print_debug("Tools:\n", False, "")
for i in range(0,3):
if exists[i]:
print_debug(take_lines(names[i] + " --version", "first"), False, "")
else:
error("you don't have " + names[i], 0)
if exists[0] and exists[1] and exists[2]:
if common.check_tools(2):
print_debug("Tools' versions are ok\n", False, "")
print_debug("\nSDE:\n", False, "")
if exists[3]:
print_debug(take_lines(names[3] + " --version", "first"), False, "")
else:
error("you don't have " + names[3], 2)
print_debug("\nISPC:\n", False, "")
if exists[4]:
print_debug(take_lines(names[4] + " --version", "first"), False, "")
else:
error("you don't have " + names[4], 2)
print_debug("\nC/C++ compilers:\n", False, "")
for i in range(5,8):
if exists[i]:
print_debug(take_lines(names[i] + " --version", "first"), False, "")
else:
error("you don't have " + names[i], 2)
print_debug("\nCMake:\n", False, "")
if exists[8]:
cmake_version = take_lines(names[8] + " --version", "first")[3]
if (parse_version(cmake_version) >= parse_version("3.8.0")):
print_debug(take_lines(names[8] + " --version", "first"), False, "")
else:
error("CMake version is older than needed. Please install version 3.8 or newer", 2)
else:
error("you don't have " + names[8], 2)
print_debug("\n=== in ISPC specific environment variables: ===\n", False, "")
if os.environ.get("LLVM_HOME") == None:
error("you have no LLVM_HOME", 2)
else:
print_debug("Your LLVM_HOME:" + os.environ.get("LLVM_HOME") + "\n", False, "")
if os.environ.get("ISPC_HOME") == None:
error("you have no ISPC_HOME", 2)
else:
print_debug("Your ISPC_HOME:" + os.environ.get("ISPC_HOME") + "\n", False, "")
if os.path.exists(os.environ.get("ISPC_HOME") + os.sep + "ispc"):
print_debug("You have ISPC in your ISPC_HOME: " +
take_lines(os.environ.get("ISPC_HOME") + os.sep + "ispc" + " --version", "first"), False, "")
else:
error("you don't have ISPC in your ISPC_HOME", 2)
if os.environ.get("SDE_HOME") == None:
error("You have no SDE_HOME", 2)
else:
print_debug("Your SDE_HOME:" + os.environ.get("SDE_HOME") + "\n", False, "")
if os.path.exists(os.environ.get("SDE_HOME") + os.sep + "sde"):
print_debug("You have sde in your SDE_HOME: " +
take_lines(os.environ.get("SDE_HOME") + os.sep + "sde" + " --version", "first"), False, "")
else:
error("you don't have any SDE in your ISPC_HOME", 2)
|
py
|
1a585614268fea23f9a862d6067a68b05003475b
|
"""
Apicurio Registry API [v2]
Apicurio Registry is a datastore for standard event schemas and API designs. Apicurio Registry enables developers to manage and share the structure of their data using a REST interface. For example, client applications can dynamically push or pull the latest updates to or from the registry without needing to redeploy. Apicurio Registry also enables developers to create rules that govern how registry content can evolve over time. For example, this includes rules for content validation and version compatibility. The Apicurio Registry REST API enables client applications to manage the artifacts in the registry. This API provides create, read, update, and delete operations for schema and API artifacts, rules, versions, and metadata. The supported artifact types include: - Apache Avro schema - AsyncAPI specification - Google protocol buffers - GraphQL schema - JSON Schema - Kafka Connect schema - OpenAPI specification - Web Services Description Language - XML Schema Definition **Important**: The Apicurio Registry REST API is available from `https://MY-REGISTRY-URL/apis/registry/v2` by default. Therefore you must prefix all API operation paths with `../apis/registry/v2` in this case. For example: `../apis/registry/v2/ids/globalIds/{globalId}`. # noqa: E501
The version of the OpenAPI document: 2.2.0.Final
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import registryclient
from registryclient.model.role_type import RoleType
class TestRoleType(unittest.TestCase):
"""RoleType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRoleType(self):
"""Test RoleType"""
# FIXME: construct object with mandatory attributes with example values
# model = RoleType() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py
|
1a5856f2d314af228f9d9d433ca19b72aac16da6
|
#####################################################################
#
# Predictive Failure Analysis (PFA)
# Graph JES2 Resource Data for Jobs
#
#This python script is for use with data that is collected, created,
#and written by the PFA_JES2_RESOURCE_EXHAUSTION check only. Its
#use with data from any other source will result in errors.
#
#Copyright 2021 IBM Corp.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
#either express or implied. See the License for the specific
#language governing permissions and limitations under the License.
#####################################################################
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import platform
import os
#Make sure we have plenty of potential data points to plot.
plt.rcParams['agg.path.chunksize']=10000
#Disable false positive warning
pd.options.mode.chained_assignment = None # default='warn'
#Which system are we running on?
system = platform.system()
keys = {"JQE":"Q","SPOOL":"S","BERT":"B","JOE":"J"}
user_keys = ["JQE","SPOOL","BERT","JOE"]
asid_header_data = ["Key","JobName","TaskId","Start_Time","STCK_Time","Current_Usage","Date_Time"]
capacity_header_data = ["Resource","Capacity"]
data_types_dict={'Key':str,'JobName':str,'TaskId':str,'Start_Time':str,'STCK_Time':int,'Current_Usage':int,'Date_Time':str}
capacity_types_dict={"Resource":str,"Capacity":int}
check_name = "PFA_JES2_Resource_Exhaustion"
COLUMN_CHAR_LEN = 8
#Parse our command line arguments.
if(len(sys.argv) == 5):
data_filepath = sys.argv[1]
capacity_filepath = sys.argv[2]
jobName = sys.argv[3]
jobName = jobName.upper()
key = sys.argv[4]
key = key.upper()
verbose = False
elif(len(sys.argv) == 6 and (sys.argv[5] == '-v' or sys.argv[5] == '-verbose')):
data_filepath = sys.argv[1]
capacity_filepath = sys.argv[2]
jobName = sys.argv[3]
jobName = jobName.upper()
key = sys.argv[4]
key = key.upper()
verbose = True
elif(len(sys.argv) == 2 and (sys.argv[1] == '-h' or sys.argv[1] == '-help')):
print("The proper syntax for this script is the following:\n")
print("'python Graph_JRE_Job.py data_file capacity_file job_name jes2_resource'.\n")
print("Valid JES2 Resources are: " + str([key for key in user_keys]) + "\n")
print("The file path value is case sensitive, but the JES2 resource and job_name values are not.\n")
print("For example, if this script and the required files are in the same directory, and you want to graph the JES2 Spool data for Job3, you would type the following:\n")
print("'python Graph_JRE_Job.py SY1.5day.All.data Capacity.data Job3 SPOOL'\n")
print("You can also add -v to the end of the command for verbose mode. This option will print additional data ")
print("that could help debug errors or verify the results. An example using verbose mode looks like the following:\n")
print("'python Graph_JRE_Job.py SY1.5day.All.data Capacity.data Job3 BERT -v'\n")
print("When this script is executed on z/OS, it saves the graph in a .pdf file that can be downloaded from the directory where this script was executed and displayed anywhere that supports displaying a .pdf file.")
print("The file name is in the format of jobName_JESResource_graph.pdf.")
print("For example, if you entered 'python Graph_JRE_Job.py SY1.5day.All.data Capacity.data Job3 SPOOL' on z/OS the saved file would be:")
print("JOB3_SPOOL_graph.pdf and it would be located in the current working directory.")
sys.exit()
else:
raise Exception("The supplied arguments are not correct. Specify the data_file_path, capacity_filepath, job_name, and JES2 resource in that order. For help enter 'python Graph_JRE_Job.py -h'")
#Make sure we have proper input from the user.
if(not os.path.exists(data_filepath)):
raise Exception("The specified file or filepath for the data file does not exist. Verify the file and filepath then try again.")
if(not os.path.exists(capacity_filepath)):
raise Exception("The specified file or filepath for the capacity file does not exist. Verify the file and filepath then try again.")
if key not in user_keys:
raise Exception("The specified resource does not exist. Specify a resource that exists.")
#Load up our data and assign correct header values so we can narrow it down to the pieces we want.
data_file = pd.read_csv(data_filepath,
sep="/|,",
names=asid_header_data,
header=None,
engine="python",
converters=data_types_dict)
capacity_file = pd.read_csv(capacity_filepath,
sep="/|,",
names=capacity_header_data,
header=None,
engine="python",
converters=capacity_types_dict)
#We need to make sure our jobName is left justified and the proper length.
#Otherwise we will not be able to find the correct data to graph.
if(len(jobName) < COLUMN_CHAR_LEN):
jobName = jobName.ljust(COLUMN_CHAR_LEN)
#Make sure we have proper input from the user.
if jobName not in data_file.values:
raise Exception("The specified job name does not exist. Verify the job name and try again.")
user_key = key
key = keys[user_key]
user_key = user_key.ljust(COLUMN_CHAR_LEN)
data_file['Capacity'] = np.nan
NUM_TO_PRINT = 10
PDF_FILENAME = jobName.strip()+'_'+user_key.strip()+"_graph.pdf" #This is the name of the .pdf file that gets saved when this script is ran on z/OS
def process_data(data_file, capacity_file):
the_capacity = capacity_file.loc[capacity_file['Resource'] == user_key,'Capacity'].values[0]
the_data = data_file.loc[(data_file['Key'] == key) & (data_file['JobName'] == jobName)]
the_data['Capacity'].fillna(the_capacity, inplace=True)
the_data['Capacity'] = the_data['Capacity'].astype(int)
the_data.loc[:,('Date_Time')] = pd.to_datetime(the_data['Date_Time'].astype(str), format='%Y%m%d%H%M%S')
the_data = get_latest_time(the_data)
if(verbose):
print_details(the_data,NUM_TO_PRINT)
return the_data
def graph_data(the_data):
y_values = [0,(the_data['Capacity'].max())*.25,(the_data['Capacity'].max())*.50,(the_data['Capacity'].max())*.75,(the_data['Capacity'].max())]
y_ticks = [str(int(y)) + user_key for y in y_values]
fig, ax = plt.subplots()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
ax.plot(the_data['Date_Time'],the_data['Capacity'],'--r', label='Capacity')
ax.plot(the_data['Date_Time'],the_data['Current_Usage']/1024,'-b', label='Current Usage')
plt.xlabel('Month-Day Time')
fig.suptitle(check_name + "\n" + jobName + '/' + user_key, fontsize=16)
fig.autofmt_xdate()
plt.yticks(y_values, y_ticks)
ax.set_ylim(0,the_data['Capacity'].max()*1.10)
ax.legend(bbox_to_anchor=(1.41, 1),loc="upper right")
fig.subplots_adjust(right=0.75)
if system != 'z/OS':
plt.show();
else:
fig.savefig(PDF_FILENAME)
def print_details(data_frame,num_to_print):
print("Now graphing " + check_name + " data on a " + system + " system.")
print("The job_name is: " + jobName)
print("The JES2 resource is: " + user_key)
print("The data_filepath entered: " + data_filepath)
print("The capacity_filepath entered was: " + capacity_filepath)
print("\nPreview of the data being graphed:")
print(data_frame.head(num_to_print).to_string(index=False))
def get_latest_time(our_data):
#Need to verify that we are using the latest start time if multiple exist for the same ASID.
list_data = our_data['Start_Time'].to_dict()
#Here we make sure we get the latest start time.
times_dict = {}
for i in list_data:
if list_data[i] in times_dict:
times_dict[list_data[i]] += 1
else:
times_dict[list_data[i]] = 1
if(len(times_dict) > 1):
latest_time = max(times_dict.keys())
our_data = our_data.loc[(our_data['Start_Time'] == latest_time)]
return our_data
#Process and graph our data.
the_data = process_data(data_file, capacity_file)
jobName = jobName.strip()
user_key = user_key.strip()
graph_data(the_data)
if system == 'z/OS':
print(PDF_FILENAME + ' has been created and is ready to be downloaded and viewed.')
|
py
|
1a5857017504d9294bf802c6d91a86a85cc2a1d5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : __init__.py.py
# @Author: [email protected]
# @Date : 2019-03-21
# @Desc :
|
py
|
1a585725b051d5d2e5446b10ff41adf7b1361431
|
#!/usr/bin/env python
import os
import sys
from io import BytesIO, IOBase
import math
from collections import Counter
import bisect
def lcm(a, b):
return a * b // math.gcd(a, b)
def func(n, m, k, first, second):
least = lcm(n, m)
max_same = len(set(first).intersection(set(second)))
day = day_diff = same = 0
same_day = []
last_diff = 0
while day_diff < k:
if first[day % n] != second[day % m]:
day_diff += 1
last_diff = day
else:
same += 1
same_day.append(day)
day += 1
if day == least or same == max_same:
if day == least:
diff_one = day_diff
else:
diff_one = least - max_same
day = least * (k // diff_one)
day_diff = diff_one * (k // diff_one)
break
if not same_day:
return k
if day_diff == k:
if same_day[-1] != least - 1:
return day
return day - (least - last_diff - 1)
same_day.append(least)
for i, s in enumerate(same_day):
        if s - i >= k - day_diff:
return day + same_day[i - 1] + k - (day_diff + same_day[i - 1] - i)
def main():
n, m, k = [int(i) for i in parse_input().split()]
first = [int(i) for i in parse_input().split()]
second = [int(i) for i in parse_input().split()]
# n, m, k = 500000, 500000 - 1, 10 ** 12
# first = list(range(1, n + 1))
# second = list(range(1, m + 1))
print(func(n, m, k, first, second))
# region fastio
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
parse_input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion
if __name__ == "__main__":
main()
|
py
|
1a5857cec517fd483e3ad5f2ef318528243d0d83
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: olmos.version
.. moduleauthor:: NarekA <my_email>
This module contains project version information.
"""
__version__ = '0.0.1' #: the working version
__release__ = '0.0.1' #: the release version
|
py
|
1a5858352bfb707740831412c56596112f6f8c02
|
from glfw import *
from OpenGL.GL import *
import numpy as np
from ctypes import *
from learnopengl import *
from PIL import Image
import glm
def resize(window, width, height):
glViewport(0, 0, width, height)
def main():
# Initialize the library
if not init():
return
# Create a windowed mode window and its OpenGL context
window_hint(CONTEXT_VERSION_MAJOR, 3)
window_hint(CONTEXT_VERSION_MINOR, 3)
window_hint(OPENGL_PROFILE, OPENGL_CORE_PROFILE)
screen_width, screen_height = 800, 600
window = create_window(screen_width, screen_height, "LearnOpenGL", None, None)
if not window:
terminate()
make_context_current(window)
set_framebuffer_size_callback(window, resize)
glViewport(0, 0, 800, 600)
# Make the window's context current
make_context_current(window)
# shaders
shader = Shader('vertex.glsl', 'fragment.glsl')
vertices = np.array([
-0.5, -0.5, -0.5, 0.0, 0.0,
0.5, -0.5, -0.5, 1.0, 0.0,
0.5, 0.5, -0.5, 1.0, 1.0,
0.5, 0.5, -0.5, 1.0, 1.0,
-0.5, 0.5, -0.5, 0.0, 1.0,
-0.5, -0.5, -0.5, 0.0, 0.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
0.5, -0.5, 0.5, 1.0, 0.0,
0.5, 0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 0.5, 1.0, 1.0,
-0.5, 0.5, 0.5, 0.0, 1.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
-0.5, 0.5, 0.5, 1.0, 0.0,
-0.5, 0.5, -0.5, 1.0, 1.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
-0.5, 0.5, 0.5, 1.0, 0.0,
0.5, 0.5, 0.5, 1.0, 0.0,
0.5, 0.5, -0.5, 1.0, 1.0,
0.5, -0.5, -0.5, 0.0, 1.0,
0.5, -0.5, -0.5, 0.0, 1.0,
0.5, -0.5, 0.5, 0.0, 0.0,
0.5, 0.5, 0.5, 1.0, 0.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
0.5, -0.5, -0.5, 1.0, 1.0,
0.5, -0.5, 0.5, 1.0, 0.0,
0.5, -0.5, 0.5, 1.0, 0.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
-0.5, 0.5, -0.5, 0.0, 1.0,
0.5, 0.5, -0.5, 1.0, 1.0,
0.5, 0.5, 0.5, 1.0, 0.0,
0.5, 0.5, 0.5, 1.0, 0.0,
-0.5, 0.5, 0.5, 0.0, 0.0,
-0.5, 0.5, -0.5, 0.0, 1.0
], dtype=np.float32)
indices = np.array([
0, 1, 3, # first triangle
1, 2, 3 # second triangle
], dtype=np.uint32)
image1 = Image.open('container.jpg')
image2 = Image.open('awesomeface.png')
# generate buffers
VAO = glGenVertexArrays(1)
VBO = glGenBuffers(1)
EBO = glGenBuffers(1)
texture1 = glGenTextures(1)
texture2 = glGenTextures(1)
# vertex array buffer
glBindVertexArray(VAO)
# vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
# element buffer
#glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
#glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
# texture1
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, texture1)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image1.width, image1.height, 0, GL_RGB, GL_UNSIGNED_BYTE, np.array(image1))
glGenerateMipmap(GL_TEXTURE_2D)
# texture1 warp
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# texture1 filter
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
# texture2
glActiveTexture(GL_TEXTURE1)
glBindTexture(GL_TEXTURE_2D, texture2)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image2.width, image2.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, np.flipud(np.array(image2)))
glGenerateMipmap(GL_TEXTURE_2D)
# texture2 warp
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# texture2 filter
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
# position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(c_float), c_void_p(0))
glEnableVertexAttribArray(0)
# texture
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(c_float), c_void_p(3 * sizeof(c_float)))
glEnableVertexAttribArray(2)
# unbind buffer and vertex array objects
glBindVertexArray(0)
shader.use()
shader.set_int("texture2", 1)
# model
# model = glm.mat4(1.0)
# model = glm.rotate(model, glm.radians(-55.), glm.vec3(1.0, 0, 0))
# view
view = glm.mat4(1.0)
view = glm.translate(view, glm.vec3(0, 0, -3.))
# projection
projection = glm.perspective(glm.radians(45.), screen_width/float(screen_height), 0.1, 100.)
# cube translations
np.random.seed(13)
positions = np.random.rand(10, 3) * 2 - 1
#print(positions)
# Loop until the user closes the window
while not window_should_close(window):
glEnable(GL_DEPTH_TEST)
# Render here, e.g. using pyOpenGL
glClearColor(0.2, 0.3, 0.3, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# bind textures
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, texture1)
glActiveTexture(GL_TEXTURE1)
glBindTexture(GL_TEXTURE_2D, texture2)
glBindVertexArray(VAO)
shader.set_mat4('view', view)
shader.set_mat4('projection', projection)
for i in range(positions.shape[0]):
x, y, z = positions[i]
# set transformations
model = glm.mat4(1.0)
model = glm.translate(model, glm.vec3(x, y, z))
model = glm.rotate(model, (i % 3) * get_time() * glm.radians(i * 20.), glm.vec3(1., 0.3, 0.5))
model = glm.scale(model, glm.vec3(0.3, 0.3, 0.3))
# update transformations
shader.set_mat4('model', model)
#glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, c_void_p(0))
glDrawArrays(GL_TRIANGLES, 0, 36)
# Swap front and back buffers
swap_buffers(window)
# Poll for and process events
poll_events()
    # PyOpenGL expects a sequence of object names here rather than a bare integer.
    glDeleteVertexArrays(1, [VAO])
    glDeleteBuffers(1, [VBO])
    glDeleteBuffers(1, [EBO])
terminate()
if __name__ == "__main__":
main()
|
py
|
1a585841bedaf4200386632766746d402799232f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2021 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
# type: ignore
"""\
Command line script to generate (Micro) QR codes with Segno.
"QR Code" and "Micro QR Code" are registered trademarks of DENSO WAVE INCORPORATED.
"""
from __future__ import absolute_import, unicode_literals
import os
import sys
import argparse
import segno
from segno import writers
# file extension to supported keywords mapping
_EXT_TO_KW_MAPPING = {}
def _get_args(func):
# Python 2 vs Python 3
func_code = getattr(func, 'func_code', None) or func.__code__
defaults = getattr(func, 'func_defaults', None) or func.__defaults__
args = func_code.co_varnames[:func_code.co_argcount]
return args[-len(defaults):]
for ext, func in writers._VALID_SERIALIZERS.items():
kws = set(_get_args(func))
try:
kws.update(_get_args(func.__wrapped__))
except AttributeError:
pass
_EXT_TO_KW_MAPPING[ext] = frozenset(kws)
del writers
def make_parser():
"""\
Returns the command line parser.
"""
def _convert_scale(val):
val = float(val)
return val if val != int(val) else int(val)
parser = argparse.ArgumentParser(prog='segno',
description='Segno QR Code and Micro QR Code generator version {0}'
.format(segno.__version__))
parser.add_argument('--version', '-v', help='(Micro) QR Code version: 1 .. 40 or "M1", "M2", "M3", "M4"',
required=False,)
parser.add_argument('--error', '-e', help='Error correction level: "L": 7%% (default), "M": 15%%, "Q": 25%%, '
'"H": 30%%, "-": no error correction (used for M1 symbols)',
choices=('L', 'M', 'Q', 'H', '-'),
default=None,
type=lambda x: x.upper())
parser.add_argument('--mode', '-m', help='Mode. If unspecified (default), an optimal mode is chosen for the given '
'input.',
choices=('numeric', 'alphanumeric', 'byte', 'kanji', 'hanzi'),
default=None,
type=lambda x: x.lower())
parser.add_argument('--encoding', help='Sets the encoding of the input. '
'If not set (default), a minimal encoding is chosen.',
default=None)
parser.add_argument('--micro', help='Allow the creation of Micro QR Codes',
dest='micro', action='store_true')
parser.add_argument('--no-micro', help='Disallow creation of Micro QR Codes (default)',
dest='micro', action='store_false')
parser.add_argument('--pattern', '-p', help='Mask pattern to use. '
'If unspecified (default), an optimal mask pattern is used. '
'Valid values for QR Codes: 0 .. 7. '
'Valid values for Micro QR Codes: 0 .. 3',
required=False,
default=None,
type=int)
parser.add_argument('--no-error-boost', help='Disables the automatic error correction level incrementation. '
'By default, the maximal error correction level is used '
'(without changing the version).',
dest='boost_error', action='store_false')
parser.add_argument('--seq', help='Creates a sequence of QR Codes (Structured Append mode). '
'Version or symbol count must be provided',
dest='seq', action='store_true')
parser.add_argument('--symbol-count', '-sc', help='Number of symbols to create',
default=None,
type=int)
parser.add_argument('--border', '-b', help='Size of the border / quiet zone of the output. '
'By default, the standard border (4 modules for QR Codes, '
'2 modules for Micro QR Codes) will be used. '
'A value of 0 omits the border',
default=None,
type=int)
parser.add_argument('--scale', '-s', help='Scaling factor. By default, a scaling factor of 1 is used. '
'That may lead into too small images. '
'Some output formats, i.e. SVG, accept a decimal value.',
default=1,
type=_convert_scale)
parser.add_argument('--output', '-o', help='Output file. If not specified, the QR Code is printed to the terminal',
required=False)
color_group = parser.add_argument_group('Module Colors', 'Arguments to specify the module colors. '
'Multiple colors are supported for SVG and PNG. '
'The module color support varies between the '
'serialization formats. '
'Most serializers support at least "--dark" and "--light". ' # noqa: E501
'Unsupported arguments are ignored.')
color_group.add_argument('--dark', help='Color of the dark modules. '
'The color may be specified as web color name, i.e. "red" or '
'as hexadecimal value, i.e. "#0033cc". '
'Some serializers, i.e. SVG and PNG, support alpha channels '
'(8-digit hexadecimal value) and some support "transparent" / "trans" as '
'color value for alpha transparency. '
'The standard color is black.')
color_group.add_argument('--light', help='Color of the light modules. '
'See "dark" for a description of possible values. '
'The standard light color is white.')
color_group.add_argument('--finder-dark', help='Sets the color of the dark finder modules')
color_group.add_argument('--finder-light', help='Sets the color of the light finder modules')
color_group.add_argument('--separator', help='Sets the color of the separator modules')
color_group.add_argument('--data-dark', help='Sets the color of the dark data modules')
color_group.add_argument('--data-light', help='Sets the color of the light data modules')
color_group.add_argument('--timing-dark', help='Sets the color of the dark timing modules')
color_group.add_argument('--timing-light', help='Sets the color of the light timing modules')
color_group.add_argument('--align-dark', help='Sets the color of the dark alignment modules',
dest='alignment_dark', )
color_group.add_argument('--align-light', help='Sets the color of the light alignment modules',
dest='alignment_light', )
color_group.add_argument('--quiet-zone', help='Sets the color of the quiet zone (border)')
color_group.add_argument('--dark-module', help='Sets the color of the dark module')
color_group.add_argument('--format-dark', help='Sets the color of the dark format information modules')
color_group.add_argument('--format-light', help='Sets the color of the light format information modules')
color_group.add_argument('--version-dark', help='Sets the color of the dark version information modules')
color_group.add_argument('--version-light', help='Sets the color of the light version information modules')
# SVG
svg_group = parser.add_argument_group('SVG', 'SVG specific options')
svg_group.add_argument('--no-classes', help='Omits the (default) SVG classes',
action='store_true')
svg_group.add_argument('--no-xmldecl', help='Omits the XML declaration header',
dest='xmldecl',
action='store_false')
svg_group.add_argument('--no-namespace', help='Indicates that the SVG document should have no SVG namespace '
'declaration',
dest='svgns',
action='store_false')
svg_group.add_argument('--no-newline', help='Indicates that the SVG document should have no trailing newline',
dest='nl',
action='store_false')
svg_group.add_argument('--title', help='Specifies the title of the SVG document')
svg_group.add_argument('--desc', help='Specifies the description of the SVG document')
svg_group.add_argument('--svgid', help='Indicates the ID of the <svg/> element')
svg_group.add_argument('--svgclass', help='Indicates the CSS class of the <svg/> element. '
'An empty string omits the attribute.')
svg_group.add_argument('--lineclass', help='Indicates the CSS class of the <path/> elements. '
'An empty string omits the attribute.')
svg_group.add_argument('--no-size', help='Indicates that the SVG document should not have "width" and "height" '
'attributes',
dest='omitsize',
action='store_true')
svg_group.add_argument('--unit', help='Indicates SVG coordinate system unit')
svg_group.add_argument('--svgversion', help='Indicates the SVG version',
type=float)
svg_group.add_argument('--svgencoding', help='Specifies the encoding of the document',
default='utf-8')
svg_group.add_argument('--draw-transparent', help='Indicates that transparent paths should be drawn',
action='store_true')
# PNG
png_group = parser.add_argument_group('PNG', 'PNG specific options')
png_group.add_argument('--dpi', help='Sets the DPI value of the PNG file',
type=int)
# Show Segno's version --version and -v are taken by QR Code version
parser.add_mutually_exclusive_group().add_argument('--ver', '-V', help="Shows Segno's version",
action='version',
version='Segno {0}'.format(segno.__version__))
parser.add_argument('content', nargs='+', help='The content to encode')
return parser
def parse(args):
"""\
Parses the arguments and returns the result.
"""
parser = make_parser()
if not len(args):
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args(args)
if parsed_args.error == '-':
parsed_args.error = None
# 'micro' is False by default. If version is set to a Micro QR Code version,
# encoder.encode raises a VersionError.
# Small problem: --version=M4 --no-micro is allowed
version = parsed_args.version
if version is not None:
version = str(version).upper()
if not parsed_args.micro and version in ('M1', 'M2', 'M3', 'M4'):
parsed_args.micro = None
return _AttrDict(vars(parsed_args))
def build_config(config, filename=None):
"""\
Builds a configuration and returns it.
The config contains only keywords which are supported by the serializer.
Unsupported values are removed.
:param dict config: The configuration / dict returned by the :py:func:`parse` function.
:param filename: Optional filename. If not ``None`` (default), the `filename`
must provide a supported extension to identify the serializer.
:return: A (maybe) modified configuration.
"""
# Done here since it seems not to be possible to detect if an argument
# was supplied by the user or if it's the default argument.
# If using type=lambda v: None if v in ('transparent', 'trans') else v
# we cannot detect if "None" comes from "transparent" or the default value
for clr in ('dark', 'light', 'finder_dark', 'finder_light',
'format_dark', 'format_light', 'alignment_dark', 'alignment_light',
'timing_dark', 'timing_light', 'data_dark', 'data_light',
'version_dark', 'version_light',
'quiet_zone', 'dark_module', 'separator'):
val = config.pop(clr, None)
if val in ('transparent', 'trans'):
config[clr] = None
elif val:
config[clr] = val
# SVG
for name in ('svgid', 'svgclass', 'lineclass'):
if config.get(name, None) is None:
config.pop(name, None)
if config.pop('no_classes', False):
config['svgclass'] = None
config['lineclass'] = None
# encoding is used to provide the encoding to *create* a QR code
config['encoding'] = config.pop('svgencoding', 'utf-8')
if filename is not None:
ext = filename[filename.rfind('.') + 1:].lower()
if ext == 'svgz': # There is no svgz serializer, use same config as svg
ext = 'svg'
supported_args = _EXT_TO_KW_MAPPING.get(ext, ())
# Drop unsupported arguments from config rather than getting a
# "unsupported keyword" exception
config = {k: config[k] for k in config if k in supported_args}
return config
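# Hypothetical usage sketch (file name and argument values invented for illustration):
# parse() turns raw argv into an _AttrDict and build_config() then keeps only the
# keywords the serializer for the given file extension accepts.
#
#     cfg = parse(['--scale', '4', '--dark', 'darkblue', '-o', 'code.svg', 'hello'])
#     out = cfg.pop('output')
#     kw = build_config(cfg, filename=out)   # only SVG-supported keywords survive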
def make_code(config):
"""\
Creates the (Micro) QR Code (Sequence).
Configuration parameters used for creating the Micro QR Code, QR Code
or QR Code Sequence are removed from the configuration.
:param config: Configuration, see :py:func:`build_config`
:return: :py:class:`segno.QRCode` or :py:class:`segno.QRCodeSequence`.
"""
make = segno.make
kw = dict(mode=config.pop('mode'), error=config.pop('error'),
version=config.pop('version'), mask=config.pop('pattern'),
encoding=config.pop('encoding'),
boost_error=config.pop('boost_error'))
if config.pop('seq'):
make = segno.make_sequence
kw['symbol_count'] = config.pop('symbol_count')
else:
kw['micro'] = config.pop('micro')
return make(' '.join(config.pop('content')), **kw)
def main(args=sys.argv[1:]):
config = parse(args)
try:
qr = make_code(config)
except ValueError as ex:
sys.stderr.writelines([str(ex), os.linesep])
return sys.exit(1)
output = config.pop('output')
if output is None:
qr.terminal(border=config['border'])
else:
qr.save(output, **build_config(config, filename=output))
return 0
class _AttrDict(dict):
"""\
Internal helper class.
"""
def __init__(self, *args, **kwargs):
super(_AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
if __name__ == '__main__':
main()
|
py
|
1a58588be8faab5c2fe18f7f1bf776abf1b03771
|
from ovos_utils.ssml import SSMLBuilder
|
py
|
1a5859799b632acdb22d35bead93fb83881d9c90
|
"""Testing NgramScore fitness function"""
from lantern.fitness import NgramScorer
def test_ngram_score():
"""Testing NgramScore"""
scorer = NgramScorer({'a': 1, 'b': 1})
assert round(scorer("abb")) == -7
|
py
|
1a58599ff6653fc1167c1eb0a580541f528aded1
|
# Inspired by OpenAI Baselines. This uses the same design of having an easily
# substitutable generic policy that can be trained. This makes it easy to
# substitute in the I2A policy as opposed to the basic CNN one.
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import numpy as np
import tensorflow as tf
from common.multiprocessing_env import SubprocVecEnv
import gym
import gym_minigrid
from tqdm import tqdm
import argparse
from i2a import I2aPolicy
from a2c import CnnPolicy, get_actor_critic
#N_ENVS = 16
N_STEPS = 5
N_ENVS = 8
#N_STEPS = 1
# Total number of iterations you wish to train for (taking into account the
# number of environments and the number of steps).
TOTAL_TIMESTEPS=int(1e6)
GAMMA=0.99
LOG_INTERVAL=100
SAVE_INTERVAL = 1e5
# Where you want to save the weights
SAVE_PATH = 'weights'
# This can be anything from "regular", "avoid", "hunt", "ambush", or "rush", each
# resulting in a different reward function and giving the agent different behavior.
REWARD_MODE = 'regular'
def discount_with_dones(rewards, dones, GAMMA):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + GAMMA*r*(1.-done)
discounted.append(r)
return discounted[::-1]
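# Hand-checked sketch (not part of the original training loop): for a hypothetical
# 3-step rollout with rewards [1, 0, 2], dones [0, 0, 1] and GAMMA = 0.99, the
# episode terminates on the last step, so the bootstrapped returns come out as
# [1 + 0.99*(0 + 0.99*2), 0 + 0.99*2, 2].
#
#     discount_with_dones([1.0, 0.0, 2.0], [0, 0, 1], 0.99)
#     # -> approximately [2.9602, 1.98, 2.0]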
def train(policy, save_name, load_count = 0, summarize=True, load_path=None, log_path = './logs'):
#Minigrid maze env
env_name = "MiniGrid-BlockMaze-v0"
def make_env(env_name):
return lambda: gym_minigrid.wrappers.PadImgObsWrapper(gym.make(env_name))
envs = [make_env(env_name) for i in range(N_ENVS)]
envs = SubprocVecEnv(envs)
ob_space = envs.observation_space.shape
nw, nh, nc = ob_space
ac_space = envs.action_space
obs = envs.reset()
with tf.Session() as sess:
actor_critic = get_actor_critic(sess, N_ENVS, N_STEPS, ob_space,
ac_space, policy, summarize)
if load_path is not None:
actor_critic.load(load_path)
print('Loaded a2c')
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_path, graph=sess.graph)
sess.run(tf.global_variables_initializer())
batch_ob_shape = (N_ENVS*N_STEPS, nw, nh, nc)
dones = [False for _ in range(N_ENVS)]
nbatch = N_ENVS * N_STEPS
episode_rewards = np.zeros((N_ENVS, ))
final_rewards = np.zeros((N_ENVS, ))
for update in tqdm(range(load_count + 1, TOTAL_TIMESTEPS + 1)):
# mb stands for mini batch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
for n in range(N_STEPS):
actions, values, _ = actor_critic.act(obs)
mb_obs.append(np.copy(obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(dones)
obs, rewards, dones, _ = envs.step(actions)
#print(obs[0:3, :,:,0])
episode_rewards += rewards
masks = 1 - np.array(dones)
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
mb_rewards.append(rewards)
mb_dones.append(dones)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32).swapaxes(1, 0).reshape(batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
last_values = actor_critic.critique(obs).tolist()
#discount/bootstrap off value fn
for n, (rewards, d, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
d = d.tolist()
if d[-1] == 0:
rewards = discount_with_dones(rewards+[value], d+[0], GAMMA)[:-1]
else:
rewards = discount_with_dones(rewards, d, GAMMA)
mb_rewards[n] = rewards
mb_rewards = mb_rewards.flatten()
mb_actions = mb_actions.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
if summarize:
loss, policy_loss, value_loss, policy_entropy, _, summary = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update,
summary_op)
writer.add_summary(summary, update)
else:
loss, policy_loss, value_loss, policy_entropy, _ = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update)
if update % LOG_INTERVAL == 0 or update == 1:
print('%i): %.4f, %.4f, %.4f' % (update, policy_loss, value_loss, policy_entropy))
print(final_rewards.mean())
if update % SAVE_INTERVAL == 0:
print('Saving model')
actor_critic.save(SAVE_PATH, save_name + '_' + str(update) + '.ckpt')
actor_critic.save(SAVE_PATH, save_name + '_done.ckpt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('algo', help='Algorithm to train either i2a or a2c')
args = parser.parse_args()
if args.algo == 'a2c':
policy = CnnPolicy
elif args.algo == 'i2a':
policy = I2aPolicy
else:
raise ValueError('Must specify the algo name as either a2c or i2a')
train(policy, args.algo, summarize=True, log_path=args.algo + '_logs')
|
py
|
1a585a0cbc71b9793d9c1e246beedab12eb0bdd4
|
#!/usr/bin/env python
"""
_DeleteFileset_
MySQL implementation of DeleteFileset
"""
__all__ = []
from WMCore.Database.DBFormatter import DBFormatter
class Delete(DBFormatter):
sql = "delete from wmbs_fileset where name = :fileset"
def getBinds(self, name = None):
return self.dbi.buildbinds(self.dbi.makelist(name), 'fileset')
def execute(self, name = None, conn = None, transaction = False):
self.dbi.processData(self.sql, self.getBinds(name),
conn = conn, transaction = transaction)
return True #or raise
|
py
|
1a585a316ed64599b2e0faa741de25f9be3d8352
|
"""
Code to extract some key info from the zresults*fits file that gets produced
after running zspec on calibrated DEIMOS data
"""
import sys
from astropy.io import fits as pf
from astropy.table import Table
maskname = sys.argv[1]
hdu = pf.open(maskname)
tdat = hdu[1].data
nobj = len(tdat)
for row in tdat:
    print('%-15s %-3s %-7s %1d %7.4f %g %s' %
          (row['objname'], row['slitname'], row['maskname'], row['zquality'],
           row['z'], row['z_err'], row['comment']))
|
py
|
1a585a813aa6de9782bf099d67602757ed58486e
|
"""
Test fiberassign target operations.
"""
import os
import subprocess
import re
import shutil
import unittest
from datetime import datetime
import json
import glob
import numpy as np
import fitsio
import desimodel
import fiberassign
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, TargetTree,
LocationsAvailable, load_target_file)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile)
from fiberassign.qa import qa_tiles, qa_targets
from fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date)
class TestQA(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Find the location of scripts. First try the case where we are running
# tests from the top level of the source tree.
cls.topDir = os.path.dirname( # top-level
os.path.dirname( # build/
os.path.dirname( # lib.arch/
os.path.dirname( # fiberassign/
os.path.dirname(os.path.abspath(__file__)) # test/
)
)
)
)
cls.binDir = os.path.join(cls.topDir, "bin")
if not os.path.isdir(cls.binDir):
# We are running from some other directory from an installed package
cls.topDir = os.path.dirname( # top-level
os.path.dirname( # lib/
os.path.dirname( # python3.x/
os.path.dirname( # site-packages/
os.path.dirname( # egg/
os.path.dirname( # fiberassign/
os.path.dirname(os.path.abspath(__file__)) # test/
)
)
)
)
)
)
cls.binDir = os.path.join(cls.topDir, "bin")
def setUp(self):
self.density_science = 5000
self.density_standards = 5000
self.density_sky = 10
self.density_suppsky = 5000
pass
def tearDown(self):
pass
def test_science(self):
set_matplotlib_pdf_backend()
import matplotlib.pyplot as plt
test_dir = test_subdir_create("qa_test_science")
log_file = os.path.join(test_dir, "log.txt")
np.random.seed(123456789)
input_mtl = os.path.join(test_dir, "mtl.fits")
# For this test, we will use just 2 science target classes, in order to verify
# we get approximately the correct distribution
sdist = [
(3000, 1, 0.25, "QSO"),
(2000, 1, 0.75, "ELG")
]
nscience = sim_targets(
input_mtl,
TARGET_TYPE_SCIENCE,
0,
density=self.density_science,
science_frac=sdist
)
log_msg = "Simulated {} science targets\n".format(nscience)
tgs = Targets()
load_target_file(tgs, input_mtl)
# Create a hierarchical triangle mesh lookup of the targets positions
tree = TargetTree(tgs, 0.01)
# Read hardware properties
fp, exclude, state = sim_focalplane(rundate=test_assign_date)
hw = load_hardware(focalplane=(fp, exclude, state))
tfile = os.path.join(test_dir, "footprint.fits")
sim_tiles(tfile)
tiles = load_tiles(tiles_file=tfile)
# Compute the targets available to each fiber for each tile.
tgsavail = TargetsAvailable(hw, tgs, tiles, tree)
# Free the tree
del tree
# Compute the fibers on all tiles available for each target
favail = LocationsAvailable(tgsavail)
# Pass empty map of STUCK positioners that land on good sky
stucksky = {}
# Create assignment object
asgn = Assignment(tgs, tgsavail, favail, stucksky)
# First-pass assignment of science targets
asgn.assign_unused(TARGET_TYPE_SCIENCE)
# Redistribute
asgn.redistribute_science()
write_assignment_fits(tiles, asgn, out_dir=test_dir, all_targets=True)
tile_ids = list(tiles.id)
merge_results(
[input_mtl], list(), tile_ids, result_dir=test_dir, copy_fba=False
)
# FIXME: In order to use the qa_targets function, we need to know the
# starting requested number of observations (NUMOBS_INIT). Then we can use
# that value for each target and compare to the number actually assigned.
# However, the NUMOBS_INIT column was removed from the merged TARGET table.
# If we are ever able to reach consensus on restoring that column, then we
# can re-enable these tests below.
#
# qa_targets(
# hw,
# tiles,
# result_dir=test_dir,
# result_prefix="fiberassign-"
# )
#
# # Load the target catalog so that we have access to the target properties
#
# fd = fitsio.FITS(input_mtl, "r")
# scidata = np.array(np.sort(fd[1].read(), order="TARGETID"))
# fd.close()
# del fd
#
# # How many possible positioner assignments did we have?
# nassign = 5000 * len(tile_ids)
#
# possible = dict()
# achieved = dict()
#
# namepat = re.compile(r".*/qa_target_count_(.*)_init-(.*)\.fits")
# for qafile in glob.glob("{}/qa_target_count_*.fits".format(test_dir)):
# namemat = namepat.match(qafile)
# name = namemat.group(1)
# obs = int(namemat.group(2))
# if obs == 0:
# continue
# fd = fitsio.FITS(qafile, "r")
# fdata = fd["COUNTS"].read()
# # Sort by target ID so we can select easily
# fdata = np.sort(fdata, order="TARGETID")
# tgid = np.array(fdata["TARGETID"])
# counts = np.array(fdata["NUMOBS_DONE"])
# avail = np.array(fdata["NUMOBS_AVAIL"])
# del fdata
# fd.close()
#
# # Select target properties. BOTH TARGET LISTS MUST BE SORTED.
# rows = np.where(np.isin(scidata["TARGETID"], tgid, assume_unique=True))[0]
#
# ra = np.array(scidata["RA"][rows])
# dec = np.array(scidata["DEC"][rows])
# dtarget = np.array(scidata["DESI_TARGET"][rows])
# init = np.array(scidata["NUMOBS_INIT"][rows])
#
# requested = obs * np.ones_like(avail)
#
# under = np.where(avail < requested)[0]
# over = np.where(avail > requested)[0]
#
# limavail = np.array(avail)
# limavail[over] = obs
#
# deficit = np.zeros(len(limavail), dtype=np.int)
#
# deficit[:] = limavail - counts
# deficit[avail == 0] = 0
#
# possible[name] = np.sum(limavail)
# achieved[name] = np.sum(counts)
#
# log_msg += "{}-{}:\n".format(name, obs)
#
# pindx = np.where(deficit > 0)[0]
# poor_tgid = tgid[pindx]
# poor_dtarget = dtarget[pindx]
# log_msg += " Deficit > 0: {}\n".format(len(poor_tgid))
# poor_ra = ra[pindx]
# poor_dec = dec[pindx]
# poor_deficit = deficit[pindx]
#
# # Plot Target availability
# # Commented out by default, since in the case of high target density
# # needed for maximizing assignments, there are far more targets than
# # the number of available fiber placements.
#
# # marksize = 4 * np.ones_like(deficit)
# #
# # fig = plt.figure(figsize=(12, 12))
# # ax = fig.add_subplot(1, 1, 1)
# # ax.scatter(ra, dec, s=2, c="black", marker="o")
# # for pt, pr, pd, pdef in zip(poor_tgid, poor_ra, poor_dec, poor_deficit):
# # ploc = plt.Circle(
# # (pr, pd), radius=(0.05*pdef), fc="none", ec="red"
# # )
# # ax.add_artist(ploc)
# # ax.set_xlabel("RA", fontsize="large")
# # ax.set_ylabel("DEC", fontsize="large")
# # ax.set_title(
# # "Target \"{}\": (min(avail, requested) - counts) > 0".format(
# # name, obs
# # )
# # )
# # #ax.legend(handles=lg, framealpha=1.0, loc="upper right")
# # plt.savefig(os.path.join(test_dir, "{}-{}_deficit.pdf".format(name, obs)), dpi=300, format="pdf")
#
# log_msg += \
# "Assigned {} tiles for total of {} possible target observations\n".format(
# len(tile_ids), nassign
# )
# ach = 0
# for nm in possible.keys():
# ach += achieved[nm]
# log_msg += \
# " type {} had {} possible target obs and achieved {}\n".format(
# nm, possible[nm], achieved[nm]
# )
# frac = 100.0 * ach / nassign
# log_msg += \
# " {} / {} = {:0.2f}% of fibers were assigned\n".format(
# ach, nassign, frac
# )
# for nm in possible.keys():
# log_msg += \
# " type {} had {:0.2f}% of achieved observations\n".format(
# nm, achieved[nm] / ach
# )
# with open(log_file, "w") as f:
# f.write(log_msg)
#
# self.assertGreaterEqual(frac, 99.0)
# Test if qa-fiberassign script runs without crashing
script = os.path.join(self.binDir, "qa-fiberassign")
if os.path.exists(script):
fafiles = glob.glob(f"{test_dir}/fiberassign-*.fits")
cmd = "{} --targets {}".format(script, " ".join(fafiles))
err = subprocess.call(cmd.split())
self.assertEqual(err, 0, f"FAILED ({err}): {cmd}")
else:
print(f"ERROR: didn't find {script}")
def test_suite():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
py
|
1a585b849651b6d353536689e246aa2062a7c8a9
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2021 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Fine-tuning pre-trained models for token classification tasks.
Heavily adapted from: https://github.com/huggingface/transformers/blob/
v3.0.1/examples/token-classification/run_ner.py"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import (
accuracy_score as seq_accuracy_score,
f1_score as seq_f1_score,
precision_score as seq_precision_score,
recall_score as seq_recall_score,
classification_report as seq_classification_report
)
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score
)
from torch import nn
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from utils import TokenClassificationDataSet, Split, get_labels
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are
going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from "
"huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if "
"not the same as model_name"}
)
# If you want to tweak more attributes on your tokenizer, you should do it
# in a distinct script, or just modify its tokenizer_config.json.
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if "
"not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to "
"use fast "
"tokenization."})
task_type: Optional[str] = field(
default="ner", metadata={"help": "the name of the task (ner or pos)"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the "
"pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for
training and eval.
"""
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .txt files "
"for a CoNLL-2003-formatted task."}
)
labels: Optional[str] = field(
default=None,
metadata={"help": "Path to a file containing all labels."},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after "
"tokenization. Sequences longer than this will be truncated, "
"sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and "
"evaluation sets"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments,
DataTrainingArguments,
TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a
# json file, let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(
sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists "
"and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=(logging.INFO if training_args.local_rank in [-1, 0]
else logging.WARN),
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, "
"16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Prepare task
labels = get_labels(data_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can
# concurrently download model & vocab.
config = AutoConfig.from_pretrained(
(model_args.config_name if model_args.config_name
else model_args.model_name_or_path),
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
(model_args.tokenizer_name if model_args.tokenizer_name
else model_args.model_name_or_path),
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
)
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
train_dataset = (
TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.train,
)
if training_args.do_train
else None
)
eval_dataset = (
TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.dev,
)
if training_args.do_eval
else None
)
def align_predictions(predictions: np.ndarray,
label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
for j in range(seq_len):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
return preds_list, out_label_list
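    # Worked sketch with made-up numbers (label_map is assumed to be {0: 'I', 1: 'B'}
    # for this toy case): predictions of shape (batch, seq_len, num_labels) are
    # argmax-ed over the label axis, and every position whose gold id equals the
    # CrossEntropyLoss ignore_index (-100) is dropped from both lists, so padding
    # and sub-word positions never enter the metrics.
    #
    #     preds = np.array([[[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]]])
    #     golds = np.array([[1, -100, 0]])
    #     align_predictions(preds, golds)  # -> ([['B', 'B']], [['B', 'I']])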
def compute_metrics(p: EvalPrediction) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions,
p.label_ids)
# If task type is NER, use seqeval metrics.
# Otherwise, use scikit learn
if model_args.task_type == "ner":
return {
"accuracy": seq_accuracy_score(out_label_list, preds_list),
"precision": seq_precision_score(out_label_list, preds_list),
"recall": seq_recall_score(out_label_list, preds_list),
"f1": seq_f1_score(out_label_list, preds_list),
"matrix": seq_classification_report(out_label_list, preds_list)
}
else:
# Flatten the preds_list and out_label_list
preds_list = [p for sublist in preds_list for p in sublist]
out_label_list = [p for sublist in out_label_list for p in sublist]
return {
"accuracy": accuracy_score(out_label_list, preds_list),
"precision_micro": precision_score(out_label_list, preds_list,
average="micro"),
"recall_micro": recall_score(out_label_list, preds_list,
average="micro"),
"f1_micro": f1_score(out_label_list, preds_list,
average="micro"),
"precision_macro": precision_score(out_label_list, preds_list,
average="macro"),
"recall_macro": recall_score(out_label_list, preds_list,
average="macro"),
"f1_macro": f1_score(out_label_list, preds_list,
average="macro"),
}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
trainer.train(
model_path=(model_args.model_name_or_path
if os.path.isdir(model_args.model_name_or_path)
else None)
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir,
"eval_results.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
# Predict
if training_args.do_predict:
test_dataset = TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.test,
)
predictions, label_ids, metrics = trainer.predict(test_dataset)
preds_list, _ = align_predictions(predictions, label_ids)
output_test_results_file = os.path.join(training_args.output_dir,
"test_results.txt")
if trainer.is_world_process_zero():
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir,
"test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
example_id = 0
for line in f:
if (line.startswith("-DOCSTART-") or line == ""
or line == "\n"):
writer.write(line)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = (line.split()[0] + " " +
preds_list[example_id].pop(0) + "\n")
writer.write(output_line)
else:
logger.warning(
"Maximum sequence length exceeded: "
"No prediction for '%s'.", line.split()[0])
return results
if __name__ == "__main__":
main()
|
py
|
1a585bf49ed0e057a61045b9ea585aad5007333b
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Glproto(AutotoolsPackage):
"""OpenGL Extension to the X Window System.
This extension defines a protocol for the client to send 3D rendering
commands to the X server."""
homepage = "https://www.x.org/wiki/"
url = "https://www.x.org/archive/individual/proto/glproto-1.4.17.tar.gz"
version('1.4.17', 'd69554c1b51a83f2c6976a640819911b')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
py
|
1a585d10179310c271459c596408936499b99576
|
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron.tests import base
from neutron_vpnaas.services.vpn.common import netns_wrapper as nswrap
class TestNetnsWrapper(base.BaseTestCase):
def setUp(self):
super(TestNetnsWrapper, self).setUp()
patch_methods = ['filter_command',
'execute',
'setup_conf']
for method in patch_methods:
self.patch_obj(nswrap, method)
patch_classes = ['neutron.common.config.setup_logging',
'os.path.isdir',
'os.path.samefile',
'sys.exit']
for cls in patch_classes:
self.patch_cls(cls)
self.filter_command.return_value = False
self.execute.return_value = 0
self.conf = mock.Mock()
self.conf.cmd = 'ls,-al'
self.conf.mount_paths = {'/foo': '/dir/foo',
'/var': '/dir/var'}
self.setup_conf.return_value = self.conf
self.conf.rootwrap_config = 'conf'
self.isdir.return_value = True
self.samefile.return_value = False
def patch_obj(self, obj, method):
_m = mock.patch.object(obj, method)
_mock = _m.start()
setattr(self, method, _mock)
def patch_cls(self, patch_class):
_m = mock.patch(patch_class)
mock_name = patch_class.split('.')[-1]
_mock = _m.start()
setattr(self, mock_name, _mock)
def test_netns_wrap_fail_without_netns(self):
self.samefile.return_value = True
return_val = nswrap.execute_with_mount()
self.assertTrue(return_val)
def test_netns_wrap(self):
self.conf.cmd = 'ls,-al'
return_val = nswrap.execute_with_mount()
exp_calls = [mock.call(['mount', '--bind', '/dir/foo', '/foo']),
mock.call(['mount', '--bind', '/dir/var', '/var']),
mock.call('ls,-al')]
self.execute.assert_has_calls(exp_calls, any_order=True)
self.assertFalse(return_val)
def test_netns_wrap_fail_without_cmd(self):
self.conf.cmd = None
return_val = nswrap.execute_with_mount()
self.assertFalse(self.execute.called)
self.assertTrue(return_val)
def test_netns_wrap_fail_without_mount_paths(self):
self.conf.mount_paths = None
return_val = nswrap.execute_with_mount()
self.assertFalse(self.execute.called)
self.assertTrue(return_val)
|
py
|
1a585e927d9b23d55ccfb3d92f3eaecfabdbc3f1
|
import argparse
import os
import random
import sys
import time
import struct
from collections import Counter
from collections import deque
from operator import itemgetter
from tempfile import NamedTemporaryFile as NTF
import SharedArray as sa
import numpy as np
from numba import jit
from text_embedding.documents import *
FLOAT = np.float32
INT = np.uint32
CHUNK = 1000000
STORE = 10*CHUNK
FMT = 'iif'
NBYTES = 12
def vocab_count(corpusfile, vocabfile=None, min_count=1, verbose=True, comm=None):
'''counts word occurrences to determine vocabulary
Args:
corpusfile: corpus .txt file
vocabfile: output .txt file
min_count: minimum word count
verbose: display progress
comm: MPI Communicator
Returns:
[(word, count)] list if vocabfile is None ; else None
'''
rank, size = ranksize(comm)
if verbose:
write('Counting Words with Minimum Count '+str(min_count)+'\n', comm)
t = time.time()
with open(corpusfile, 'r') as f:
documents = (line for i, line in enumerate(f) if i%size == rank)
counts = Counter(w for doc in documents for w in doc.split())
if size > 1:
counts = comm.reduce(counts, root=0)
if not rank:
vocab = sorted((item for item in counts.items() if item[1] >= min_count), key=itemgetter(1), reverse=True)
if verbose:
write('Counted '+str(len(vocab))+' Words, Time='+str(round(time.time()-t))+' sec\n')
if vocabfile is None:
checkpoint(comm)
return vocab
with open(vocabfile, 'w') as f:
for word, count in vocab:
f.write(word+' '+str(count)+'\n')
checkpoint(comm)
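# Illustrative single-process sketch (file name is a placeholder): with comm=None the
# function just counts whitespace-separated tokens in the corpus, drops words seen
# fewer than min_count times and returns the vocabulary sorted by descending count.
#
#     vocab = vocab_count('corpus.txt', min_count=5, verbose=False, comm=None)
#     # vocab -> [('the', 10412), ('of', 6021), ...]   (counts here are invented)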
@jit
def doc2cooc(indices, weights, window_size, V):
row, col, val = [], [], []
start = 0
for i, index in enumerate(indices):
if index != V:
for w, other in zip(weights[start-i:], indices[start:i]):
if other != V:
if index < other:
row.append(index)
col.append(other)
else:
row.append(other)
col.append(index)
val.append(w)
start += i >= window_size
return row, col, val
@jit
def doc2cooc_unweighted(indices, window_size, V):
row, col = [], []
start = 0
for i, index in enumerate(indices):
if index != V:
for other in indices[start:i]:
if other != V:
if index < other:
row.append(index)
col.append(other)
else:
row.append(other)
col.append(index)
start += i >= window_size
return row, col
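# Worked sketch (illustrative): for the in-vocabulary index sequence [0, 1, 2] with
# window_size=2 and V=3, each pair inside the window is emitted once with the smaller
# index first, i.e. doc2cooc_unweighted(np.array([0, 1, 2], dtype=INT), 2, INT(3))
# returns ([0, 0, 1], [1, 2, 2]); the weighted variant additionally records the
# 1/distance weight for each pair in `val`.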
def counts2bin(counts, f):
for (i, j), v in counts.items():
f.write(struct.pack(FMT, i, j, v))
def bin2counts(f, counts, subset):
position = f.tell()
ncooc = int((f.seek(0, 2)-position)/NBYTES)
f.seek(position)
for cooc in range(ncooc):
i, j, v = struct.unpack(FMT, f.read(NBYTES))
if i in subset:
counts[(i, j)] += v
# NOTE: Result is highly non-random and contains only upper triangular entries
def cooc_count(corpusfile, vocabfile, coocfile, window_size=10, unweighted=False, verbose=True, comm=None):
'''counts word cooccurrence in a corpus
Args:
corpusfile: corpus .txt file
vocabfile: vocab .txt file
coocfile: cooccurrence .bin file
window_size: length of cooccurrence window
unweighted: do not weight cooccurrence by distance
verbose: display progress
comm: MPI Communicator
Returns:
None
'''
rank, size = ranksize(comm)
with open(vocabfile, 'r') as f:
word2index = {line.split()[0]: INT(i) for i, line in enumerate(f)}
if unweighted:
one = FLOAT(1)
else:
weights = np.fromiter((1.0/d for d in reversed(range(1, window_size+1))), FLOAT, window_size)
V = INT(len(word2index))
counts = Counter()
if verbose:
write('\rCounting Cooccurrences with Window Size '+str(window_size)+'\n', comm)
lines = 0
t = time.time()
if size > 1:
random.seed(0)
idx = list(range(V))
random.shuffle(idx)
start, stop = int(rank/size*V), int((rank+1)/size*V)
subset = set(idx[start:stop])
positions = [0]*size
with open(corpusfile, 'r') as f:
n = 0
while True:
v = None
with NTF() as tmp:
dump = Counter()
files = comm.allgather(tmp.name)
for k, line in enumerate(f):
if k%size == rank:
doc = line.split()
if unweighted:
for i, j in zip(*doc2cooc_unweighted(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), window_size, V)):
if i in subset:
counts[(i, j)] += one
else:
dump[(i, j)] += one
else:
for i, j, v in zip(*doc2cooc(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), weights, window_size, V)):
if i in subset:
counts[(i, j)] += v
else:
dump[(i, j)] += v
if not (k+1)%CHUNK:
counts2bin(dump, tmp)
dump = Counter()
if verbose:
write('\rProcessed '+str(n+k+1)+' Lines, Time='+str(round(time.time()-t))+' sec', comm)
if not (k+1)%STORE:
n += k+1
break
counts2bin(dump, tmp)
tmp.flush()
for k in range(2):
for i, name in enumerate(files):
if i != rank:
with open(name, 'rb') as g:
g.seek(positions[i])
bin2counts(g, counts, subset)
positions[i] = g.tell() * (k == 0)
checkpoint(comm)
if verbose:
write('\rProcessed '+str(n)+' Lines, Time='+str(round(time.time()-t))+' sec', comm)
if not comm.allreduce(int(not v is None)):
break
if verbose:
write('\rCounted '+str(comm.allreduce(len(counts.items())))+' Cooccurrences, Time='+str(round(time.time()-t))+' sec\n', comm)
for k in range(size):
if k == rank:
mode = 'ab' if rank else 'wb'
with open(coocfile, mode) as f:
counts2bin(counts, f)
checkpoint(comm)
else:
with open(corpusfile, 'r') as f:
for k, line in enumerate(f):
doc = line.split()
if unweighted:
for i, j in zip(*doc2cooc_unweighted(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), window_size, V)):
counts[(i, j)] += one
else:
for i, j, v in zip(*doc2cooc(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), weights, window_size, V)):
counts[(i, j)] += v
if verbose and not (k+1)%CHUNK:
write('\rProcessed '+str(k+1)+' Lines, Time='+str(round(time.time()-t))+' sec')
if verbose:
write('\rCounted '+str(len(counts.items()))+' Cooccurrences, Time='+str(round(time.time()-t))+' sec\n')
with open(coocfile, 'wb') as f:
counts2bin(counts, f)
def reformat_coocfile(inputfile, outputfile):
'''converts full-matrix cooccurrence file upper-triangular cooccurrence file
Args:
inputfile: full-matrix binary cooccurrence file with index starting at 1 in format "int,int,double" (as created by original GloVe code)
outputfile: ouput binary file
Returns:
None
'''
with open(inputfile, 'rb') as f:
with open(outputfile, 'wb') as g:
while True:
try:
i, j, d = struct.unpack('iid', f.read(16))
except struct.error:
break
if i <= j:
g.write(struct.pack(FMT, INT(i-1), INT(j-1), FLOAT(d)))
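# Hypothetical usage (file names are placeholders): convert a cooccurrence file written
# by the reference GloVe tools into the upper-triangular float32 format that symcooc
# and the GloVe class below expect.
#
#     reformat_coocfile('cooccurrence.glove.bin', 'cooccurrence.upper.bin')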
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class SharedArrayManager:
_shared = []
def __init__(self, comm=None):
self._comm = comm
self._rank, self._size = ranksize(comm)
def __enter__(self):
return self
def __exit__(self, *args):
for array in self._shared:
try:
sa.delete(array)
except FileNotFoundError:
pass
def create(self, array=None, dtype=None):
comm, rank = self._comm, self._rank
if rank:
shared = sa.attach(comm.bcast(None, root=0))
else:
dtype = array.dtype if dtype is None else dtype
if self._size == 1:
return array.astype(dtype)
filename = str(time.time())
shared = sa.create(filename, array.shape, dtype=dtype)
shared += array.astype(dtype)
self._shared.append(comm.bcast(filename, root=0))
checkpoint(comm)
return shared
def splitcooc(f, ncooc=None):
row = deque()
col = deque()
if ncooc is None:
position = f.tell()
ncooc = int((f.seek(0, 2)-position)/NBYTES)
f.seek(position)
for cooc in range(ncooc):
i, j, xij = struct.unpack(FMT, f.read(NBYTES))
row.append(INT(i))
col.append(INT(j))
yield FLOAT(xij)
for idx in [row, col]:
for cooc in range(ncooc):
yield idx.popleft()
def symcooc(coocfile, comm=None):
rank, size = ranksize(comm)
with open(coocfile, 'rb') as f:
flength = f.seek(0, 2)
offset = int(flength*rank/size / NBYTES)
ncooc = int(flength*(rank+1)/size / NBYTES) - offset
f.seek(NBYTES*offset)
coocs = splitcooc(f, ncooc)
val = np.fromiter(coocs, FLOAT, ncooc)
row = np.fromiter(coocs, INT, ncooc)
col = np.fromiter(coocs, INT, ncooc)
sym = row < col
symcooc = ncooc + sum(sym)
values, rowdata, coldata = [np.empty(symcooc, dtype=dtype) for dtype in [FLOAT, INT, INT]]
values[:ncooc], rowdata[:ncooc], coldata[:ncooc] = val, row, col
values[ncooc:], rowdata[ncooc:], coldata[ncooc:] = val[sym], col[sym], row[sym]
return values, rowdata, coldata
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class GloVe(SharedArrayManager):
def _load_cooc_data(self, coocfile, alpha, xmax):
data, self.row, self.col = symcooc(coocfile, self._comm)
self.logcooc = np.log(data)
data /= FLOAT(xmax)
mask = data<1.0
data[mask] **= FLOAT(alpha)
data[~mask] = FLOAT(1.0)
self.weights = data
self.ncooc = data.shape[0]
self._cooc_data = [self.row, self.col, self.weights, self.logcooc]
def _shuffle_cooc_data(self, seed):
for data in self._cooc_data:
np.random.seed(seed)
np.random.shuffle(data)
@staticmethod
def _shapes(V, d):
return [(V, d)]*2 + [(V,)]*2
def _init_vecs(self, shapes, d, seed, init):
create = self.create
if self._rank:
self._params = [create() for shape in shapes]
elif init is None:
np.random.seed(seed)
self._params = [create((np.random.rand(*shape)-0.5)/d, dtype=FLOAT) for shape in shapes]
else:
self._params = [create(param, dtype=FLOAT) for param in init]
def __init__(self, coocfile, V=None, d=None, seed=None, init=None, alpha=0.75, xmax=100.0, comm=None):
'''
Args:
coocfile: binary cooccurrence file (assumed to have only upper triangular entries)
V: vocab size
d: vector dimension
seed: random seed for initializing vectors
init: tuple of numpy arrays to initialize parameters
alpha: GloVe weighting parameter
xmax: GloVe max cooccurrence parameter
comm: MPI Communicator
'''
super().__init__(comm=comm)
self._load_cooc_data(coocfile, alpha, xmax)
assert not (init is None and (V is None or d is None)), "'V' and 'd' must be defined if 'init' not given"
self._init_vecs(self._shapes(V, d), d, seed, init)
def embeddings(self):
'''returns GloVe embeddings using current parameters
Returns:
numpy array of size V x d
'''
return sum(self._params[:2]) / FLOAT(2.0)
def dump(self, fid):
'''dumps GloVe embeddings to binary file
Args:
fid: open file object or filename string
Returns:
None
'''
if not self._rank:
self.embeddings().tofile(fid)
_pnames = ['wv', 'cv', 'wb', 'cb']
_numpar = 4
def save(self, fid):
'''saves parameters to HDF5 file
Args:
fid: filename string
Returns:
None
'''
import h5py
if not self._rank:
f = h5py.File(fid)
for name, param in zip(self._pnames, self._params[:self._numpar]):
f.create_dataset(name, data=param)
f.close()
@staticmethod
@jit
def predict(i, j, wv, cv, wb, cb):
return np.dot(wv[i].T, cv[j])+wb[i]+cb[j]
def loss(self):
row, col = self.row, self.col
ncooc = self.ncooc
checkpoint(self._comm)
params = self._params[:self._numpar]
predict = self.predict
errors = np.fromiter((predict(i, j, *params) for i, j in zip(row, col)), FLOAT, ncooc) - self.logcooc
loss = np.inner(self.weights*errors, errors)
if self._size > 1:
ncooc = self._comm.allreduce(ncooc)
return self._comm.allreduce(loss/ncooc)
return loss/ncooc
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, ncooc, eta):
etax2 = FLOAT(2.0*eta)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj = wv[i], cv[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = werror*etax2
upd = coef*cvj
cvj -= coef*wvi
wvi -= upd
wb[i] -= coef
cb[j] -= coef
loss += werror*error
return loss / ncooc
def sgd(self, epochs=25, eta=0.01, seed=None, verbose=True, cumulative=True):
'''runs SGD on GloVe objective
Args:
epochs: number of epochs
eta: learning rate
seed: random seed for cooccurrence shuffling
verbose: write loss and time information
cumulative: compute cumulative loss instead of true loss; ignored if not verbose
Returns:
None
'''
comm, rank, size = self._comm, self._rank, self._size
random.seed(seed)
if verbose:
write('\rRunning '+str(epochs)+' Epochs of SGD with Learning Rate '+str(eta)+'\n', comm)
if not cumulative:
write('\rInitial Loss='+str(self.loss())+'\n', comm)
ncooc = comm.allreduce(self.ncooc)
t = time.time()
for ep in range(epochs):
if verbose:
write('Epoch '+str(ep+1), comm)
self._shuffle_cooc_data(random.randint(0, 2**32-1))
loss = self.sgd_epoch(*self._cooc_data, *self._params, ncooc, eta)
if verbose:
loss = comm.allreduce(loss) if cumulative else self.loss()
checkpoint(comm)
if verbose:
write(': Loss='+str(loss)+', Time='+str(round(time.time()-t))+' sec\n', comm)
t = time.time()
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, ssg_wv, ssg_cv, ssg_wb, ssg_cb, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj = wv[i], cv[j]
ssg_wvi, ssg_cvj = ssg_wv[i], ssg_cv[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = two*werror
updi = coef*cvj
updj = coef*wvi
reg_wvi = np.sqrt(ssg_wvi)
reg_cvj = np.sqrt(ssg_cvj)
ssg_wvi += updi ** 2
ssg_cvj += updj ** 2
wvi -= eta * updi / reg_wvi
cvj -= eta * updj / reg_cvj
reg_wbi = np.sqrt(ssg_wb[i])
reg_cbj = np.sqrt(ssg_cb[j])
coefsq = coef ** 2
ssg_wb[i] += coefsq
ssg_cb[j] += coefsq
coef *= eta
wb[i] -= coef / reg_wbi
cb[j] -= coef / reg_cbj
loss += werror*error
return loss / ncooc
def adagrad(self, epochs=25, eta=0.05, seed=None, verbose=True, cumulative=True):
'''runs AdaGrad on GloVe objective
Args:
epochs: number of epochs
eta: learning rate
seed: random seed for cooccurrence shuffling
verbose: write loss and time information
cumulative: compute cumulative loss instead of true loss; ignored if not verbose
Returns:
None
'''
comm, rank, size = self._comm, self._rank, self._size
random.seed(seed)
if not hasattr(self, '_ssg'):
self._ssg = [self.create(np.ones(param.shape, dtype=FLOAT)) for param in self._params[:self._numpar]]
if verbose:
write('\rRunning '+str(epochs)+' Epochs of AdaGrad with Learning Rate '+str(eta)+'\n', comm)
if not cumulative:
write('\rInitial Loss='+str(self.loss())+'\n', comm)
ncooc = comm.allreduce(self.ncooc)
t = time.time()
for ep in range(epochs):
if verbose:
write('Epoch '+str(ep+1), comm)
self._shuffle_cooc_data(random.randint(0, 2**32-1))
loss = self.adagrad_epoch(*self._cooc_data, *self._params, *self._ssg, ncooc, eta)
if verbose:
loss = comm.allreduce(loss) if cumulative else self.loss()
checkpoint(comm)
if verbose:
write(': Loss='+str(loss)+', Time='+str(round(time.time()-t))+' sec\n', comm)
t = time.time()
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class SN(GloVe):
@staticmethod
def _shapes(V, d):
return [(V, d), (1,)]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def embeddings(self):
return self._params[0]
_pnames = ['wv', 'b']
_numpar = 2
@staticmethod
@jit
def predict(i, j, wv, b):
sumij = wv[i] + wv[j]
return np.dot(sumij.T, sumij) + b[0]
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, b, ncooc, eta):
etax2 = FLOAT(2.0*eta)
two = FLOAT(2.0)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj = wv[i], wv[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = werror*etax2
b -= coef
upd = (two*coef)*sumij
wvi -= upd
wvj -= upd
loss += werror * error
return loss / ncooc
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, b, ssg_wv, ssg_b, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj = wv[i], wv[j]
ssg_wvi, ssg_wvj = ssg_wv[i], ssg_wv[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = two*werror
reg_b = np.sqrt(ssg_b)
ssg_b += coef ** 2
            b -= eta*coef / reg_b
upd = (two*coef)*sumij
updsq = upd ** 2
reg_wvi = np.sqrt(ssg_wvi)
ssg_wvi += updsq
reg_wvj = np.sqrt(ssg_wvj)
ssg_wvj += updsq
upd *= eta
wvi -= upd / reg_wvi
wvj -= upd / reg_wvj
loss += werror * error
return loss / ncooc
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class RegularizedGloVe(GloVe):
def _word_cooc_counts(self, V):
counts = Counter(self.row)+Counter(self.col)
array = np.fromiter((counts[i] for i in range(V)), INT, V)
if self._size > 1:
output = None if self._rank else np.empty(V, dtype=INT)
self._comm.Reduce(array, output, root=0)
return output
return array
def __init__(self, src, *args, reg=1.0, **kwargs):
super().__init__(*args, **kwargs)
create = self.create
params = self._params
params.append(create(src, dtype=FLOAT))
params.append(FLOAT(reg))
params.append(create(self._word_cooc_counts(src.shape[0]), dtype=FLOAT))
oloss = self.loss
if self._rank:
self.loss = lambda: oloss() + self._comm.bcast(None, root=0)
else:
rloss = lambda: reg/src.shape[0]*norm(self.embeddings()-src)**2
if self._size > 1:
self.loss = lambda: oloss() + self._comm.bcast(rloss(), root=0)
else:
self.loss = lambda: oloss() + rloss()
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, src, reg, wcc, ncooc, eta):
etax2 = FLOAT(2.0*eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(eta * ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj, wcci, wccj = wv[i], cv[j], wcc[i], wcc[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = werror*etax2
diffi = (wvi+cv[i])/two - src[i]
diffj = (wv[j]+cvj)/two - src[j]
upd = coef*cvj + (regcoef/wcci)*diffi
cvj -= coef*wvi + (regcoef/wccj)*diffj
wvi -= upd
wb[i] -= coef
cb[j] -= coef
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, src, reg, wcc, ssg_wv, ssg_cv, ssg_wb, ssg_cb, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj, wcci, wccj = wv[i], cv[j], wcc[i], wcc[j]
ssg_wvi, ssg_cvj = ssg_wv[i], ssg_cv[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = two*werror
diffi = (wvi+cv[i])/two - src[i]
diffj = (wv[j]+cvj)/two - src[j]
updi = coef*cvj + (regcoef/wcci)*diffi
updj = coef*wvi + (regcoef/wccj)*diffj
reg_wvi = np.sqrt(ssg_wvi)
reg_cvj = np.sqrt(ssg_cvj)
ssg_wvi += updi ** 2
ssg_cvj += updj ** 2
wvi -= eta * updi / reg_wvi
cvj -= eta * updj / reg_cvj
reg_wbi = np.sqrt(ssg_wb[i])
reg_cbj = np.sqrt(ssg_cb[j])
coefsq = coef ** 2
ssg_wb[i] += coefsq
ssg_cb[j] += coefsq
coef *= eta
wb[i] -= coef / reg_wbi
cb[j] -= coef / reg_cbj
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class RegularizedSN(SN, RegularizedGloVe):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, b, src, reg, wcc, ncooc, eta):
etax2 = FLOAT(2.0*eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(etax2 * ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj, wcci, wccj = wv[i], wv[j], wcc[i], wcc[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = werror*etax2
b -= coef
diffi = wvi - src[i]
diffj = wvj - src[j]
upd = (two*coef)*sumij
wvi -= upd + (regcoef/wcci)*diffi
wvj -= upd + (regcoef/wccj)*diffj
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, b, src, reg, wcc, ssg_wv, ssg_b, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj, wcci, wccj = wv[i], wv[j], wcc[i], wcc[j]
ssg_wvi, ssg_wvj = ssg_wv[i], ssg_wv[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = two*werror
reg_b = np.sqrt(ssg_b)
ssg_b += coef ** 2
            b -= eta*coef / reg_b
diffi = wvi - src[i]
diffj = wvj - src[j]
upd = (two*coef)*sumij
updi = upd + (regcoef/wcci)*diffi
updj = upd + (regcoef/wccj)*diffj
regi = np.sqrt(ssg_wvi)
regj = np.sqrt(ssg_wvj)
ssg_wvi += updi ** 2
ssg_wvj += updj ** 2
            wvi -= eta * updi / regi
            wvj -= eta * updj / regj
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
def align_params(params, srcvocab, tgtvocab, mean_fill=True):
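    '''re-indexes parameter arrays from a source vocabulary to a target vocabulary
    Args:
        params: list of numpy arrays whose first dimension is aligned with srcvocab; (1,)-shaped params are passed through unchanged
        srcvocab: list of source words, one per row of each array in params
        tgtvocab: list of target words defining the output row order
        mean_fill: fill rows for words missing from srcvocab with the column mean; if False fill with zeros
    Returns:
        list of numpy arrays aligned with tgtvocab
    '''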
output = []
for param in params:
if len(param.shape) == 1:
if param.shape[0] == 1:
output.append(param)
continue
shape = (len(tgtvocab),)
default = np.mean(param)
else:
shape = (len(tgtvocab), param.shape[1])
default = np.mean(param, axis=0)
array = np.empty(shape, dtype=FLOAT)
if not mean_fill:
default *= FLOAT(0.0)
w2e = dict(zip(srcvocab, param))
for i, w in enumerate(tgtvocab):
array[i] = w2e.get(w, default)
output.append(array)
return output
def induce_embeddings(srcvocab, srccooc, srcvecs, tgtvocab, tgtcooc, comm=None):
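    '''induces embeddings for a target vocabulary from source embeddings and cooccurrences
    Args:
        srcvocab: list of source words
        srccooc: binary cooccurrence file for the source corpus
        srcvecs: numpy array of source embeddings (V x d)
        tgtvocab: list of target words
        tgtcooc: binary cooccurrence file for the target corpus
        comm: MPI Communicator
    Returns:
        numpy array of induced target embeddings on rank 0; None on other ranks
    '''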
from scipy import sparse as sp
from sklearn.linear_model import LinearRegression as LR
rank, size = ranksize(comm)
Vsrc, d = srcvecs.shape
Vtgt = len(tgtvocab)
with SharedArrayManager(comm=comm) as sam:
write('Loading Source Cooccurrences\n', comm)
data, row, col = symcooc(srccooc, comm)
srcvecs = sam.create(srcvecs, dtype=FLOAT)
X = sp.csr_matrix((data, (row, col)), shape=(Vsrc, Vsrc), dtype=FLOAT)
write('Computing Source Counts\n', comm)
if size > 1:
C = None if rank else np.empty(Vsrc, dtype=FLOAT)
comm.Reduce(np.array(X.sum(1))[:,0], C, root=0)
C = sam.create(C)
else:
C = np.array(X.sum(1))[:,0]
write('Building Source Context Vectors\n', comm)
if size > 1:
U = None if rank else np.empty((Vsrc, d), dtype=FLOAT)
comm.Reduce(X.dot(srcvecs), U, root=0)
U = sam.create(U)
else:
U = X.dot(srcvecs)
U = U[C>0]
C = C[C>0]
start, stop = int(rank/size*Vsrc), int((rank+1)/size*Vsrc)
U[start:stop] /= C[start:stop, None]
checkpoint(comm)
write('Learning Induction Matrix\n', comm)
M = sam.create(np.zeros((d, d), dtype=FLOAT))
start, stop = int(rank/size*d), int((rank+1)/size*d)
M[:,start:stop] = LR(fit_intercept=False).fit(X[:,start:stop], srcvecs).coef_
checkpoint(comm)
write('Loading Target Cooccurrences\n', comm)
data, row, col = symcooc(tgtcooc, comm)
tgt2idx = {w: i for i, w in enumerate(tgtvocab)}
tgt2src = {tgt2idx.get(w): i for i, w in enumerate(srcvocab)}
zero = FLOAT(0.0)
for i, j in enumerate(col):
try:
col[i] = tgt2src[j]
except KeyError:
data[i] = zero
X = sp.csr_matrix((data, (row, col)), shape=(Vtgt, Vsrc), dtype=FLOAT)
X.eliminate_zeros()
write('Computing Target Counts\n', comm)
if size > 1:
C = None if rank else np.empty(Vtgt, dtype=FLOAT)
comm.Reduce(np.array(X.sum(1))[:,0], C, root=0)
C = sam.create(C)
else:
C = np.array(X.sum(1))[:,0]
write('Building Target Context Vectors\n', comm)
rank, size = ranksize(comm)
if size > 1:
U = None if rank else np.empty((Vtgt, d), dtype=FLOAT)
comm.Reduce(X.dot(srcvecs), U, root=0)
U = sam.create(U)
else:
U = X.dot(srcvecs)
nz = sum(C>0)
start, stop = int(rank/size*nz), int((rank+1)/size*nz)
U[C>0][start:stop] /= C[C>0][start:stop, None]
write('Computing Induced Embeddings\n', comm)
tgtvecs = sam.create(np.zeros((Vtgt, d), dtype=FLOAT))
tgtvecs[start:stop] = U[start:stop].dot(M.T)
checkpoint(comm)
if not rank:
return tgtvecs
def main(args, comm=None):
if args.mode == 'vocab' or args.mode[:4] in 'thru':
vocab_count(args.input, args.vocab, args.min_count, args.verbose, comm)
if args.mode == 'cooc' or args.mode[:4] in 'thru':
cooc_count(args.input, args.vocab, args.cooc, args.window_size, args.unweighted, args.verbose, comm)
Embedding = GloVe if args.mode[-5:] == 'glove' else SN if args.mode[-2:] == 'sn' else None
if Embedding is None:
if not args.mode in {'vocab', 'cooc', 'thru-cooc'}:
raise(NotImplementedError)
return
with open(args.vocab, 'r') as f:
V = len(f.readlines())
with Embedding(args.cooc, V, args.dimension, alpha=args.alpha, xmax=args.xmax, comm=comm) as E:
if args.sgd:
E.sgd(args.niter, args.eta, verbose=args.verbose)
else:
E.adagrad(args.niter, args.eta, verbose=args.verbose)
E.dump(args.output)
def parse():
parser = argparse.ArgumentParser(prog='python text_embeddings/solvers.py')
parser.add_argument('mode', help="'vocab', 'cooc', 'glove', 'sn', 'thru-cooc', 'thru-glove', or 'thru-sn'")
parser.add_argument('vocab', help='vocabulary .txt file')
parser.add_argument('-i', '--input', help='corpus .txt file')
parser.add_argument('-c', '--cooc', help='cooccurrence .bin file')
parser.add_argument('-o', '--output', help='embedding .bin file')
parser.add_argument('-m', '--min_count', default=1, help='minimum word count in corpus', type=int)
parser.add_argument('-w', '--window_size', default=10, help='size of cooccurrence window', type=int)
parser.add_argument('-u', '--unweighted', action='store_true', help='no distance weighting')
parser.add_argument('-d', '--dimension', default=300, help='embedding dimension', type=int)
parser.add_argument('-x', '--xmax', default=100.0, help='maximum cooccurrence', type=float)
parser.add_argument('-a', '--alpha', default=0.75, help='weighting exponent', type=float)
parser.add_argument('-s', '--sgd', action='store_true', help='use SGD')
parser.add_argument('-n', '--niter', default=25, help='number of training epochs', type=int)
parser.add_argument('-e', '--eta', default=0.05, help='learning rate', type=float)
parser.add_argument('-v', '--verbose', action='store_true', help='display output')
return parser.parse_args()
if __name__ == '__main__':
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
except ImportError:
comm = None
main(parse(), comm=comm)
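# Example invocations, shown as a sketch only (the corpus/vocab/output file names are placeholders):
#   python text_embeddings/solvers.py thru-glove vocab.txt -i corpus.txt -c cooc.bin -o vectors.bin -d 100 -n 25 -v
# builds the vocabulary and cooccurrence counts and then trains GloVe embeddings.
# If mpi4py is installed, the same script can be launched under MPI for data-parallel training, e.g.:
#   mpirun -np 4 python text_embeddings/solvers.py glove vocab.txt -c cooc.bin -o vectors.bin -v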
|
py
|
1a58603f5a0b9252a83b356150fafd44a32d9225
|
# Copyright (c) 2019-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import multiprocessing
import os
import signal
import sys
from multiprocessing import Event
from pathlib import Path
from typing import Any, Dict, List, NamedTuple
from .filesystem import acquire_lock, remove_if_exists
from .process import register_unique_process
LOG: logging.Logger = logging.getLogger(__name__)
Subscription = NamedTuple(
"Subscription", [("root", str), ("name", str), ("subscription", Dict[str, Any])]
)
class WatchmanSubscriber(object):
def __init__(self, base_path: str) -> None:
self._base_path: str = base_path
self._alive: bool = True
self._ready: multiprocessing.synchronize.Event = Event()
@property
def _name(self) -> str:
"""
A name to identify the subscriber. Used as the directory and file names
for the log, lock, and pid files.
"""
raise NotImplementedError
@property
def _subscriptions(self) -> List[Subscription]:
"""
List of subscriptions
"""
raise NotImplementedError
def _handle_response(self, response: Dict[str, Any]) -> None:
"""
Callback invoked when a message is received from watchman
"""
raise NotImplementedError
@staticmethod
def _compute_pid_path(base_path: str, name: str) -> str:
return str(Path(base_path, f"{name}.pid"))
@property
@functools.lru_cache(1)
def _watchman_client(self) -> "pywatchman.client": # noqa
try:
import pywatchman # noqa
# The client will block indefinitely when timeout is None.
return pywatchman.client(timeout=None)
except ImportError as exception:
LOG.info("Not starting %s due to %s", self._name, str(exception))
sys.exit(1)
def _subscribe_to_watchman(self, subscription: Subscription) -> None:
self._watchman_client.query("watch", subscription.root)
self._watchman_client.query(
"subscribe", subscription.root, subscription.name, subscription.subscription
)
def _run(self) -> None:
try:
os.makedirs(self._base_path)
except OSError:
pass
lock_path: str = os.path.join(self._base_path, "{}.lock".format(self._name))
LOG.debug(f"WatchmanSubscriber: Trying to acquire lock file {lock_path}.")
def cleanup() -> None:
LOG.info("Cleaning up lock and pid files before exiting.")
remove_if_exists(lock_path)
def interrupt_handler(_signal_number=None, _frame=None) -> None:
LOG.info("Interrupt signal received.")
cleanup()
sys.exit(0)
signal.signal(signal.SIGINT, interrupt_handler)
# Die silently if unable to acquire the lock.
with acquire_lock(lock_path, blocking=False), (
register_unique_process(
os.getpid(), self._compute_pid_path(self._base_path, self._name)
)
):
LOG.debug("Acquired lock on %s", lock_path)
file_handler = logging.FileHandler(
os.path.join(self._base_path, "%s.log" % self._name), mode="w"
)
file_handler.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(message)s")
)
LOG.addHandler(file_handler)
subscriptions = self._subscriptions
for subscription in subscriptions:
self._subscribe_to_watchman(subscription)
if not subscriptions:
LOG.info("No watchman roots to subscribe to.")
connection = self._watchman_client.recvConn
if not connection:
LOG.error("Connection to Watchman for %s not found", self._name)
sys.exit(1)
while self._alive:
# This call is blocking, which prevents this loop from burning CPU.
response = connection.receive()
if response.get("is_fresh_instance", False):
LOG.info(
"Ignoring initial watchman message for %s",
response.get("root", "<no-root-found>"),
)
else:
self._handle_response(response)
self._ready.set() # At least one message has been received.
cleanup()
def daemonize(self) -> None:
"""We double-fork here to detach the daemon process from the parent.
If we were to just fork the child as a daemon, we'd have to worry about the
        parent process exiting and zombifying the daemon."""
LOG.debug("Daemonizing the %s.", self._name)
if os.fork() == 0:
pid = os.fork()
if pid == 0:
try:
LOG.propagate = False
# Closing the sys.stdout and stderr file descriptors here causes
# the program to crash when attempting to log.
os.close(sys.stdout.fileno())
os.close(sys.stderr.fileno())
self._run()
sys.exit(0)
except Exception as exception:
LOG.info("Not running %s due to %s", self._name, str(exception))
sys.exit(1)
else:
sys.exit(0)
@staticmethod
def stop_subscriber(base_path: str, subscriber_name: str) -> None:
try:
pid_path = Path(
WatchmanSubscriber._compute_pid_path(base_path, subscriber_name)
)
pid = int(pid_path.read_text())
os.kill(pid, signal.SIGINT)
LOG.debug("Stopped the %s with pid %d.", subscriber_name, pid)
except FileNotFoundError:
LOG.debug(f"Could not stop the {subscriber_name} because it was not found.")
except (OSError, ValueError) as exception:
LOG.debug(
f"Could not stop the {subscriber_name} "
f"because of exception `{exception}`."
)
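# The sketch below illustrates how the abstract hooks above are meant to be filled in.
# It is an illustration only: the class name, the watched root handling, and the watchman
# subscription query are assumptions and are not part of the original module.
class ExampleFileChangeSubscriber(WatchmanSubscriber):
    def __init__(self, base_path: str, watched_root: str) -> None:
        super().__init__(base_path)
        self._watched_root = watched_root
    @property
    def _name(self) -> str:
        # Determines the names of the .log/.lock/.pid files under base_path.
        return "example_file_change_subscriber"
    @property
    def _subscriptions(self) -> List[Subscription]:
        # Subscribe to changes to Python files under the watched root.
        subscription = {"expression": ["suffix", "py"], "fields": ["name"]}
        return [
            Subscription(
                root=self._watched_root,
                name="example_python_files",
                subscription=subscription,
            )
        ]
    def _handle_response(self, response: Dict[str, Any]) -> None:
        # Called for every non-initial message delivered by watchman.
        LOG.info("Files changed: %s", response.get("files", []))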
|
py
|
1a58629773310e018336581634613ff134ff2a41
|
def compare(before, after):
def extract(f):
for i in open(f):
if i.startswith(' '):
yield i.strip()
bwords = set(extract(before))
awords = set(extract(after))
print(len(bwords), len(awords))
    print('Removed:')
    for w in sorted(bwords - awords):
        print(' ', w)
    print('Added:')
    for w in sorted(awords - bwords):
        print(' ', w)
if __name__ == '__main__':
import sys
compare(*sys.argv[1:])
|
py
|
1a5863ad446037a3cdc3adbbd1f4adafcd868e85
|
from devito.ir.iet import IterationTree, FindSections, FindSymbols
from devito.symbolics import Keyword, Macro
from devito.tools import as_tuple, filter_ordered, split
from devito.types import Array, Global, LocalObject
__all__ = ['filter_iterations', 'retrieve_iteration_tree', 'derive_parameters',
'diff_parameters']
def retrieve_iteration_tree(node, mode='normal'):
"""
A list with all Iteration sub-trees within an IET.
Examples
--------
Given the Iteration tree:
.. code-block:: c
Iteration i
expr0
Iteration j
Iteration k
expr1
Iteration p
expr2
Return the list: ::
[(Iteration i, Iteration j, Iteration k), (Iteration i, Iteration p)]
Parameters
----------
    node : Node
The searched Iteration/Expression tree.
mode : str, optional
- ``normal``
        - ``superset``: Iteration trees that are subsets of larger iteration trees
are dropped.
"""
assert mode in ('normal', 'superset')
trees = [IterationTree(i) for i in FindSections().visit(node) if i]
if mode == 'normal':
return trees
else:
found = []
for i in trees:
if any(set(i).issubset(set(j)) for j in trees if i != j):
continue
found.append(i)
return found
def filter_iterations(tree, key=lambda i: i):
"""
Return the first sub-sequence of consecutive Iterations such that
``key(iteration)`` is True.
"""
filtered = []
for i in tree:
if key(i):
filtered.append(i)
elif len(filtered) > 0:
break
return filtered
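# A toy illustration of filter_iterations (not a Devito IET; the helper works on any sequence):
#   filter_iterations([1, 3, 5, 2, 7], key=lambda i: i % 2 == 1)  returns  [1, 3, 5]
# i.e. only the first run of consecutive elements satisfying `key` is returned.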
def derive_parameters(iet, drop_locals=False):
"""
Derive all input parameters (function call arguments) from an IET
by collecting all symbols not defined in the tree itself.
"""
# Extract all candidate parameters
candidates = FindSymbols().visit(iet)
# Symbols, Objects, etc, become input parameters as well
basics = FindSymbols('basics').visit(iet)
candidates.extend(i.function for i in basics)
# Filter off duplicates (e.g., `x_size` is extracted by both calls to FindSymbols)
candidates = filter_ordered(candidates)
# Filter off symbols which are defined somewhere within `iet`
defines = [s.name for s in FindSymbols('defines').visit(iet)]
parameters = [s for s in candidates if s.name not in defines]
# Drop globally-visible objects
parameters = [p for p in parameters if not isinstance(p, (Global, Keyword, Macro))]
# Maybe filter out all other compiler-generated objects
if drop_locals:
parameters = [p for p in parameters if not isinstance(p, (Array, LocalObject))]
return parameters
def diff_parameters(iet, root, indirectly_provided=None):
"""
Derive the parameters of a sub-IET, `iet`, within a Callable, `root`, and
split them into two groups:
* the "read-only" parameters, and
* the "dynamic" parameters, whose value changes at some point in `root`.
The `indirectly_provided` are the parameters that are provided indirectly to
`iet`, for example via a composite type (e.g., a C struct).
"""
required = derive_parameters(iet)
required = [i for i in required if i not in as_tuple(indirectly_provided)]
known = set(root.parameters) | set(i for i in required if i.is_Array)
parameters, dynamic_parameters = split(required, lambda i: i in known)
return required, parameters, dynamic_parameters
|
py
|
1a5863b50384dab7968f5455286c9ea5235f83de
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( [email protected] )
"""
Suds library transport related unit tests.
Implemented using the 'pytest' testing framework.
"""
if __name__ == "__main__":
from . import __init__
__init__.runUsingPyTest(globals())
import suds
from suds.transport import Reply, Request
import pytest
import sys
@pytest.mark.parametrize("message", (
"",
"for a bitch it's haaaard...",
"I'm here to kick ass,\nand chew bubble gum...\nand I'm all out of gum.",
"šuć-muć pa ožeži.. za 100 €\n\nwith multiple\nlines...",
"\n\n\n\n\n\n",
"中原千军逐蒋"))
def test_reply_as_string(message):
code = 17
reply = Reply(code, {"aaa":1}, message)
expected = """\
CODE: %s
HEADERS: %s
MESSAGE:
%s""" % (code, reply.headers, message)
assert str(reply) == expected
if sys.version_info < (3, 0):
assert str(reply) == expected.encode("utf-8")
@pytest.mark.parametrize(("code", "headers", "message"), (
(1, {}, "ola"),
(2, {"semper":"fi"}, "中原千军逐蒋\n城楼万众检阅")))
def test_reply_constructor(code, headers, message):
reply = Reply(code, headers, message)
assert reply.code == code
assert reply.headers == headers
assert reply.message == message
@pytest.mark.parametrize("message", (
"",
"for a bitch it's haaaard...",
"I'm here to kick ass,\nand chew bubble gum...\nand I'm all out of gum.",
"šuć-muć pa ožeži.. za 100 €\n\nwith multiple\nlines...",
"\n\n\n\n\n\n",
"中原千军逐蒋"))
def test_request_as_string(message):
request = Request("my url", message)
request.headers["aaa"] = 1
expected = """\
URL: my url
HEADERS: %s
MESSAGE:
%s""" % (request.headers, message)
assert str(request) == expected
if sys.version_info < (3, 0):
assert str(request) == expected.encode("utf-8")
@pytest.mark.parametrize(("url", "message"), (
("for a bitch it's haaaard...", "it's hard out here..."),
("中原千军逐蒋", "城楼万众检阅")))
def test_request_constructor(url, message):
request = Request(url, message)
assert request.url == url
assert request.message == message
assert request.headers == {}
def test_request_without_message():
request = Request("for a bitch it's haaaard...")
assert request.url == "for a bitch it's haaaard..."
assert request.message is None
assert request.headers == {}
|
py
|
1a5863c1b31a7d7a1d77827be77b0e8b5f7363fd
|
import os
_TEST_ROOT = os.path.dirname(__file__) # root of test folder
_PROJECT_ROOT = os.path.dirname(_TEST_ROOT) # root of project
_PATH_DATA = os.path.join(_PROJECT_ROOT, "data") # root of data
|
py
|
1a5863f430acf2210ed94925a99ccce78c66c5a8
|
import scrapy
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class Proj2031Pipeline:
def process_item(self, item, spider):
image_urls = scrapy.Field()
images = scrapy.Field()
return item
|
py
|
1a58640ecba5b4305299d1df5945c54a05060713
|
#!/usr/bin/env python3
# Copyright 2014 Brett Slatkin, Pearson Education Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Preamble to mimic book environment
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Example 9
__all__ = ['Projectile']
class Projectile(object):
def __init__(self, mass, velocity):
self.mass = mass
self.velocity = velocity
|
py
|
1a58642e27e311426f39a079073f1555e48cba6e
|
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import random
from django.db.models import Max, Min, Sum
from django.db.models.query import F
from kolibri.auth.filters import HierarchyRelationsFilter
from kolibri.auth.models import Classroom, Facility, FacilityUser
from kolibri.content.models import ContentNode
from kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, MasteryLog
from le_utils.constants import content_kinds
def get_or_create_facilities(**options):
n_facilities = options['n_facilities']
n_on_device = Facility.objects.all().count()
n_to_create = n_facilities - n_on_device
if n_to_create > 0:
print('Generating {n} facility object(s)'.format(n=n_to_create))
for i in range(0, n_to_create):
Facility.objects.create(name='Test Facility {i}'.format(i=i + 1))
return Facility.objects.all()[0:n_facilities]
def get_or_create_classrooms(**options):
n_classes = options['n_classes']
facility = options['facility']
n_on_device = Classroom.objects.filter(parent=facility).count()
n_to_create = n_classes - n_on_device
if n_to_create > 0:
print('Generating {n} classroom object(s) for facility: {name}'.format(
n=n_to_create,
name=facility.name,
))
for i in range(0, n_to_create):
Classroom.objects.create(
parent=facility,
name='Classroom {i}{a}'.format(i=i + 1, a=random.choice('ABCD'))
)
return Classroom.objects.filter(parent=facility)[0:n_classes]
def get_or_create_classroom_users(**options):
classroom = options['classroom']
n_users = options['n_users']
user_data = options['user_data']
facility = options['facility']
# The headers in the user_data.csv file that we use to generate user Full Names
# Note, we randomly pick from these to give deliberately varied (and sometimes idiosyncratic)
# Full names - because we should never assume that users have names like us
user_data_name_fields = ["GivenName", "MiddleInitial", "Surname"]
n_in_classroom = HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy(
ancestor_collection=classroom,
target_user=F("id"),
).count()
# Only generate new users if there are fewer users than requested.
n_to_create = n_users - n_in_classroom
if n_to_create > 0:
print('Generating {n} user object(s) for class: {classroom} in facility: {facility}'.format(
n=n_to_create,
classroom=classroom,
facility=facility,
))
for i in range(0, n_to_create):
# Get the first base data that does not have a matching user already
base_data = user_data[n_in_classroom + i]
# Randomly create the name from 1 to 3 of the three user name fields
name = " ".join([base_data[key] for key in random.sample(user_data_name_fields, random.randint(1, 3))])
user = FacilityUser.objects.create(
facility=facility,
full_name=name,
username=base_data['Username']
)
# Set a dummy password so that if we want to login as this learner later, we can.
user.set_password('password')
user.save()
# Add the user to the current classroom
classroom.add_member(user)
return HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy(
target_user=F("id"),
ancestor_collection=classroom,
)[0:n_users]
def add_channel_activity_for_user(**options): # noqa: max-complexity=16
n_content_items = options['n_content_items']
channel = options['channel']
user = options['user']
now = options['now']
channel_id = channel.id
default_channel_content = ContentNode.objects.exclude(kind=content_kinds.TOPIC).filter(channel_id=channel_id)
print('Generating {i} user interaction(s) for user: {user} for channel: {channel}'.format(
i=n_content_items,
user=user,
channel=channel.name
))
# Generate a content interaction history for this many content items
for i in range(0, n_content_items):
# Use this to randomly select a content node to generate the interaction for
index = random.randint(0, default_channel_content.count() - 1)
random_node = default_channel_content[index]
# We will generate between 1 and 5 content session logs for this content item
session_logs = []
for j in range(0, random.randint(1, 5)):
# How many minutes did they spend in this session? Up to 15
duration = random.random() * 15
# Assume they spent some of this session time not doing anything - the lazy...
idle_time = random.random() * duration
session_logs.append(ContentSessionLog(
user=user,
channel_id=channel_id,
content_id=random_node.content_id,
start_timestamp=now - datetime.timedelta(i + j, 0, duration),
end_timestamp=now - datetime.timedelta(i + j),
# How many seconds did they actually spend doing something?
time_spent=(duration - idle_time) * 60,
progress=random.random(),
kind=random_node.kind,
))
# Assume they have not completed
completion_timestamp = None
cumulative_progress = 0
# Go through all the session logs and add up the progress in each
for session_log in session_logs:
cumulative_progress = min(cumulative_progress + session_log.progress, 1.0)
# If the progress is 1 or more, they have completed! Set the completion timestamp
# For the end of this session, for the sake of argument.
if cumulative_progress >= 1.0:
completion_timestamp = session_log.end_timestamp
session_log.save()
# Now that we have created all the Session Logs, infer the summary log from them
summary_log, created = ContentSummaryLog.objects.get_or_create(
user=user,
kind=random_node.kind,
content_id=random_node.content_id,
# Use defaults here so that we don't try to create a new Summary Log with the same
# kind/content_id/user combo, as this would violate uniqueness constraints
defaults={
'channel_id': channel_id,
# Start timestamp is the earliest start timestamp of the session logs
'start_timestamp': min(session_logs, key=lambda x: x.start_timestamp).start_timestamp,
# End timestamp is the latest of all the end timestamps
'end_timestamp': max(session_logs, key=lambda x: x.end_timestamp).end_timestamp,
'completion_timestamp': completion_timestamp,
'time_spent': sum(session_log.time_spent for session_log in session_logs),
'progress': min(sum(session_log.progress for session_log in session_logs), 1.0),
}
)
if not created:
# We didn't create the summary log this time, so it probably means it has other session logs
# Aggregate the information from there to update the relevant fields on the summary log
updates = ContentSessionLog.objects.filter(
user=user,
kind=random_node.kind,
content_id=random_node.content_id
).aggregate(
start_timestamp=Min('start_timestamp'),
end_timestamp=Max('end_timestamp'),
progress=Sum('progress')
)
summary_log.start_timestamp = updates['start_timestamp']
summary_log.end_timestamp = updates['end_timestamp']
if summary_log.progress < 1.0 and updates['progress'] >= 1.0:
# If it was not previously completed, and is now, set the completion timestamp to the
# final end timestamp of the session logs.
summary_log.completion_timestamp = updates['end_timestamp']
summary_log.progress = min(1.0, updates['progress'])
summary_log.save()
# If we are dealing with anything but an assessment (currently only exercises)
# we are done - if not, create additional data here
if random_node.kind == content_kinds.EXERCISE:
# Generate a mastery log if needed
mastery_log, created = MasteryLog.objects.get_or_create(
user=user,
mastery_level=1,
summarylog=summary_log,
defaults={
'start_timestamp': summary_log.start_timestamp,
'end_timestamp': summary_log.end_timestamp,
'complete': summary_log.progress >= 1.0,
'completion_timestamp': completion_timestamp,
'mastery_criterion': {
'm': 5,
'n': 5,
'type': 'm_of_n',
},
}
)
if not created:
# Not created, so update relevant fields on it based on new interactions
if not mastery_log.complete and summary_log.progress >= 1.0:
mastery_log.complete = True
mastery_log.completion_timestamp = summary_log.completion_timestamp
mastery_log.end_timestamp = summary_log.end_timestamp
# Get the list of assessment item ids from the assessment meta data
assessment_item_ids = random_node.assessmentmetadata.first().assessment_item_ids
for i, session_log in enumerate(reversed(session_logs)):
# Always make students get 5 attempts correct in the most recent session
# if the exercise is complete
complete = (i == 0 and mastery_log.complete)
if complete:
n = 5
else:
# Otherwise, let them have answered between 1 and 5 questions per session
n = random.randint(1, 5)
# How long did they spend on these n questions?
timespan = session_log.end_timestamp - session_log.start_timestamp
# Index through each individual question
for j in range(0, n):
if complete:
# If this is the session where they completed the exercise, always
# make them get it right
correct = True
else:
# Otherwise only let students get odd indexed questions right,
# ensuring they will always have a mastery breaking sequence
# as zero based indexing means their first attempt will always be wrong!
correct = j % 2 == 1
start_timestamp = session_log.end_timestamp - (timespan / n) * (j + 1)
end_timestamp = session_log.end_timestamp - (timespan / n) * j
# If incorrect, must have made at least two attempts at the question
question_attempts = 1 if correct else random.randint(2, 5)
question_interval = (end_timestamp - start_timestamp) / question_attempts
# If they got it wrong, give 20/80 chance that they took a hint to do so
hinted = random.choice((False, False, False, False, not correct))
if hinted:
first_interaction = {
'correct': False,
'type': 'hint',
}
else:
first_interaction = {
'correct': correct,
'type': 'answer',
}
first_interaction.update({
'answer': {},
'timestamp': start_timestamp + question_interval
})
interaction_history = [first_interaction]
# If it is correct, this can be our only response, otherwise, add more.
if not correct:
for att in range(1, question_attempts - 1):
# Add on additional attempts for intervening incorrect responses
interaction_history += [{
'correct': False,
'type': 'answer',
'answer': {},
'timestamp': start_timestamp + question_interval * (att + 1),
}]
# Finally, add a correct response that allows the user to move onto the next question
interaction_history += [{
'correct': True,
'type': 'answer',
'answer': {},
'timestamp': end_timestamp,
}]
AttemptLog.objects.create(
# Choose a random assessment item id from the exercise
item=random.choice(assessment_item_ids),
# Just let each attempt be a fixed proportion of the total time spent on the exercise
start_timestamp=start_timestamp,
end_timestamp=end_timestamp,
time_spent=timespan.total_seconds(),
# Mark all attempts as complete, as assume that student gave correct answer eventually
complete=True,
# Mark as correct or incorrect
correct=correct,
hinted=hinted,
# We can't meaningfully generate fake answer data for Perseus exercises
# (which are currently our only exercise type) - so don't bother.
answer={},
simple_answer='',
interaction_history=interaction_history,
user=user,
masterylog=mastery_log,
sessionlog=session_log,
)
|
py
|
1a5864b5bb8c99538329684373682a87b351bae8
|
def find_duplicates(list_of_numbers):
# start writing your code here
list_of_duplicates = []
for each in set(list_of_numbers):
if list_of_numbers.count(each) > 1:
list_of_duplicates.append(each)
return list_of_duplicates
list_of_numbers = [1, 2, 2, 3, 3, 3, 4, 4, 4, 4]
list_of_duplicates = find_duplicates(list_of_numbers)
print(list_of_duplicates)
|
py
|
1a5864c6075c81b286c40a0eb5b4683e8a0fc8c8
|
#!/usr/bin/env python
# encoding: utf-8
# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network
# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand
# Siggraph 2019
#
# Copyright (c) 2019 Michaël Gharbi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Denoise an image using a previously trained model."""
import os
import argparse
import shutil
import tempfile
import time
import pyexr
import torch as th
import numpy as np
from torch.utils.data import DataLoader
import skimage.io as skio
from sbmc import losses
from denoise import _pad
import ttools
from ttools.modules.image_operators import crop_like
import sbmc
LOG = ttools.get_logger(__name__)
def main(args):
if not os.path.exists(args.data):
raise ValueError("input {} does not exist".format(args.data))
# Load the data
data_params = dict(spp=args.spp)
kpcn_data_params = dict(spp=args.spp, kpcn_mode=True)
data = sbmc.FullImagesDataset(args.data, **data_params)
data_kpcn = sbmc.FullImagesDataset(args.data, **data_params, mode="kpcn")
dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=0)
dataloader_kpcn = DataLoader(data_kpcn, batch_size=1, shuffle=False, num_workers=0)
# Load the model
temp = th.load(f"{args.model1}", map_location=th.device('cpu'))
model_one = sbmc.RecurrentMultisteps(data.num_features, data.num_global_features)
model_one.load_state_dict(temp['model'])
model_one.train(False)
temp = th.load("/home/emil/Documents/Temporal-SBMC-extension/data/pretrained_models/gharbi2019_sbmc/final.pth" , map_location=th.device("cpu"))
sbmc_model = sbmc.Multisteps(data.num_features, data.num_global_features)
sbmc_model.load_state_dict(temp["model"])
sbmc_model.train(False)
temp = th.load("/home/emil/Documents/Temporal-SBMC-extension/data/pretrained_models/bako2017_finetuned/final.pth", map_location=th.device("cpu"))
kpcn_model = sbmc.KPCN(27)
kpcn_model.load_state_dict(temp["model"])
kpcn_model.train(False)
device = "cuda" if th.cuda.is_available() else "cpu"
if (device == "cuda"):
LOG.info("Using CUDA")
model_one.cuda()
sbmc_model.cuda()
kpcn_model.cuda()
rmse_checker = losses.RelativeMSE()
rmse_checker.to(device)
radiances = []
batch = next(iter(dataloader))
kpcn_batch = next(iter(dataloader_kpcn))
for k in batch.keys():
if not batch[k].__class__ == th.Tensor:
continue
batch[k] = batch[k].to(device) # Sets the tensors to the correct device type
for k in kpcn_batch.keys():
print(k)
if not kpcn_batch[k].__class__ == th.Tensor:
continue
kpcn_batch[k] = kpcn_batch[k].to(device) # Sets the tensors to the correct device type
# Compute the output with RSBMC
with th.no_grad():
output = model_one(batch)["radiance"]
output_sbmc = sbmc_model(batch)["radiance"]
output_kpcn = kpcn_model(kpcn_batch)["radiance"]
# tgt = crop_like(batch["target_image"], output)
radiances.append(batch["low_spp"])
radiances.append(_pad(batch, output, False)) # Add RSBMC to the output
radiances.append(_pad(batch, output_sbmc, False))
radiances.append(_pad(kpcn_batch, output_kpcn, True))
radiances.append(batch["target_image"]) # Add target to the output
save_img(radiances, args.save_dir)
def save_img(radiances, checkpoint_dir):
tmp_empty = th.zeros_like(radiances[0]) # Empty filler tensor
# Difference between models and ground thruth
# diff_model1 = (radiance1 - tgt).abs()
# diff_model2 = (radiance2 - tgt).abs()
# Create output data in the form:
# low spp input --
# ouput model1 -- Diff with tgt
# ouput model2 -- Diff with tgt
# tgt --
# first_row = th.cat([tmp_empty, low_radiance, tmp_empty], -1)
# second_row = th.cat([tmp_empty, radiance1, diff_model1], -1)
# third_row = th.cat([tmp_empty, radiance2, diff_model2], -1)
# fourth_row = th.cat([tmp_empty, tgt, tmp_empty], -1)
# Concate the data in a vertical stack
# data = th.cat([first_row, second_row, third_row, fourth_row], -2)
data = th.cat(radiances, -1)
data = th.clamp(data, 0)
data /= 1 + data
data = th.pow(data, 1.0/2.2)
data = th.clamp(data, 0, 1)
data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
data = np.ascontiguousarray(data)
# Add text to the images
os.makedirs(checkpoint_dir, exist_ok=True)
    outputfile = os.path.join(checkpoint_dir, 'spp.exr')
pyexr.write(outputfile, data)
png = outputfile.replace(".exr", ".png")
skio.imsave(png, (np.clip(data, 0, 1)*255).astype(np.uint8))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model1', required=True, help="path to the first model")
parser.add_argument(
'--save_dir', required=True, help="path to the dir where everything has to be saved")
parser.add_argument(
'--data', required=True, help="path to the training data.")
parser.add_argument(
        '--amount', required=False, type=int, default=1, help="Amount of frames to denoise and compare")
parser.add_argument('--spp', type=int,
help="number of samples to use as input.")
args = parser.parse_args()
ttools.set_logger(True)
main(args)
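# Example invocation, a sketch only (the script name and the data/model paths are placeholders):
#   python compare_denoisers.py --model1 path/to/rsbmc/final.pth \
#       --data path/to/test_scenes --save_dir output --spp 4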
|
py
|
1a586659e5f39f22c53606d7a6e3c34c0597b905
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class DealersPipeline:
def process_item(self, item, spider):
return item
|
py
|
1a5866f61adb91ed962e337b97a8fd26452b33ab
|
#!/usr/bin/env python3
# Copyright 2019 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import csv
import os
import sys
import chart_util as util
class Parser(util.Parser):
def __init__(self, csv_dialect):
self.csv_dialect = csv_dialect
self.header = ['name', 'ngraphs', 'type', 'nodes', 'metg']
self.table = []
def process(self, row, data):
self.table.append({'metg': data, **row})
def error_value(self):
return 'error'
def complete(self):
out = csv.DictWriter(sys.stdout, self.header, dialect=self.csv_dialect)
out.writeheader()
for row in self.table:
out.writerow(row)
def driver(machine, threshold, csv_dialect, verbose):
parser = Parser(csv_dialect)
parser.parse(machine, threshold, True, verbose)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--machine', required=True)
parser.add_argument('-t', '--threshold', type=float, default=0.5)
parser.add_argument('--csv-dialect', default='excel-tab')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
driver(**vars(args))
|
py
|
1a5867704e7d383fd09155b7d7cefa33ce14f147
|
# MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
from blockchainetl import misc_utils
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-i', '--input', default='-', type=str, help='The input file. If not specified stdin is used.')
@click.option('-o', '--output', default='-', type=str, help='The output file. If not specified stdout is used.')
@click.option('-p', '--predicate', required=True, type=str,
help='Predicate in Python code e.g. "item[\'is_erc20\']".')
def filter_items(input, output, predicate):
def evaluated_predicate(item):
eval_environment = globals()
if 'datetime' in predicate:
import datetime
eval_environment['datetime'] = datetime
return eval(predicate, eval_environment, {'item': item})
misc_utils.filter_items(input, output, evaluated_predicate)
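# Example invocation, shown as a sketch (it assumes this command is exposed as a standalone
# click entry point; the file names are placeholders):
#   python filter_items.py -i token_transfers.json -o erc20_transfers.json -p "item['is_erc20']"
# The predicate string is evaluated with each streamed item bound to `item`; `datetime` is
# made available automatically whenever the predicate mentions it.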
|
py
|
1a5867b9554547c9367553912fbb390d8cca76d2
|
# coding=utf-8
class DiamondException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
def __repr__(self):
return self.message
|
py
|
1a58684006a56561eb0a737a1758461357e145a5
|
r"""Inspect MEG and EEG raw data, and interactively mark channels as bad.
example usage:
$ mne_bids inspect --subject_id=01 --task=experiment --session=test \
--datatype=meg --suffix=meg --bids_root=bids_root
"""
# Authors: Richard Höchenberger <[email protected]>
#
# License: BSD (3-clause)
from mne.utils import logger
import mne_bids
from mne_bids import BIDSPath, inspect_dataset
def run():
"""Run the mark_bad_channels command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__, usage="usage: %prog options args",
prog_prefix='mne_bids',
version=mne_bids.__version__)
parser.add_option('--bids_root', dest='bids_root',
help='The path of the folder containing the BIDS '
'dataset')
parser.add_option('--subject_id', dest='subject',
help=('Subject name'))
parser.add_option('--session_id', dest='session',
help='Session name')
parser.add_option('--task', dest='task',
help='Task name')
parser.add_option('--acq', dest='acquisition',
help='Acquisition parameter')
parser.add_option('--run', dest='run',
help='Run number')
parser.add_option('--proc', dest='processing',
help='Processing label.')
parser.add_option('--rec', dest='recording',
help='Recording name')
parser.add_option('--type', dest='datatype',
help='Recording data type, e.g. meg, ieeg or eeg')
parser.add_option('--suffix', dest='suffix',
help='The filename suffix, i.e. the last part before '
'the extension')
parser.add_option('--ext', dest='extension',
help='The filename extension, including the leading '
'period, e.g. .fif')
parser.add_option('--find_flat', dest='find_flat',
help='Whether to auto-detect flat channels and time '
'segments')
parser.add_option('--l_freq', dest='l_freq',
help='The high-pass filter cutoff frequency')
parser.add_option('--h_freq', dest='h_freq',
help='The low-pass filter cutoff frequency')
parser.add_option('--verbose', dest='verbose', action='store_true',
                      help='Whether to generate additional diagnostic output')
opt, args = parser.parse_args()
if args:
parser.print_help()
parser.error(f'Please do not specify arguments without flags. '
f'Got: {args}.\n')
if opt.bids_root is None:
parser.print_help()
parser.error('You must specify bids_root')
bids_path = BIDSPath(subject=opt.subject, session=opt.session,
task=opt.task, acquisition=opt.acquisition,
run=opt.run, processing=opt.processing,
recording=opt.recording, datatype=opt.datatype,
suffix=opt.suffix, extension=opt.extension,
root=opt.bids_root)
find_flat = True if opt.find_flat is None else bool(opt.find_flat)
l_freq = None if opt.l_freq is None else float(opt.l_freq)
h_freq = None if opt.h_freq is None else float(opt.h_freq)
logger.info(f'Inspecting {bids_path.basename} …')
inspect_dataset(bids_path=bids_path, find_flat=find_flat,
l_freq=l_freq, h_freq=h_freq,
verbose=opt.verbose)
if __name__ == '__main__':
run()
|
py
|
1a5868524b26e3237e55c98ec227afff43fca08a
|
'''
Created on Nov 22, 2017
@author: NeilShah-MacBookPro
'''
from tkinter import Button, Label, Canvas
import model
generation = None
canvas = None
# The view calls these functions to place buttons in a frame
def start_button(parent, **config) -> Button:
return Button(parent, command = model.start, **config)
def stop_button(parent, **config) -> Button:
return Button(parent, command = model.stop, **config)
def reset_button(parent, **config) -> Button:
return Button(parent, command = model.reset, **config)
def generation_label(parent, **config) -> Label:
global generation
generation = Label(parent, **config)
return generation
def grid_simulation_canvas(parent, **config) -> Canvas:
global canvas
canvas = Canvas(parent, **config)
canvas.bind('<Button-1>', model.mouse_click)
return canvas
# The repeater calls itself every 1000 milliseconds
# The grid updates itself every second
# Calls functions from the models to update and display the grid
def repeater(root) -> None:
model.update_grid()
model.display_grid()
root.after(1000, repeater, root)
|
py
|
1a58689d732fdd0ebfb5cd15d361d8e24f75c6ef
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
import drf_yasg.openapi as openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import generics, status
from rest_framework.parsers import FormParser, JSONParser, MultiPartParser
from rest_framework.views import APIView
from rest_framework.response import Response
from core.permissions import all_permissions
from core.utils.common import get_object_with_check_and_log
from projects.models import Project
from ml.serializers import MLBackendSerializer
from ml.models import MLBackend
from core.utils.common import bool_from_request
logger = logging.getLogger(__name__)
class MLBackendListAPI(generics.ListCreateAPIView):
parser_classes = (JSONParser, FormParser, MultiPartParser)
permission_required = all_permissions.projects_change
serializer_class = MLBackendSerializer
swagger_schema = None
def get_queryset(self):
project_pk = self.request.query_params.get("project")
project = get_object_with_check_and_log(self.request, Project, pk=project_pk)
self.check_object_permissions(self.request, project)
ml_backends = MLBackend.objects.filter(project_id=project.id)
for mlb in ml_backends:
mlb.update_state()
return ml_backends
def perform_create(self, serializer):
ml_backend = serializer.save()
ml_backend.update_state()
class MLBackendDetailAPI(generics.RetrieveUpdateDestroyAPIView):
"""RUD storage by pk specified in URL"""
parser_classes = (JSONParser, FormParser, MultiPartParser)
serializer_class = MLBackendSerializer
permission_required = all_permissions.projects_change
queryset = MLBackend.objects.all()
swagger_schema = None
def get_object(self):
ml_backend = super(MLBackendDetailAPI, self).get_object()
ml_backend.update_state()
return ml_backend
def perform_update(self, serializer):
ml_backend = serializer.save()
ml_backend.update_state()
class MLBackendTrainAPI(APIView):
"""Train
After you've activated an ML backend, call this API to start training with the already-labeled tasks.
"""
permission_required = all_permissions.projects_change
@swagger_auto_schema(
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
"use_ground_truth": openapi.Schema(
type=openapi.TYPE_BOOLEAN,
description="Whether to include ground truth annotations in training",
)
},
),
responses={
200: openapi.Response(
title="Training OK", description="Training has successfully started."
),
500: openapi.Response(
description="Training error",
schema=openapi.Schema(
title="Error message",
description="Error message",
type=openapi.TYPE_STRING,
example="Server responded with an error.",
),
),
},
tags=["Machine Learning"],
)
def post(self, request, *args, **kwargs):
ml_backend = get_object_with_check_and_log(request, MLBackend, pk=self.kwargs["pk"])
self.check_object_permissions(self.request, ml_backend)
ml_backend.train()
return Response(status=status.HTTP_200_OK)
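# A hedged client-side sketch of calling the train endpoint (the URL pattern,
# backend pk, and token below are assumptions -- check your Label Studio routing
# and authentication setup):
#
#     import requests
#
#     requests.post(
#         'http://localhost:8080/api/ml/42/train',            # hypothetical backend pk
#         headers={'Authorization': 'Token <your-api-token>'},
#         json={'use_ground_truth': True},                     # matches the documented body
#     )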
|
py
|
1a58691abe01ea3b37e2a9b0eac662e3f54dd071
|
import csv
from decimal import Decimal
from io import BytesIO, StringIO
import os
from collections import OrderedDict
from tempfile import TemporaryDirectory
from unittest import skipIf
from zipfile import ZipFile
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.geos import Point, LineString, GeometryCollection
from django.contrib.gis import gdal
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import activate, deactivate_all
from geotrek.common.tests import CommonTest
from mapentity.tests.factories import SuperUserFactory
from mapentity.serializers.shapefile import ZipShapeSerializer
from geotrek.authent.tests.factories import PathManagerFactory, StructureFactory
from geotrek.core.tests.factories import StakeFactory
from geotrek.core.models import PathAggregation
from geotrek.common.tests.factories import OrganismFactory
from geotrek.common.tests import TranslationResetMixin
from geotrek.maintenance.models import Intervention, InterventionStatus, Project
from geotrek.maintenance.views import InterventionFormatList, ProjectFormatList
from geotrek.core.tests.factories import PathFactory, TopologyFactory
from geotrek.infrastructure.models import Infrastructure
from geotrek.infrastructure.tests.factories import InfrastructureFactory
from geotrek.outdoor.tests.factories import CourseFactory
from geotrek.signage.tests.factories import BladeFactory, SignageFactory
from geotrek.signage.models import Signage
from geotrek.maintenance.tests.factories import (InterventionFactory, InfrastructureInterventionFactory,
InterventionDisorderFactory, InterventionStatusFactory, ManDayFactory,
ProjectFactory, ContractorFactory, InterventionJobFactory,
SignageInterventionFactory, ProjectWithInterventionFactory)
from geotrek.trekking.tests.factories import POIFactory, TrekFactory, ServiceFactory
class InterventionViewsTest(CommonTest):
model = Intervention
modelfactory = InterventionFactory
userfactory = PathManagerFactory
get_expected_json_attrs = None # Disable API tests
extra_column_list = ['heliport_cost', 'subcontract_cost', 'disorders', 'jobs']
expected_column_list_extra = ['id', 'name', 'heliport_cost', 'subcontract_cost', 'disorders', 'jobs']
expected_column_formatlist_extra = ['id', 'heliport_cost', 'subcontract_cost', 'disorders', 'jobs']
def get_bad_data(self):
return OrderedDict([
('name', ''),
('manday_set-TOTAL_FORMS', '0'),
('manday_set-INITIAL_FORMS', '1'),
('manday_set-MAX_NUM_FORMS', '0'),
]), 'This field is required.'
def get_good_data(self):
InterventionStatusFactory.create()
good_data = {
'name': 'test',
'date': '2012-08-23',
'disorders': InterventionDisorderFactory.create().pk,
'comments': '',
'slope': 0,
'area': 0,
'subcontract_cost': 0.0,
'stake': StakeFactory.create().pk,
'height': 0.0,
'project': '',
'width': 0.0,
'length': 0.0,
'status': InterventionStatus.objects.all()[0].pk,
'heliport_cost': 0.0,
'material_cost': 0.0,
'manday_set-TOTAL_FORMS': '2',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
'manday_set-0-nb_days': '48.75',
'manday_set-0-job': InterventionJobFactory.create().pk,
'manday_set-0-id': '',
'manday_set-0-DELETE': '',
'manday_set-1-nb_days': '12',
'manday_set-1-job': InterventionJobFactory.create().pk,
'manday_set-1-id': '',
'manday_set-1-DELETE': '',
}
if settings.TREKKING_TOPOLOGY_ENABLED:
path = PathFactory.create()
good_data['topology'] = '{"paths": [%s]}' % path.pk
else:
good_data['topology'] = 'SRID=4326;POINT (5.1 6.6)'
return good_data
def test_creation_form_on_signage(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = "%s" % signa
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
))
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
# Should be able to save form successfully
data = self.get_good_data()
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
),
data)
self.assertEqual(response.status_code, 302)
self.assertEqual(signa, Intervention.objects.get().target)
def test_detail_target_objects(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
path = PathFactory.create(geom=LineString((200, 200), (300, 300)))
signa = SignageFactory.create(paths=[(path, .5, .5)])
signa.save()
infrastructure = InfrastructureFactory.create(paths=[(path, .5, .5)])
infrastructure.save()
poi = POIFactory.create(paths=[(path, .5, .5)])
trek = TrekFactory.create(paths=[(path, .5, .5)])
service = ServiceFactory.create(paths=[(path, .5, .5)])
topo = TopologyFactory.create(paths=[(path, .5, .5)])
topo.save()
path_other = PathFactory.create(geom=LineString((10000, 0), (10010, 0)))
signa_other = SignageFactory.create(paths=[(path_other, .5, .5)])
signa_other.save()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (250 250)')
infrastructure = InfrastructureFactory.create(geom='SRID=2154;POINT (250 250)')
poi = POIFactory.create(geom='SRID=2154;POINT (250 250)')
trek = TrekFactory.create(geom='SRID=2154;POINT (250 250)')
service = ServiceFactory.create(geom='SRID=2154;POINT (250 250)')
topo = TopologyFactory.create(geom='SRID=2154;POINT (250 250)')
signa_other = SignageFactory.create(geom='SRID=2154;POINT (10005 0)')
intervention_signa = InterventionFactory.create(target=signa)
intervention_infra = InterventionFactory.create(target=infrastructure)
intervention_poi = InterventionFactory.create(target=poi)
intervention_trek = InterventionFactory.create(target=trek)
intervention_service = InterventionFactory.create(target=service)
intervention_topo = InterventionFactory.create(target=topo)
blade = BladeFactory(signage=signa, number="1")
intervention_blade = InterventionFactory.create(target=blade)
intervention_other = InterventionFactory.create(target=signa_other)
response = self.client.get(signa.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, intervention_signa.target_display)
self.assertContains(response, intervention_infra.target_display)
self.assertContains(response, intervention_poi.target_display)
self.assertContains(response, intervention_trek.target_display)
self.assertContains(response, intervention_service.target_display)
self.assertContains(response, intervention_blade.target_display)
self.assertContains(response, intervention_topo.target_display)
self.assertNotContains(response, intervention_other.target_display)
def test_creation_form_on_signage_with_errors(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = "%s" % signa
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
))
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
data = self.get_good_data()
# If form invalid, it should not fail
data.pop('status')
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
),
data)
self.assertEqual(response.status_code, 200)
self.assertFalse(Intervention.objects.exists())
def test_update_form_on_signage(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = "%s" % signa
intervention = InterventionFactory.create(target=signa)
response = self.client.get(intervention.get_update_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
# Should be able to save form successfully
form = response.context['form']
data = form.initial
data['disorders'] = data['disorders'][0].pk
data['project'] = ''
data.update(**{
'manday_set-TOTAL_FORMS': '0',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
})
# Form URL is modified in form init
formurl = '%s?target_id=%s&target_type=%s' % (intervention.get_update_url(), signa.pk, ContentType.objects.get_for_model(Signage).pk)
response = self.client.post(formurl, data)
self.assertEqual(response.status_code, 302)
def test_update_signage(self):
target_year = 2017
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = SignageInterventionFactory.create()
else:
intervention = SignageInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signa = intervention.target
# Save signage form
response = self.client.get(signa.get_update_url())
form = response.context['form']
data = form.initial
data['name_en'] = 'modified'
data['implantation_year'] = target_year
if settings.TREKKING_TOPOLOGY_ENABLED:
data['topology'] = '{"paths": [%s]}' % PathFactory.create().pk
else:
data['geom'] = 'SRID=4326;POINT (2.0 6.6)'
data['manager'] = OrganismFactory.create().pk
response = self.client.post(signa.get_update_url(), data)
self.assertEqual(response.status_code, 302)
# Check that intervention was not deleted (bug #783)
intervention = Intervention.objects.first()
self.assertFalse(intervention.deleted)
self.assertEqual(intervention.target.name, 'modified')
self.assertEqual(intervention.target.implantation_year, target_year)
def test_creation_form_on_infrastructure(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk))
self.assertEqual(response.status_code, 200)
# Should be able to save form successfully
data = self.get_good_data()
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk),
data)
self.assertEqual(response.status_code, 302)
def test_creation_form_on_infrastructure_with_errors(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk))
self.assertEqual(response.status_code, 200)
data = self.get_good_data()
# If form invalid, it should not fail
data.pop('status')
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk), data)
self.assertEqual(response.status_code, 200)
def test_update_form_on_infrastructure(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
intervention = InterventionFactory.create(target=infra)
response = self.client.get(intervention.get_update_url())
self.assertEqual(response.status_code, 200)
# Should be able to save form successfully
form = response.context['form']
data = form.initial
data['disorders'] = data['disorders'][0].pk
data['project'] = ''
data.update(**{
'manday_set-TOTAL_FORMS': '0',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
})
# Form URL is modified in form init
formurl = '%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk)
response = self.client.post(formurl, data)
self.assertEqual(response.status_code, 302)
def test_disorders_not_mandatory(self):
data = self.get_good_data()
data.pop('disorders')
response = self.client.post(Intervention.get_add_url(), data)
self.assertEqual(response.status_code, 302)
def test_update_infrastructure(self):
target_year = 2017
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = InfrastructureInterventionFactory.create()
else:
intervention = InfrastructureInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
infra = intervention.target
# Save infrastructure form
response = self.client.get(infra.get_update_url())
form = response.context['form']
data = form.initial
data['name_en'] = 'modified'
data['implantation_year'] = target_year
data['accessibility'] = ''
if settings.TREKKING_TOPOLOGY_ENABLED:
data['topology'] = '{"paths": [%s]}' % PathFactory.create().pk
else:
data['geom'] = 'SRID=4326;POINT (2.0 6.6)'
response = self.client.post(infra.get_update_url(), data)
self.assertEqual(response.status_code, 302)
intervention = Intervention.objects.first()
self.assertFalse(intervention.deleted)
self.assertEqual(intervention.target.name, 'modified')
self.assertEqual(intervention.target.implantation_year, target_year)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_form_default_stake(self):
"""
Without dynamic segmentation there are no paths, so no default stake can be derived from them.
"""
good_data = self.get_good_data()
good_data['stake'] = ''
good_data['topology'] = """
{"offset":0,"positions":{"0":[0.8298653170816073,1],"2":[0,0.04593024777973237]},"paths":[%s,%s,%s]}
""" % (PathFactory.create().pk, PathFactory.create().pk, PathFactory.create().pk)
response = self.client.post(Intervention.get_add_url(), good_data)
self.assertEqual(response.status_code, 302)
response = self.client.get(response._headers['location'][1])
self.assertTrue('object' in response.context)
intervention = response.context['object']
self.assertFalse(intervention.stake is None)
def test_form_deleted_projects(self):
p1 = ProjectFactory.create()
p2 = ProjectFactory.create()
i = InterventionFactory.create(project=p1)
response = self.client.get(i.get_update_url())
self.assertEqual(response.status_code, 200)
form = self.get_form(response)
projects = form.fields['project'].queryset.all()
self.assertCountEqual(projects, [p1, p2])
p2.delete()
projects = form.fields['project'].queryset.all()
self.assertCountEqual(projects, [p1])
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_csv_on_topology_multiple_paths(self):
# Create an intervention on multiple paths and check that the CSV target column ('On') lists all the paths
path_AB = PathFactory.create(name="PATH_AB", geom=LineString((0, 0), (4, 0)))
path_CD = PathFactory.create(name="PATH_CD", geom=LineString((4, 0), (8, 0)))
InterventionFactory.create(target=TopologyFactory.create(paths=[(path_AB, 0.2, 1),
(path_CD, 0, 1)]))
response = self.client.get(self.model.get_format_list_url() + '?format=csv')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/csv')
# Read the csv
lines = list(csv.reader(StringIO(response.content.decode("utf-8")), delimiter=','))
index_line = lines[0].index('On')
self.assertEqual(lines[1][index_line],
f'Path: {path_AB.name} ({path_AB.pk}), Path: {path_CD.name} ({path_CD.pk})')
def test_no_html_in_csv_infrastructure(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
InfrastructureInterventionFactory.create()
else:
InfrastructureInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
super().test_no_html_in_csv()
def test_no_html_in_csv_signage(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
SignageInterventionFactory.create()
else:
SignageInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
super().test_no_html_in_csv()
def test_structurerelated_not_loggedin(self):
# Test that it does not fail on update if not logged in
self.client.logout()
response = self.client.get(Intervention.get_add_url())
self.assertEqual(response.status_code, 302)
i = InterventionFactory.create()
response = self.client.get(i.get_update_url())
self.assertEqual(response.status_code, 302)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_creation_form_line(self):
path = PathFactory.create(geom=LineString(Point(700000, 6600000), Point(700300, 6600300), srid=settings.SRID))
self.super_user = SuperUserFactory.create(username='admin', password='super')
self.client.login(username='admin', password='super')
data = self.get_good_data()
data['structure'] = StructureFactory.create().pk
data['topology'] = '{"paths": [%s], "positions":{"0":[0,1]}}' % path.pk
response = self.client.post('%s' % (Intervention.get_add_url()),
data)
self.assertEqual(PathAggregation.objects.count(), 1)
self.assertEqual(response.status_code, 302)
self.assertEqual(Intervention.objects.first().geom, path.geom)
self.assertEqual(Intervention.objects.first().target.kind, 'INTERVENTION')
class ProjectViewsTest(CommonTest):
model = Project
modelfactory = ProjectWithInterventionFactory
userfactory = PathManagerFactory
get_expected_json_attrs = None # Disable API tests
extra_column_list = ['domain', 'contractors']
expected_column_list_extra = ['id', 'name', 'domain', 'contractors']
expected_column_formatlist_extra = ['id', 'domain', 'contractors']
def get_bad_data(self):
return OrderedDict([
('begin_year', ''),
('funding_set-TOTAL_FORMS', '0'),
('funding_set-INITIAL_FORMS', '1'),
('funding_set-MAX_NUM_FORMS', '0'),
]), 'This field is required.'
def get_good_data(self):
return {
'name': 'test',
'stake': '',
'type': '',
'domain': '',
'begin_year': '2010',
'end_year': '2012',
'constraints': '',
'global_cost': '12',
'comments': '',
'contractors': ContractorFactory.create().pk,
'project_owner': OrganismFactory.create().pk,
'project_manager': OrganismFactory.create().pk,
'funding_set-TOTAL_FORMS': '2',
'funding_set-INITIAL_FORMS': '0',
'funding_set-MAX_NUM_FORMS': '',
'funding_set-0-amount': '468.0',
'funding_set-0-organism': OrganismFactory.create().pk,
'funding_set-0-project': '',
'funding_set-0-id': '',
'funding_set-0-DELETE': '',
'funding_set-1-amount': '789',
'funding_set-1-organism': OrganismFactory.create().pk,
'funding_set-1-project': '',
'funding_set-1-id': '',
'funding_set-1-DELETE': ''
}
def _check_update_geom_permission(self, response):
pass
def test_project_layer(self):
p1 = ProjectFactory.create()
ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
InterventionFactory.create(project=p1)
else:
InterventionFactory.create(project=p1, geom='SRID=2154;POINT (700000 6600000)')
# Check that only p1 is in geojson
response = self.client.get(self.model.get_layer_url())
self.assertEqual(response.status_code, 200)
geojson = response.json()
features = geojson['features']
self.assertEqual(len(Project.objects.all()), 2)
self.assertEqual(len(features), 1)
self.assertEqual(features[0]['properties']['pk'], p1.pk)
def test_project_bbox_filter(self):
p1 = ProjectFactory.create()
ProjectFactory.create()
ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
t = TopologyFactory.create()
else:
t = TopologyFactory.create(geom='SRID=2154;POINT (700000 6600000)')
InterventionFactory.create(project=p1, target=t)
def jsonlist(bbox):
url = self.model.get_jsonlist_url() + bbox
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
jsondict = response.json()
return jsondict['aaData']
# Check that projects without interventions are always present
self.assertEqual(len(Project.objects.all()), 3)
self.assertEqual(len(jsonlist('')), 3)
self.assertEqual(len(jsonlist('?bbox=POLYGON((1%202%200%2C1%202%200%2C1%202%200%2C1%202%200%2C1%202%200))')), 2)
# Give a bbox that matches the intervention, and check that all 3 projects come back
bbox = '?bbox=POLYGON((2.9%2046.4%2C%203.1%2046.4%2C%203.1%2046.6%2C%202.9%2046.6%2C%202.9%2046.4))'
self.assertEqual(len(jsonlist(bbox)), 3)
def test_deleted_interventions(self):
project = ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = InterventionFactory.create()
else:
intervention = InterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
project.interventions.add(intervention)
response = self.client.get(project.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, intervention.name)
intervention.delete()
response = self.client.get(project.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, intervention.name)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class ExportTest(TranslationResetMixin, TestCase):
def test_shape_mixed(self):
"""
Test that a project made of interventions with different geometries creates multiple files.
Check that each file has a different geometry type (Point/LineString) and that
the project and the interventions are correctly referenced in them.
"""
# Create topology line
line = PathFactory.create(geom=LineString(Point(10, 10), Point(11, 10)))
topo_line = TopologyFactory.create(paths=[line])
closest_path = PathFactory(geom=LineString(Point(0, 0), Point(1, 1), srid=settings.SRID))
topo_point = TopologyFactory.create(paths=[(closest_path, 0.5, 0.5)])
self.assertEqual(topo_point.paths.get(), closest_path)
# Create one intervention by geometry (point/linestring/geometrycollection)
it_point = InterventionFactory.create(target=topo_point)
it_line = InterventionFactory.create(target=topo_line)
course_point_a = Point(0, 0, srid=2154)
course_point_b = Point(5, 5, srid=2154)
course_line = LineString((0, 0), (1, 1), srid=2154)
course_geometry_collection = GeometryCollection(course_point_a, course_point_b, course_line, srid=2154)
course = CourseFactory.create(geom=course_geometry_collection)
it_geometrycollection = InterventionFactory.create(target=course)
# reload
it_point = type(it_point).objects.get(pk=it_point.pk)
it_line = type(it_line).objects.get(pk=it_line.pk)
proj = ProjectFactory.create()
proj.interventions.add(it_point)
proj.interventions.add(it_line)
proj.interventions.add(it_geometrycollection)
# instantiate the class-based view 'abnormally' to use create_shape directly
# to avoid making http request, authent and reading from a zip
pfl = ZipShapeSerializer()
devnull = open(os.devnull, "wb")
pfl.serialize(Project.objects.all(), stream=devnull, delete=False,
fields=ProjectFormatList().columns)
shapefiles = pfl.path_directory
shapefiles = [shapefile for shapefile in os.listdir(shapefiles) if shapefile[-3:] == "shp"]
layers = {
s: gdal.DataSource(os.path.join(pfl.path_directory, s))[0] for s in shapefiles
}
self.assertEqual(len(layers), 2)
geom_type_layer = {layer.name: layer for layer in layers.values()}
geom_types = geom_type_layer.keys()
self.assertIn('MultiPoint', geom_types)
self.assertIn('MultiLineString', geom_types)
for layer in layers.values():
self.assertEqual(layer.srs.name, 'RGF93_Lambert_93')
self.assertCountEqual(layer.fields, [
'id', 'name', 'period', 'type', 'domain', 'constraint',
'global_cos', 'interventi', 'comments',
'contractor', 'project_ow', 'project_ma', 'founders',
'related_st', 'insertion_', 'update_dat',
'cities', 'districts', 'restricted'
])
self.assertEqual(len(layer), 1)
for feature in geom_type_layer['MultiPoint']:
self.assertEqual(str(feature['id']), str(proj.pk))
self.assertEqual(len(feature.geom.geos), 3)
geoms = {geos.wkt for geos in feature.geom.geos}
self.assertSetEqual(geoms, {it_point.geom.wkt, course_point_a.wkt, course_point_b.wkt})
for feature in geom_type_layer['MultiLineString']:
self.assertEqual(str(feature['id']), str(proj.pk))
self.assertEqual(len(feature.geom.geos), 2)
geoms = {geos.wkt for geos in feature.geom.geos}
self.assertSetEqual(geoms, {it_line.geom.wkt, course_line.wkt})
@override_settings(ENABLE_JOBS_COSTS_DETAILED_EXPORT=True)
class TestDetailedJobCostsExports(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = SuperUserFactory.create()
cls.job1 = InterventionJobFactory(job="Worker", cost=12)
cls.job2 = InterventionJobFactory(job="Streamer", cost=60)
cls.job1_column_name = "Cost Worker"
cls.job2_column_name = "Cost Streamer"
cls.interv = InterventionFactory()
cls.manday1 = ManDayFactory(nb_days=3, job=cls.job1, intervention=cls.interv)
cls.manday2 = ManDayFactory(nb_days=2, job=cls.job2, intervention=cls.interv)
cls.job3 = InterventionJobFactory(job="Banker", cost=5000)
cls.job3_column_name = "Cost Banker"
def setUp(self):
self.client.force_login(self.user)
def test_detailed_mandays_export(self):
'''Test detailed intervention job costs are exported properly, and follow data changes'''
# Assert each job used in intervention has a column in export view
columns = InterventionFormatList().columns
self.assertIn(self.job1_column_name, columns)
self.assertIn(self.job2_column_name, columns)
# Assert no duplicate in column exports
self.assertEqual(len(columns), len(set(columns)))
# Assert job not used in intervention is not exported
self.assertNotIn(self.job3_column_name, columns)
# Assert queryset contains right amount for each cost
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost1_in_query_set = getattr(interv_in_query_set, self.job1_column_name)
self.assertEqual(cost1_in_query_set, self.job1.cost * self.manday1.nb_days)
cost2_in_query_set = getattr(interv_in_query_set, self.job2_column_name)
self.assertEqual(cost2_in_query_set, self.job2.cost * self.manday2.nb_days)
# Assert cost is calculated properly when we add and remove mandays on the same job
# Add manday and refresh
manday1bis = ManDayFactory(nb_days=1, job=self.job1, intervention=self.interv)
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost1_in_query_set = getattr(interv_in_query_set, self.job1_column_name)
self.assertEqual(cost1_in_query_set, self.job1.cost * (self.manday1.nb_days + manday1bis.nb_days))
# Remove manday and refresh
manday1bis.delete()
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost1_in_query_set = getattr(interv_in_query_set, self.job1_column_name)
self.assertEqual(cost1_in_query_set, self.job1.cost * self.manday1.nb_days)
# Assert deleted manday does not create an entry
self.manday1.delete()
columns = InterventionFormatList().columns
self.assertNotIn(self.job1_column_name, columns)
# Test column translations don't mess it up
activate('fr')
columns = InterventionFormatList().columns
self.assertIn(f"Coût {self.job2}", columns)
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost2_in_query_set = getattr(interv_in_query_set, f"Coût {self.job2}")
self.assertEqual(cost2_in_query_set, self.job2.cost * self.manday2.nb_days)
deactivate_all()
def test_csv_detailed_cost_content(self):
'''Test CSV job costs exports contain accurate total price'''
response = self.client.get('/intervention/list/export/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/csv')
# Assert right costs in CSV
reader = csv.DictReader(StringIO(response.content.decode("utf-8")), delimiter=',')
for row in reader:
self.assertEqual(Decimal(row[self.job1_column_name]), self.job1.cost * self.manday1.nb_days)
self.assertEqual(Decimal(row[self.job2_column_name]), self.job2.cost * self.manday2.nb_days)
def test_shp_detailed_cost_content(self):
'''Test SHP job costs exports contain accurate total price'''
signage = SignageFactory.create()
InterventionFactory.create(target=signage)
i_course = InterventionFactory.create(target=CourseFactory.create())
ManDayFactory.create(intervention=i_course, nb_days=2)
response = self.client.get('/intervention/list/export/?format=shp')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'application/zip')
# Assert right costs in SHP
with ZipFile(BytesIO(response.content)) as mzip:
temp_directory = TemporaryDirectory()
mzip.extractall(path=temp_directory.name)
shapefiles = [shapefile for shapefile in os.listdir(temp_directory.name) if shapefile[-3:] == "shp"]
layers = {
s: gdal.DataSource(os.path.join(temp_directory.name, s))[0] for s in shapefiles
}
l_linestring = layers['LineString.shp']
l_point = layers['Point.shp']
feature_linestring = l_linestring[0]
feature_point = l_point[0]
self.assertEqual(Decimal(str(feature_linestring['cost_worke'])), self.job1.cost * self.manday1.nb_days)
self.assertEqual(Decimal(str(feature_linestring['cost_strea'])), self.job2.cost * self.manday2.nb_days)
self.assertIsNone(feature_point.get('cost_worke'))
self.assertIsNone(feature_point.get('cost_strea'))
@override_settings(ENABLE_JOBS_COSTS_DETAILED_EXPORT=True)
class TestInterventionTargetExports(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = SuperUserFactory.create()
cls.path = PathFactory(name="mypath")
cls.interv = InterventionFactory(target=cls.path)
def setUp(self):
self.client.force_login(self.user)
def test_csv_target_content(self):
response = self.client.get('/intervention/list/export/', params={'format': 'csv'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/csv')
# Assert right format in CSV
reader = csv.DictReader(StringIO(response.content.decode("utf-8")), delimiter=',')
for row in reader:
self.assertEqual(row["On"], f"Path: {self.path.name} ({self.path.pk})")
|
py
|
1a58696e31ad8a9d35ea8cb8384f805a6eaa3079
|
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import yaml
import os.path as osp
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve, auc
class Evaluator(object):
def __init__(self, model_path, topk=5):
with open(osp.join(model_path, "model.yml")) as f:
model_info = yaml.load(f.read(), Loader=yaml.Loader)
with open(osp.join(model_path, 'eval_details.json'), 'r') as f:
eval_details = json.load(f)
self.topk = topk
self.labels = model_info['_Attributes']['labels']
self.true_labels = np.array(eval_details['true_labels'])
self.pred_scores = np.array(eval_details['pred_scores'])
label_ids_list = list(range(len(self.labels)))
self.no_appear_label_ids = set(label_ids_list) - set(
self.true_labels.tolist())
def cal_confusion_matrix(self):
'''Compute the confusion matrix.
'''
pred_labels = np.argsort(self.pred_scores)[:, -1:].flatten()
cm = confusion_matrix(
self.true_labels.tolist(),
pred_labels.tolist(),
labels=list(range(len(self.labels))))
return cm
def cal_precision_recall_F1(self):
'''Compute precision, recall, and F1.
'''
out = {}
out_avg = {}
out_avg['precision'] = 0.0
out_avg['recall'] = 0.0
out_avg['F1'] = 0.0
pred_labels = np.argsort(self.pred_scores)[:, -1:].flatten()
for label_id in range(len(self.labels)):
out[self.labels[label_id]] = {}
if label_id in self.no_appear_label_ids:
out[self.labels[label_id]]['precision'] = -1.0
out[self.labels[label_id]]['recall'] = -1.0
out[self.labels[label_id]]['F1'] = -1.0
continue
pred_index = np.where(pred_labels == label_id)[0].tolist()
tp = np.sum(
self.true_labels[pred_index] == pred_labels[pred_index])
tp_fp = len(pred_index)
tp_fn = len(np.where(self.true_labels == label_id)[0].tolist())
out[self.labels[label_id]]['precision'] = tp * 1.0 / tp_fp
out[self.labels[label_id]]['recall'] = tp * 1.0 / tp_fn
out[self.labels[label_id]]['F1'] = 2 * tp * 1.0 / (tp_fp + tp_fn)
ratio = tp_fn * 1.0 / self.true_labels.shape[0]
out_avg['precision'] += out[self.labels[label_id]][
'precision'] * ratio
out_avg['recall'] += out[self.labels[label_id]]['recall'] * ratio
out_avg['F1'] += out[self.labels[label_id]]['F1'] * ratio
return out, out_avg
def cal_auc(self):
'''Compute AUC for each label.
'''
out = {}
for label_id in range(len(self.labels)):
part_pred_scores = self.pred_scores[:, label_id:label_id + 1]
part_pred_scores = part_pred_scores.flatten()
fpr, tpr, thresholds = roc_curve(
self.true_labels, part_pred_scores, pos_label=label_id)
label_auc = auc(fpr, tpr)
if label_id in self.no_appear_label_ids:
out[self.labels[label_id]] = -1.0
continue
out[self.labels[label_id]] = label_auc
return out
def cal_accuracy(self):
'''Compute top-1 and top-k accuracy.
'''
out = {}
k = min(self.topk, len(self.labels))
pred_top1_label = np.argsort(self.pred_scores)[:, -1]
pred_topk_label = np.argsort(self.pred_scores)[:, -k:]
acc1 = sum(pred_top1_label == self.true_labels) / len(self.true_labels)
acck = sum([
np.isin(x, y) for x, y in zip(self.true_labels, pred_topk_label)
]) / len(self.true_labels)
out['acc1'] = acc1
out['acck'] = acck
out['k'] = k
return out
def generate_report(self):
'''Generate the evaluation report.
'''
report = dict()
report['Confusion_Matrix'] = self.cal_confusion_matrix().tolist()
report['PRF1_average'] = {}
report['PRF1'], report['PRF1_average'][
'over_all'] = self.cal_precision_recall_F1()
auc = self.cal_auc()
for k, v in auc.items():
report['PRF1'][k]['auc'] = v
acc = self.cal_accuracy()
report["Acc1"] = acc["acc1"]
report["Acck"] = acc["acck"]
report["topk"] = acc["k"]
report['label_list'] = self.labels
return report
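# A minimal usage sketch (hedged: the model directory below is hypothetical and is
# expected to contain the model.yml and eval_details.json files read in __init__):
#
#     evaluator = Evaluator('output/mobilenet/best_model', topk=5)
#     report = evaluator.generate_report()
#     print(report['Acc1'], report['Acck'], report['topk'])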
|