blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
967cd0a9fdaedfc7ac4e017aea380c1dd7e3838b | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/traits/util/tests/test_deprecated.py | ead58b2f27d7d3149d34c03bf439b923de0956b4 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 2,061 | py | # ------------------------------------------------------------------------------
# Copyright (c) 2005-2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
# ------------------------------------------------------------------------------
from traits.testing.api import UnittestTools
from traits.testing.unittest_tools import unittest
from traits.util.api import deprecated
@deprecated("Addition is deprecated; use subtraction instead.")
def my_deprecated_addition(x, y):
    """Return the sum of *x* and *y*; each call emits a deprecation warning."""
    total = x + y
    return total
@deprecated("Broken code. Use something else.")
def my_bad_function():
    """Raise ZeroDivisionError; lets tests check the warning fires before the error."""
    1 / 0
class ClassWithDeprecatedBits(object):
    """Holds deprecated methods for exercising @deprecated on instance methods."""
    @deprecated("bits are deprecated; use bytes")
    def bits(self):
        """Return a fixed value; calling it should emit a deprecation warning."""
        return 42
    @deprecated("bytes are deprecated too. Use base 10.")
    def bytes(self, required_arg, *args, **kwargs):
        """Echo the arguments back so tests can verify they pass through the wrapper."""
        return required_arg, args, kwargs
class TestDeprecated(unittest.TestCase, UnittestTools):
    """Exercise the @deprecated decorator on free functions and methods."""
    def test_deprecated_function(self):
        # Calling the wrapped function warns but still returns the sum.
        with self.assertDeprecated():
            result = my_deprecated_addition(42, 1729)
        self.assertEqual(result, 1771)
    def test_deprecated_exception_raising_function(self):
        # The deprecation warning fires before the wrapped body raises.
        with self.assertRaises(ZeroDivisionError):
            with self.assertDeprecated():
                my_bad_function()
    def test_deprecated_method(self):
        obj = ClassWithDeprecatedBits()
        with self.assertDeprecated():
            result = obj.bits()
        self.assertEqual(result, 42)
    def test_deprecated_method_with_fancy_signature(self):
        # Positional, *args and **kwargs must all pass through the wrapper intact.
        obj = ClassWithDeprecatedBits()
        with self.assertDeprecated():
            result = obj.bytes(3, 27, 65, name="Boris", age=-3.2)
        self.assertEqual(result, (3, (27, 65), {"name": "Boris", "age": -3.2}))
| [
"[email protected]"
] | |
1303e073dc84ebad408b0edac7e5fb07bdf84434 | 099b57613250ae0a0c3c75cc2a9b8095a5aac312 | /leetcode/Hashtable/3. 没有重复字符的最长子串长度(hashmap滑动窗口).py | 8ee74e2f67fcb006783210aefdb598862a6b8705 | [] | no_license | MitsurugiMeiya/Leetcoding | 36e41c8d649b777e5c057a5241007d04ad8f61cd | 87a6912ab4e21ab9be4dd6e90c2a6f8da9c68663 | refs/heads/master | 2022-06-17T19:48:41.692320 | 2020-05-13T16:45:54 | 2020-05-13T16:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py |
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of *s* without
        repeating characters, using a sliding window over a
        character -> most-recent-index map.

        :type s: str
        :rtype: int
        """
        last_seen = {}      # most recent index of each character
        window_start = -1   # index just *before* the current window
        best = 0
        for idx, ch in enumerate(s):
            # A repeat inside the current window forces the window to
            # shrink so the previous occurrence falls outside it.
            if ch in last_seen and last_seen[ch] > window_start:
                window_start = last_seen[ch]
            last_seen[ch] = idx
            best = max(best, idx - window_start)
        return best
"""
https://www.youtube.com/watch?v=COVvQ9I7XyI
答案:
hashmap里 key是 字母, value是字母所对应的index
1.强调一点,这里计算长度的方式是 right - left, 这种计算方式是计算(left,right]的长度
所以一开始left 是等于-1,因为这样才可以计算从[0,right]的长度
2.我们一次遍历完整个字符串
abac
right
如果 s[right] (a) 在之前出现过,同时hashmap[s[right]] > left,这说明了在当前的(left,right],有两个s[right]
所以我们要更新left, 去创造一个不重复包括s[right]的新的(left,right]
所以我们要更新left,left = hashmap[s[right]](第一个出现的s[right]的index)
这表面我们从第一个a后面的字符开始计算
同时更新hashmap[s[right]]的value,就是a的新index
如果s[right]没在之前出现过,或者 hashmap[s[right]] < left,这说明在当前的(left,right),还不存在当前元素
说明这个元素我们要把把它统计到最长子串中
例如:"tmmzuxt"
我们愿意把最后一个t加入到我们的最长字串当中,因为第一个t我们早已不在字串中了(index<left)
所以这个最后这个t我们要把它加进去
""" | [
"[email protected]"
] | |
50215d61361227c827e4df2011348c8fd87d275b | be7a1a6bfe16729e5e11451f16ef3205e2ce9208 | /cabinet/migrations/0001_initial.py | 9b190e622f8fa48737c9f55c599777a7314537c2 | [] | no_license | xiaoyaolaotou/django-rest_framework | b790e4926f430d1125f455812344babe9b1c1179 | 2e2d792633996f05780e22f226a3e1afa4178ea0 | refs/heads/master | 2020-03-22T19:30:30.319249 | 2018-07-11T06:51:28 | 2018-07-11T06:51:28 | 140,533,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-03 04:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the Cabinet model (table resources_cabinet)."""
    initial = True
    dependencies = [
        # The Idc model must exist before Cabinet can reference it.
        ('idcs', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cabinet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Deleting an Idc cascades to its cabinets.
                ('idc', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='idcs.Idc', verbose_name='所在机房')),
            ],
            options={
                'ordering': ['id'],
                'db_table': 'resources_cabinet',
            },
        ),
    ]
| [
"[email protected]"
] | |
1159e47d3ec5b74c128ba41c3b3197930fa38b79 | 030724b60fb4f8b63953b7401702a98072993e94 | /python/796.rotate_string.py | 6b29b2bfd77e32fbbd1bb6e616ff612beb4d84c1 | [] | no_license | MtTsai/Leetcode | 5f51a892b78cf6427ce2b4891a10bc2d4ed4d972 | 21e83294aee779a16a8c1b96089da4a40eb03035 | refs/heads/master | 2021-01-24T17:17:52.909429 | 2019-08-04T06:53:53 | 2019-08-04T06:54:23 | 123,228,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | class Solution(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
if len(A) != len(B):
return False
if not len(A):
return True
for i in range(len(B)):
if A == B[i:] + B[:i]:
return True
return False
| [
"[email protected]"
] | |
bde6df60ba0a98d920ec3727a93a12ec6bf5347d | 233208e64f4f977a94e2a8675e0af4ed5bc094b8 | /crux/schema/dataset.py | 81618fc4beb58911e8e4c06db4ea4b732dcfb47b | [] | no_license | thomasjpfan/crux-v2-backend | 74589cade29e45490c413a805e2194506dd95ab4 | d4fe26aca9d696f5c97d4fbb7e747e074c3d956f | refs/heads/master | 2022-04-29T08:18:48.961678 | 2021-03-19T02:19:58 | 2021-03-19T02:19:58 | 182,922,124 | 0 | 0 | null | 2022-04-22T21:10:09 | 2019-04-23T03:28:35 | Python | UTF-8 | Python | false | false | 3,644 | py | import graphene
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphql_jwt.decorators import login_required
from graphql_relay import from_global_id
from .nodes import DatasetNode
from ..models import DatasetTag
from ..models import Dataset
from ..models import Task
class CreateDataset(relay.ClientIDMutation):
    """Relay mutation: create a Dataset (plus tags and tasks) for a figshare id."""
    class Input:
        # All inputs are required; tags/tasks are lists of plain strings.
        name = graphene.String(required=True)
        description = graphene.String(required=True)
        tags = graphene.List(graphene.NonNull(graphene.String), required=True)
        tasks = graphene.List(graphene.NonNull(graphene.String), required=True)
        figshare_id = graphene.Int(required=True)
    dataset = graphene.Field(DatasetNode)
    @staticmethod
    @login_required
    def mutate_and_get_payload(self,
                               info,
                               name,
                               description,
                               tags,
                               tasks,
                               figshare_id,
                               client_mutation_id=None):
        """Create and return the new dataset; rejects duplicate figshare ids."""
        user = info.context.user
        # NOTE(review): @login_required should already block anonymous users,
        # so this re-check is presumably defensive -- confirm before removing.
        if not user.is_authenticated:
            raise Exception('Authentication credentials were not provided')
        ds_exists = Dataset.objects.filter(figshare_id=figshare_id).exists()
        if ds_exists:
            raise Exception(
                f'figshare document, {figshare_id} already in database')
        dataset = Dataset(name=name,
                          description=description,
                          created_by=user,
                          figshare_id=figshare_id)
        dataset.save()
        # Tags are shared across datasets: reuse an existing row when present.
        for tag_name in tags:
            tag, _ = DatasetTag.objects.get_or_create(name=tag_name)
            dataset.tags.add(tag)
        for task_name in tasks:
            Task.objects.get_or_create(name=task_name,
                                       dataset=dataset,
                                       created_by=user)
        return CreateDataset(dataset=dataset)
class EditDataset(relay.ClientIDMutation):
    """Relay mutation: update a dataset's description and add tasks.

    Only the user that created the dataset may edit it.
    """
    class Input:
        description = graphene.String(required=True)
        dataset_id = relay.GlobalID(required=True)
        tasks = graphene.List(graphene.NonNull(graphene.String), required=True)
    dataset = graphene.Field(DatasetNode)
    @staticmethod
    @login_required
    def mutate_and_get_payload(self,
                               info,
                               description,
                               dataset_id,
                               tasks,
                               client_mutation_id=None):
        """Apply the edit and return the updated dataset node."""
        user = info.context.user
        # NOTE(review): @login_required should already block anonymous users.
        if not user.is_authenticated:
            raise Exception('Authentication credentials were not provided')
        # Resolve the relay global id back to the concrete node instance.
        _type, _id = from_global_id(dataset_id)
        graphene_type = info.schema.get_type(_type).graphene_type
        dataset_obj = graphene_type.get_node(info, _id)
        if dataset_obj.created_by != user:
            raise Exception('User did not create dataset')
        dataset_obj.description = description
        # tasks is a required argument, so this check is always true; kept as-is.
        if tasks is not None:
            for task_name in tasks:
                Task.objects.get_or_create(name=task_name,
                                           dataset=dataset_obj,
                                           created_by=user)
        dataset_obj.save()
        return EditDataset(dataset=dataset_obj)
class DatasetQuery:
    """Query fields: one dataset by relay id, plus a filterable connection."""
    dataset = relay.Node.Field(DatasetNode)
    datasets = DjangoFilterConnectionField(DatasetNode)
class DatasetMutations:
    """Mutation fields exposed by this module."""
    create_dataset = CreateDataset.Field()
    edit_dataset = EditDataset.Field()
| [
"[email protected]"
] | |
63032003a234d3872ffe883b21527821e270be33 | 55d560fe6678a3edc9232ef14de8fafd7b7ece12 | /tools/build/src/tools/types/cpp.py | f4edec8ff445a07226c5aecc344e3a94ae05dc07 | [
"BSL-1.0"
] | permissive | stardog-union/boost | ec3abeeef1b45389228df031bf25b470d3d123c5 | caa4a540db892caa92e5346e0094c63dea51cbfb | refs/heads/stardog/develop | 2021-06-25T02:15:10.697006 | 2020-11-17T19:50:35 | 2020-11-17T19:50:35 | 148,681,713 | 0 | 0 | BSL-1.0 | 2020-11-17T19:50:36 | 2018-09-13T18:38:54 | C++ | UTF-8 | Python | false | false | 385 | py | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from b2.build import type as type_
# Register the C-family file types with Boost.Build, mapping each type
# name to the file extensions it recognises.
type_.register_type('CPP', ['cpp', 'cxx', 'cc'])
type_.register_type('H', ['h'])
# 'H' is passed as the base type, so .hpp files also count as headers.
type_.register_type('HPP', ['hpp'], 'H')
type_.register_type('C', ['c'])
| [
"[email protected]"
] | |
a6e95d1edb7dda366e388cd121406f406675eaed | d00a72ae536b77667e8544c0a7c91a5be4faeddc | /PIONEER-ROBOT/pioneer_yolov3/scripts/utils/parse_config.py | de593d6ebcae059505d8d9c7724d39068beb7c1f | [] | no_license | ahanjaya/THORMANG3-OPC | 079b441dd2ae3575fbf6f78b97bfb31685355d8d | 15d707e4206999d95a2e5922cb1a531b1499ef7e | refs/heads/master | 2020-07-11T01:46:23.626851 | 2020-05-22T12:40:26 | 2020-05-22T12:40:26 | 204,421,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | #!/usr/bin/env python3
def parse_model_config(path):
    """Parse a yolo-v3 layer configuration file into module definitions.

    Each ``[section]`` header opens a new dict whose name is stored under
    ``'type'``; subsequent ``key=value`` lines are stored (as strings) in
    that dict.  Convolutional blocks default ``batch_normalize`` to 0.

    The file handle is closed deterministically via a context manager
    (the original leaked the open file object).
    """
    with open(path, 'r') as file:
        lines = file.read().split('\n')
    lines = [x for x in lines if x and not x.startswith('#')]
    lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
    module_defs = []
    for line in lines:
        if line.startswith('['): # This marks the start of a new block
            module_defs.append({})
            module_defs[-1]['type'] = line[1:-1].rstrip()
            if module_defs[-1]['type'] == 'convolutional':
                module_defs[-1]['batch_normalize'] = 0
        else:
            key, value = line.split("=")
            value = value.strip()
            module_defs[-1][key.rstrip()] = value.strip()
    return module_defs
def parse_data_config(path):
    """Parses the data configuration file"""
    # Defaults used when the file does not override them.
    options = dict()
    options['gpus'] = '0,1,2,3'
    options['num_workers'] = '10'
    with open(path, 'r') as fp:
        lines = fp.readlines()
    for line in lines:
        line = line.strip()
        # Skip blank lines and comments.
        if line == '' or line.startswith('#'):
            continue
        key, value = line.split('=')
        options[key.strip()] = value.strip()
return options | [
"[email protected]"
] | |
65ce8fc2c42afb3ab3f77c14bc8f6a40c1d081d6 | dce8dad6067ff3f6f59e1fa8185d249fd0bd9c58 | /tests/performance/microbenchmarks/MicroPerf_BuildCommon/run.py | 7f3e5deb21cdf956d50eba26df41d4a9a34feb07 | [
"Apache-2.0"
] | permissive | xpybuild/xpybuild | ccd6c22d4aa0560ee5583004b94dccc03c0cad52 | 9e0aa0ae268cf3fcc759572a026e1ed2a03379e0 | refs/heads/master | 2023-07-12T03:04:08.687644 | 2023-06-29T13:57:34 | 2023-06-29T13:57:34 | 81,104,277 | 9 | 5 | Apache-2.0 | 2022-01-07T18:48:57 | 2017-02-06T15:58:02 | Python | UTF-8 | Python | false | false | 452 | py | from pysys.constants import *
from xpybuild.microperf_basetest import MicroPerfPySysTest
class PySysTest(MicroPerfPySysTest):
    """Micro-performance test comparing isDirPath/isWindows calls via OPERATIONS."""
    OPERATIONS = [
        # resultKey (must be a valid filename), command, setup
        ('xpybuild.buildcommon.isDirPath()','isDirPath(OUTPUT_DIR)', ""),
        ('xpybuild.fileutils.isDirPath()','fileutils_isDirPath(OUTPUT_DIR)', "from xpybuild.utils.fileutils import isDirPath as fileutils_isDirPath"),
        ('isWindows()','isWindows()',''),
] | [
"[email protected]"
] | |
ea3039cb62c55b38b78a273a04f80356332081b1 | 14675f0c66fb4f4eeaa6ad1e8e691b9edf8f0bdb | /All other combo programs/Program_to_count_elements_in_list_till_its_tuple.py | 64b4dce0759b39546bea2ccdb184cb48dc15a24f | [] | no_license | abhishekjoshi1991/Python_Learning | 9a94529643eac7394615289e2ecd96106e70ddb8 | a74293d0776304638b5cf976b3534481e57b17f2 | refs/heads/master | 2023-04-16T02:21:30.588052 | 2021-04-21T13:58:29 | 2021-04-21T13:58:29 | 360,176,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # Count the elements in a list until an element is a Tuple
# Count how many leading elements of the sequence appear before the
# first element that is itself a tuple.
a = (10, 20, 30, 40, (1, 2, 3), 90)
count = 0
for element in a:
    if isinstance(element, tuple):
        break
    count += 1
print(count)
| [
"[email protected]"
] | |
4ba72c068ca253243de27fffa171e4b35a6ea7f3 | bf07c592fbbe7b44e32b08b2489f63f4ce7ad33a | /blog/migrations/0002_auto_20210315_1954.py | d22e82ec669eed1f23be79eb91248c7f57e4a97b | [] | no_license | dabslee/BrandonsSandbox | 46f032a3227c786d74ac4cae7545e753bf35dbd4 | 07b624f66d71b315cf6dce35bf46e2fbb8f96b9c | refs/heads/master | 2023-07-26T13:27:55.054558 | 2021-09-12T01:33:04 | 2021-09-12T01:33:04 | 347,568,825 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # Generated by Django 3.1.6 on 2021-03-16 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter blog.Post.created to a plain DateTimeField."""
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='created',
            field=models.DateTimeField(),
        ),
    ]
| [
"[email protected]"
] | |
d4e2787173097729d2b66f77e7d7a6c3bc5ec4e6 | 9a7904a3ee4abd4c352a746b13963073aa62314b | /04. AI/1.Machin Learning/199_cross_iris.py | 18057a0caf6b4b34224bcf768ee67a6f732720ad | [] | no_license | cat-holic/Python-Bigdata | 4ab1528fa9e804206a2381ac08293088c3e9e250 | 2cb6c75eb02b3b0dc3a16a63c0446c1fc6f04f71 | refs/heads/master | 2020-03-15T09:27:33.944887 | 2018-08-02T08:16:35 | 2018-08-02T08:16:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | from sklearn import svm, metrics
import random, re
# Read the iris CSV file --- 1
# (the context manager closes the handle; the original leaked the open file)
with open('iris.csv', 'r', encoding='utf-8') as fp:
    lines = fp.read().split('\n')
# Convert purely numeric fields to float; non-numeric fields (the species
# label) stay as strings.  The original character class "[0`9\.]" contained
# a stray backtick where the "0-9" range was intended, so numeric fields
# never matched and were left as strings.
f_tonum = lambda n: float(n) if re.match(r'^[0-9\.]+$', n) else n
f_cols = lambda li: list(map(f_tonum, li.strip().split(',')))
csv = list(map(f_cols, lines))
del csv[0] # drop the header row
# Split the data into K folds --- 2
k = 5
csvk = [[] for i in range(k)]
for i in range(len(csv)):
    csvk[i % k].append(csv[i])
print("데이터 분할완료")
# Helper that splits rows into training features and their labels
def split_data_label(rows):
    """Split iris rows into four-column feature vectors and label strings."""
    features = [row[:4] for row in rows]
    labels = [row[4] for row in rows]
    return features, labels
# Compute the accuracy for one train/test split --- 3
def calc_score(test, train):
    """Fit an SVC on *train* and return its accuracy on *test*."""
    test_features, test_labels = split_data_label(test)
    train_features, train_labels = split_data_label(train)
    classifier = svm.SVC()
    classifier.fit(train_features, train_labels)
    predicted = classifier.predict(test_features)
    return metrics.accuracy_score(test_labels, predicted)
# Evaluate accuracy across the K folds --- 4
score_list = []
for testc in csvk:
    # Use every fold except testc as the training data
    trainc = []
    for i in csvk:
        if i != testc:
            trainc += i
    sc = calc_score(testc, trainc)
    score_list.append(sc)
print("각각의 정답률 = ", score_list)
print("평균 정답률 =", sum(score_list)/len(score_list)) | [
"[email protected]"
] | |
937bf12bb07ce110f75155d56b1317e89abf2930 | 492ec6be99affb752aa7cb887cfef7c1f29028c4 | /js_rss_articles/migrations/0001_initial.py | f272f7ac1828432946ee63c852f86be71681028a | [] | no_license | compoundpartners/js-rss-articles | e49c4498eae5b3998854dc4f39b37289742ff599 | f8ab5f0a4240e1ab2c15ff24cdf1935f61fdc357 | refs/heads/master | 2020-04-22T16:14:10.854225 | 2019-02-13T12:12:18 | 2019-02-13T12:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-04 04:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the RSSArticles CMS plugin model."""
    initial = True
    dependencies = [
        ('cms', '0020_old_tree_cleanup'),
    ]
    operations = [
        migrations.CreateModel(
            name='RSSArticles',
            fields=[
                # One-to-one link back to the base CMSPlugin row.
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='js_rss_articles_rssarticles', serialize=False, to='cms.CMSPlugin')),
                ('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='title')),
                ('url', models.CharField(blank=True, max_length=255, null=True, verbose_name='rss url')),
                ('count', models.IntegerField(verbose_name='number of articles')),
                ('layout', models.CharField(choices=[('columns', 'Columns'), ('rows', 'Rows'), ('hero', 'Hero'), ('articles', 'Articles')], max_length=30, verbose_name='layout')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
| [
"[email protected]"
] | |
2ba25ab9a95aba75b53be5c4c308be120ae61d3a | 3ba20a18943f70f748189f10fe7c4081f5de528f | /docs/Week3_Python /OOP_Exercises/OOP_class2.py | 482daee69b71d53ac1a826950d66aa1389b34fd3 | [] | no_license | naistangz/Technical_Training | c18dfb7c1986ade227292ebc9f6bf89adb6d9291 | daaee180f650ab3d0ddb1fd9456b9a5d79af4fcc | refs/heads/master | 2022-12-03T22:28:12.317308 | 2020-08-25T23:11:45 | 2020-08-25T23:11:45 | 272,961,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | class Animal:
# class variable outside functions - dangerous
animal_kind = "canine"
    def __init__(self, name, colour, breed, hunger):
        """Store the attributes that describe this animal."""
        self.name = name
        self.colour = colour
        self.breed = breed
        self.hunger = hunger
def bark(self):
self.animal_kind
return "woof woof"
    def sleep(self):
        """Return the sleeping sound."""
        return "zzz zzz zzz"
    def run(self):
        """Return the walk-time phrase."""
        return "walkies!"
    def eat(self):
        """Return the eating sound."""
        return "nom nom nom..."
# Instantiate Animal objects and read their attributes/methods.
pip = Animal("Pip", "white", "Labrador", hunger="hungry") # creating an object of our class
print(pip.name) # printing an attribute
print(pip.colour)
print(pip.animal_kind)
kiko = Animal("Kiko", "brown", "Poodle", hunger="starving") # instantiating or creating an object
print(kiko.colour)
print(kiko.name)
print(kiko.bark())
mongoose = Animal("Mongoose", "black", "Yorkshire Terrior", hunger="hungry")
print(mongoose.run())
print(mongoose.eat())
print(mongoose.breed)
mika = Animal("Mika", "pink", "German Shepherd", hunger="hungry")
# Assigning on the instance creates an attribute that shadows the class variable.
mika.animal_kind = "fish"
print(mika.animal_kind)
# Using Inheritance
class Bird(Animal):
    """An Animal subclass with bird-specific behaviour.

    The commented-out ``__init__`` stub (which only forwarded to
    ``super().__init__()``) was dead code and has been removed; Python
    inherits ``Animal.__init__`` automatically when none is defined.
    """
    def tweet(self):
        """Print the bird's sound."""
        print("tweet tweet")
    def eat(self):
        """Override Animal.eat: prints (returns None) instead of returning a string."""
        print("nibble nibble")
    def info(self):
        """Print a description built from the inherited attributes."""
        print(f"I am a bird. My name is {self.name}. I am a {self.breed}")
# Exercise the inherited constructor and the Bird overrides.
richard = Bird("Richard", "blue", "blue tit", hunger="starving")
richard.tweet()
richard.eat()
| [
"[email protected]"
] | |
8f0a5971c0ef750fd2dbbdcd78fefd7adadb2005 | d8d8144ade3b53d54d47e9115723c9e330351916 | /backend/Himalaya/himalaya/urls.py | 15774d13ce57ab4dfa6d179c22165584dfe75834 | [
"MIT"
] | permissive | djs2082/StoreManagement-Freelancing | 28d70875651824a6ab310c68e1711142199e7797 | 11057cb4f155c0d36a8456c9ea3395b779516384 | refs/heads/master | 2023-07-20T09:12:50.718294 | 2020-08-23T14:54:28 | 2020-08-23T14:54:28 | 288,665,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,558 | py | """himalaya URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework.decorators import api_view,renderer_classes,permission_classes
from rest_framework.permissions import IsAuthenticated,IsAdminUser
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from django.views.static import serve
from django.contrib.auth.models import Group, User
# Populate the admin registry for all installed apps.
admin.autodiscover()
# admin.site.unregister(Token)
# admin.site.unregister(User)
# admin.site.unregister(Group)
# Branding for the Django admin site.
admin.site.site_header = "Denim Factory Admin"
admin.site.site_title = "Denim Factory Admin Portal"
admin.site.index_title = "Welcome to Denim Factory Admin Portal"
admin.site.site_url="https://shield-1712.firebaseapp.com/"
admin.empty_value_display="Nothing to Display, Add by clicking on right side"
# Top-level URL routing: the admin plus one include per app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('owner/', include("owner.urls","owner")),
    path('customers/', include("customers.urls","customers")),
    path('payments/',include('payment.urls')),
    path('items/',include('items.urls')),
    path('brands/',include('brands.urls')),
    path('sizes/',include('size.urls')),
    path('receipts/',include('receipts.urls'))
]
@api_view(['GET'])
@permission_classes([])  # open endpoint; authentication is done manually below
def protected_serve(request, path, document_root=None, show_indexes=False):
    """Serve a media file only when a valid DRF token is appended as ?Token=<key>.

    NOTE(review): passing auth tokens in the URL can leak them through logs
    and referrers; confirm whether a header-based scheme is feasible.
    """
    # path becomes [real_path] or [real_path, token_key]
    path=path.split("?Token=")
    if(len(path)>1):
        try:
            # Raises Token.DoesNotExist for unknown keys; the token object
            # itself is not used beyond this existence check.
            token=Token.objects.get(key=path[1])
            return serve(request, path[0], document_root, show_indexes)
        except Token.DoesNotExist:
            return Response({'authentication':'Authentication Credentials not provided/ Wrong Credentials'})
    else:
        return Response({'authentication':'Token should be provided with URL'})
# In DEBUG mode let Django serve uploaded media files directly.
if settings.DEBUG:
    urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
172031f7d80db86782e254084bbad7ebd5ce4a62 | 0a65d42f4f0e491cb2aada408401b94909f821c2 | /django_media/src/newApp/urls.py | fe401a8af347fd71e8d6964625edd673c714c70c | [] | no_license | jmadlansacay/_Office | 3acde7655784e91c7dcecfc853d4f36cdfeef028 | 7f46449b9f7e8e892e2e0025ba493259197fa592 | refs/heads/main | 2023-07-28T10:23:54.680822 | 2021-09-11T02:28:07 | 2021-09-11T02:28:07 | 379,155,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py |
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# Route the app root to the index view.
urlpatterns = [
    path('', views.index, name = 'index'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"[email protected]"
] | |
24488570c952f56d7c5f1fa6372ce288f2dfa114 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_legroom.py | 66f2fec81e858dcd1a1df2ae9270c9e5c37adfdc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py |
#calss header
class _LEGROOM():
def __init__(self,):
self.name = "LEGROOM"
self.definitions = [u'the amount of space available for your legs when you are sitting behind another seat: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
33d36a2f0e5308056479a9b773d00196e6c6399d | 26f8a8782a03693905a2d1eef69a5b9f37a07cce | /test/test_destiny_definitions_sockets_destiny_plug_whitelist_entry_definition.py | a26dc9eb8d8129c6fb82ad55f027d2db7e632795 | [] | no_license | roscroft/openapi3-swagger | 60975db806095fe9eba6d9d800b96f2feee99a5b | d1c659c7f301dcfee97ab30ba9db0f2506f4e95d | refs/heads/master | 2021-06-27T13:20:53.767130 | 2017-08-31T17:09:40 | 2017-08-31T17:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # coding: utf-8
"""
Bungie.Net API
These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality.
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.destiny_definitions_sockets_destiny_plug_whitelist_entry_definition import DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition
class TestDestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition(unittest.TestCase):
    """ DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition unit test stubs """
    def setUp(self):
        # Generated stub: no fixtures required.
        pass
    def tearDown(self):
        pass
    def testDestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition(self):
        """
        Test DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.destiny_definitions_sockets_destiny_plug_whitelist_entry_definition.DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition()
        pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
cc58d94c31115839eacb037326313e180039794b | be2931f7bb9ed71d42e92cd11709cd466e9486f2 | /app/api/task_api.py | 1f1054a2ebd82602064856c3e208204da1a2f8d6 | [
"BSD-3-Clause"
] | permissive | orf53975/Apfell | 44ca8a3a508353f0c3bf48ad67199e2aa4c3b6d8 | 7f4adb1fa7178137d2b78c2862a767712446e0e1 | refs/heads/master | 2020-03-29T21:20:33.312506 | 2018-09-22T02:18:19 | 2018-09-22T02:18:19 | 150,361,542 | 3 | 0 | null | 2018-09-26T03:09:36 | 2018-09-26T03:09:37 | null | UTF-8 | Python | false | false | 5,219 | py | from app import apfell, db_objects
from sanic.response import json
from app.database_models.model import Callback, Operator, Task, Command, FileMeta, Operation
from urllib.parse import unquote_plus
import datetime
from sanic_jwt.decorators import protected, inject_user
from app.api.utils import breakout_quoted_params
# This gets all tasks in the database
@apfell.route(apfell.config['API_BASE'] + "/tasks/", methods=['GET'])
@inject_user()
@protected()
async def get_all_tasks(request, user):
    """Return every task (with its callback and operator joined in) as JSON."""
    callbacks = Callback.select()
    operators = Operator.select()
    tasks = Task.select()
    # callbacks_with_operators = await db_objects.prefetch(callbacks, operators)
    # prefetch pulls the related callback/operator rows alongside the tasks
    full_task_data = await db_objects.prefetch(tasks, callbacks, operators)
    return json([c.to_json() for c in full_task_data])
@apfell.route(apfell.config['API_BASE'] + "/tasks/callback/<cid:int>", methods=['GET'])
@inject_user()
@protected()
async def get_all_tasks_for_callback(request, cid, user):
    """Return all tasks belonging to one callback, or a JSON error payload."""
    try:
        callback = await db_objects.get(Callback, id=cid)
    except Exception as e:
        return json({'status': 'error',
                     'error': 'Callback does not exist'})
    try:
        tasks = Task.select()
        cb_task_data = await db_objects.execute(Task.select().where(Task.callback == callback))
        return json([c.to_json() for c in cb_task_data])
    except Exception as e:
        return json({'status': 'error',
                     'error': 'No Tasks',
                     'msg': str(e)})
# We don't put @protected or @inject_user here since the callback needs to be able to call this function
@apfell.route(apfell.config['API_BASE'] + "/tasks/callback/<cid:int>/nextTask", methods=['GET'])
async def get_next_task(request, cid):
    """Agent check-in: mark the callback active and hand out the oldest submitted task."""
    # gets the next task by time for the callback to do
    try:
        callback = await db_objects.get(Callback, id=cid)
    except Exception as e:
        return json({'status': 'error',
                     'error': 'callback does not exist'})
    try:
        callback.last_checkin = datetime.datetime.now()
        callback.active = True # always set this to true regardless of what it was before because it's clearly active
        await db_objects.update(callback) # update the last checkin time
        operation = await db_objects.get(Operation, name=callback.operation.name)
        if not operation.complete:
            tasks = await db_objects.get(Task.select().join(Callback).where(
                (Task.callback == callback) & (Task.status == "submitted")).order_by(Task.timestamp))
        else:
            # if the operation is done, kill anything that still tries to get tasking
            return json({"command": "exit", "params": ""})
    except Exception as e:
        print(e)
        return json({'command': 'none'}) # return empty if there are no tasks that meet the criteria
    tasks.status = "processing"
    await db_objects.update(tasks)
    return json({"command": tasks.command.cmd, "params": tasks.params, "id": tasks.id})
# create a new task to a specific callback
@apfell.route(apfell.config['API_BASE'] + "/tasks/callback/<cid:int>", methods=['POST'])
@inject_user()
@protected()
async def add_task_to_callback(request, cid, user):
    """HTTP wrapper: stamp the requesting operator onto the payload and delegate."""
    data = request.json
    data['operator'] = user['username']
    return json(await add_task_to_callback_func(data, cid))
async def add_task_to_callback_func(data, cid):
    """Validate *data* and create a Task for callback *cid*.

    Returns the task's JSON merged with {'status': 'success'}, or an
    error dict when any lookup or creation step fails.
    """
    try:
        # first see if the operator and callback exists
        op = await db_objects.get(Operator, username=data['operator'])
        cb = await db_objects.get(Callback, id=cid)
        # now check the task and add it if it's valid
        cmd = await db_objects.get(Command, cmd=data['command'])
        file_meta = ""
        # some tasks require a bit more processing, so we'll handle that here so it's easier for the implant
        if cmd.cmd == "upload":
            # we need to get the file into the database before we can signal for the callback to pull it down
            # this will have {path to local file} {path to remote file} in the data['params'] section
            upload_params = await breakout_quoted_params(data['params'])
            file_meta = await db_objects.create(FileMeta, total_chunks=1, chunks_received=1, complete=True,
                                                path=upload_params[0], operation=cb.operation)
            data['params'] = str(file_meta.id) + " " + upload_params[1]
        if cmd.cmd == "download":
            if '"' in data['params']:
                # NOTE(review): this strips the first and last characters whenever
                # any quote is present, not only when the string is fully quoted.
                data['params'] = data['params'][1:-1] # remove "" around the string at this point if they are there
        task = await db_objects.create(Task, callback=cb, operator=op, command=cmd, params=data['params'])
        if cmd.cmd == "upload":
            # now we can associate the task with the filemeta object
            file_meta.task = task
            await db_objects.update(file_meta)
        status = {'status': 'success'}
        task_json = task.to_json()
        return {**status, **task_json}
    except Exception as e:
        print("failed to get something in add_task_to_callback_func " + str(e))
        return {'status': 'error', 'error': 'Failed to create task', 'msg': str(e)}
| [
"[email protected]"
] | |
e39b0900c10267e355514f90c0edadec2b928e73 | a1e17363c5fbb5e1e70c38c91108cc84b0a2e98a | /expyfun/_externals/ndarraysource.py | 2ef142c93f62ca66ac5cae3f189cb8898e8eec78 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | mmittag/expyfun | daa3332c8b228abaa60e2893210e7598d761a89b | ca52135ace62baf7419f4708e54ebf648a21c9cc | refs/heads/master | 2021-01-16T19:59:47.671278 | 2014-07-03T20:57:53 | 2014-07-03T20:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # -*- coding: utf-8 -*-
try:
    from pyglet.media import NdarraySource
except ImportError:
    from pyglet.media import StaticMemorySource, AudioFormat
    class NdarraySource(StaticMemorySource):
        """Play sound from numpy array

        :Parameters:
            `data` : ndarray
                float data with shape n_channels x n_samples. If ``data`` is
                1D, then the sound is assumed to be mono. Note that data
                will be clipped between +/- 1.
            `fs` : int
                Sample rate for the data.
        """
        def __init__(self, data, fs):
            fs = int(fs)
            if data.ndim not in (1, 2):
                raise ValueError('Data must have one or two dimensions')
            n_ch = data.shape[0] if data.ndim == 2 else 1
            data = data.T.ravel('C')
            # clip() returns a clipped copy; the original boolean-mask
            # assignments wrote in place and could modify the caller's
            # array through a view returned by ravel().
            data = data.clip(-1, 1)
            # tobytes() replaces the tostring() alias, which was removed
            # in NumPy 1.23.
            data = (data * (2 ** 15)).astype('int16').tobytes()
            audio_format = AudioFormat(channels=n_ch, sample_size=16,
                                       sample_rate=fs)
            super(NdarraySource, self).__init__(data, audio_format)
        def _get_queue_source(self):
            return self
| [
"[email protected]"
] | |
69b55b7eae0532801ad2677e109a8824ef180527 | 4dbc4d9c864ac4565193f412d1a2928f34d28da5 | /Educational Codeforces Round 91 (Rated for Div. 2)/.history/D_Berserk_And_Fireball_20200714175533.py | c57c462fe3a7e046e49bf7e8ae5dc3eb0d04a592 | [] | no_license | TomChan1991/codeforce | 91807fd9b62abc48eaed8c0bfac17a38707a2b5c | d23c882d9194ff09f8b41bd76c9cddc3af5c9b21 | refs/heads/master | 2022-12-08T09:23:05.599194 | 2020-07-20T13:54:35 | 2020-07-20T13:54:35 | 281,128,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import sys
inpy = [int(x) for x in sys.stdin.read().split()]
n, m, x, k, y = inpy[0:5]
a, b = inpy[5:5+n], inpy[5+n:]
prei = -1
i, j = 0, 0
res = 0
while i < len(a) and j < len(b):
print(i, j)
if a[i] == b[j]:
flag = True
maxV = 0
for j in range(prei + 1, i):
maxV = max(maxV, a[j])
minMana = None
if max(a[prei] if prei > 0 else 0, a[i]) > minMana:
minMana = y * (i - prei - 1)
if i - prei > k:
minMana = min(((i - prei - 1) // k) * x + ((i - prei - 1) % k) * y, minMana)
if not minMana:
break
res += minMana
prei = i
j += 1
i += 1
if j == len(b):
for j in range(prei + 1, len(a)):
maxV = max(maxV, a[j])
minMana = None
if a[prei] > minMana:
minMana = y * (i - prei - 1)
if i - prei > k:
minMana = min(((i - prei - 1) // k) * x + ((i - prei - 1) % k) * y, minMana)
if not minMana:
print(-1)
else:
print(res + minMana)
else:
print(-1)
| [
"[email protected]"
] | |
94ac20b87ff92b36f7406e2ef2b2dfcb4d534a0b | 17d5736896e79d4b8a11ed8d8ecddd6ede56b2a6 | /day_159_AddtoArrayFormofInteger.py | 02a99cfacee4532a28914a1aa701427853144533 | [] | no_license | parulsharma-121/CodingQuestions | e733e5b24c30f137593267d8464721a83df3f241 | 9c3a99501878edd22052505b8bda9863e5855fd7 | refs/heads/master | 2021-04-23T19:19:13.651753 | 2020-10-22T16:30:29 | 2020-10-22T16:30:29 | 249,979,493 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | '''
For a non-negative integer X, the array-form of X is an array of its digits in left to right order. For example, if X = 1231, then the array form is [1,2,3,1].
Given the array-form A of a non-negative integer X, return the array-form of the integer X+K.
Example 1:
Input: A = [1,2,0,0], K = 34
Output: [1,2,3,4]
Explanation: 1200 + 34 = 1234
Example 2:
Input: A = [2,7,4], K = 181
Output: [4,5,5]
Explanation: 274 + 181 = 455
'''
def addToArrayForm(A, K):
list_to_int = lambda A: int(''.join(str(i) for i in A)) # Generator exp.
A1 = list_to_int(A) + K
return list(str(A1))
A = [1,2,0,0]
K = 34
print(addToArrayForm(A,K))
| [
"[email protected]"
] | |
d7a5d281599ae77fdda2fbf31690cc3d93d99a0b | 463c8ba5baad086d37819804af4ee10f43ab6dd5 | /06_django_advance/01_DJANGO_RECAP/poll/views.py | e768d7b81c353ad198972ae2e64c97b315bc5f02 | [] | no_license | sooya14/TIL | dbbb0608d45ce273ddef6f7cea1b1195285f269d | 232b0d38d8f6ee2e6e5517bfd6a2a15cf1000dad | refs/heads/master | 2023-01-11T17:12:39.370178 | 2020-05-11T12:06:41 | 2020-05-11T12:06:41 | 195,916,241 | 0 | 0 | null | 2023-01-05T18:22:56 | 2019-07-09T02:17:42 | Jupyter Notebook | UTF-8 | Python | false | false | 651 | py | from django.shortcuts import render, redirect, get_object_or_404
from .models import Question, Choice
from .forms import ChoiceModelForm
from django.views.decorators.http import require_GET, require_POST, require_http_methods
def question_detail(request, question_id):
question = get_object_or_404(Question, id=question_id)
return render(request, 'poll/question_detail.html', {
'question': question,
})
def upvote(request, question_id):
question = get_object_or_404(Question, id=question_id)
choice = ChoiceModelForm(request.POST, instance=question)
return redirect('poll:question_detail', question_id) | [
"[email protected]"
] | |
69268ae1f4ab71c81fc10a27196e5b30bd979016 | 91b80ef798cbcdaab7f6ae0be994f5a3b12f1515 | /709.py | 2f8a0323dfd3f9e25176ed78d7adcb8763a4e366 | [] | no_license | luckkyzhou/leetcode | 13377565a1cc2c7861601ca5d55f6b83c63d490e | 43bcf65d31f1b729ac8ca293635f46ffbe03c80b | refs/heads/master | 2021-06-21T11:26:06.114096 | 2021-03-24T21:06:15 | 2021-03-24T21:06:15 | 205,568,339 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | class Solution:
def toLowerCase(self, str: str) -> str:
res = []
for s in str:
res.append(chr(ord(s) | 32))
return "".join(res) | [
"[email protected]"
] | |
ffedc98b8e7a736467f154ddf564e6d90d606fa5 | bddd0b5e00906380bd45e3d948efdbe5ab9c5063 | /tests/test_marshmallow.py | 93854b69f4b8e04592b1959a4fee32865e8a04a6 | [
"MIT"
] | permissive | life4/vaa | 2e1f3f01c88e7d592b977db4715fa9e14225267c | d934e073966cacc1810419bed0ed8e5dca83fac8 | refs/heads/master | 2020-08-04T10:57:29.791563 | 2020-04-16T13:34:31 | 2020-04-16T13:34:31 | 212,113,705 | 5 | 0 | MIT | 2021-12-14T09:53:17 | 2019-10-01T14:12:54 | Python | UTF-8 | Python | false | false | 720 | py | import marshmallow
import vaa
@vaa.marshmallow
class Scheme(marshmallow.Schema):
name = marshmallow.fields.Str(required=True)
mail = marshmallow.fields.Email(required=True)
count = marshmallow.fields.Int(required=True)
def test_valid():
data = {'name': 'Gram', 'mail': '[email protected]', 'count': 10}
v = Scheme(data)
assert v.is_valid() is True
assert v.cleaned_data == data
assert v.errors is None
def test_invalid_name():
data = {'name': 'Gram', 'mail': 'mail.ru', 'count': 10}
v = Scheme(data)
assert v.is_valid() is False
assert v.cleaned_data is None
error = vaa.Error(field='mail', message='Not a valid email address.')
assert v.errors == [error]
| [
"[email protected]"
] | |
bf84f9c5adfa9e7583673be3f2010094b0ec0812 | ad4c2aa0398406ccb7e70562560e75fa283ffa1a | /invert-binary-tree/invert-binary-tree.py | 5d741a14934dc5b2a736ed7eb2ffb43c88227147 | [
"Apache-2.0"
] | permissive | kmgowda/kmg-leetcode-python | 427d58f1750735618dfd51936d33240df5ba9ace | 4d32e110ac33563a8bde3fd3200d5804db354d95 | refs/heads/main | 2023-08-22T06:59:43.141131 | 2021-10-16T14:04:32 | 2021-10-16T14:04:32 | 417,841,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | // https://leetcode.com/problems/invert-binary-tree
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
def invert(root):
if not root:
return root
l = invert(root.left)
r = invert(root.right)
root.left, root.right=r,l
return root
return invert(root)
| [
"[email protected]"
] | |
6995ee1e78fd36068874a000c4c37ef1b646d8d8 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/RenewAdditionalBandwidthRequest.py | 13003abc6ba6cd98917539ae4400928484b4e505 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 3,305 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class RenewAdditionalBandwidthRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'RenewAdditionalBandwidth','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CouponNo(self): # String
return self.get_query_params().get('CouponNo')
def set_CouponNo(self, CouponNo): # String
self.add_query_param('CouponNo', CouponNo)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_SourceBiz(self): # String
return self.get_query_params().get('SourceBiz')
def set_SourceBiz(self, SourceBiz): # String
self.add_query_param('SourceBiz', SourceBiz)
def get_OrderTimeLength(self): # String
return self.get_query_params().get('OrderTimeLength')
def set_OrderTimeLength(self, OrderTimeLength): # String
self.add_query_param('OrderTimeLength', OrderTimeLength)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
| [
"[email protected]"
] | |
322aa78a81d83983090c878e3f2a5fb4415a64d9 | 7635ccca81bb3c2cfce123ebf14831522b8ba6ee | /cvutils/DropboxFileWatcherUpload.py | 94effc0c120f720bfc9c5da82e3a446119cacc3f | [] | no_license | youngsoul/cvutils | 6ef45f26e56d06cc445ae41066eb2028f8d08e3b | 75d8249b2a5724e4c168b598943edeca87f15335 | refs/heads/master | 2023-02-19T10:13:36.143823 | 2021-01-24T16:44:51 | 2021-01-24T16:44:51 | 332,498,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | from cvutils.BackgroundFileProcessor import BackgroundFileProcessor
from pathlib import Path
import dropbox
from dropbox.exceptions import ApiError
class DropboxFileWatcherUpload(BackgroundFileProcessor):
def _upload_file(self, file_from, file_to):
dbx = dropbox.Dropbox(self.dropbox_access_token)
with open(file_from, 'rb') as f:
dbx.files_upload(f.read(), file_to)
def __init__(self, dropbox_access_token: str, root_dir: str, include_parent_dir_in_to_file=True, pattern:str="*", delete_after_process: bool=False, batch_size: int=10, polling_time: int=5 ):
BackgroundFileProcessor.__init__(self, root_dir, pattern, delete_after_process, batch_size, polling_time)
self.include_parent_dir_in_to_file = include_parent_dir_in_to_file
self.dropbox_access_token = dropbox_access_token
def process_file(self, absolute_file_path):
print(absolute_file_path)
p = Path(absolute_file_path)
if self.include_parent_dir_in_to_file:
to_path = f"/{p.parent.name}/{p.name}"
else:
to_path = p.name
try:
self._upload_file(absolute_file_path, to_path)
except ApiError as err:
# Check user has enough Dropbox space quota
if (err.error.is_path() and
err.error.get_path().error.is_insufficient_space()):
print("ERROR: Cannot upload; insufficient space.")
elif err.user_message_text:
print(err.user_message_text)
else:
print(err)
if __name__ == '__main__':
from dotenv import load_dotenv
import os
load_dotenv()
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
access_token = os.getenv('dropbox_access_token')
db = DropboxFileWatcherUpload(dropbox_access_token=access_token, root_dir="../motion", pattern="*.jpg", delete_after_process=True)
db.start()
db.drain()
| [
"[email protected]"
] | |
4946e7e6f3ef0a2ca51d0b3bf91574e342693917 | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/mayavi/filters/greedy_terrain_decimation.py | e50f1acf66bc4db6845b7ab0a9e5811695c35ea2 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 109 | py | # proxy module
from __future__ import absolute_import
from mayavi.filters.greedy_terrain_decimation import *
| [
"[email protected]"
] | |
8527a180c4f68c8b4694e5badaf03b66f91d6750 | 8d472f9facb895dda9e1df81f3bb6c2f81b9c357 | /master/bt5/slapos_accounting/SkinTemplateItem/portal_skins/slapos_accounting/SaleInvoiceTransaction_init.py | bfbac88099e23111059502c1988eb88e8a08d087 | [] | no_license | SlapOS/slapos.core | 852485eed9382685f3df6ba8532f8192bb1389c4 | 369e8d56636e1c59a745e68dc68154abfc5b7840 | refs/heads/master | 2023-08-31T04:42:34.722241 | 2023-08-30T15:13:08 | 2023-08-30T15:13:08 | 1,825,920 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | if kw.get('created_by_builder', 0):
return
context.newContent(portal_type='Sale Invoice Transaction Line',
id='income',)
context.newContent(portal_type='Sale Invoice Transaction Line',
id='receivable', )
context.newContent(portal_type='Sale Invoice Transaction Line',
id='collected_vat',)
| [
"[email protected]"
] | |
9655ab9b5ab81ccda6e6117b91a292de0f007db0 | b424a13f032d5a607e6df4dd78bc47ad1d06a147 | /lhc/io/fastq/iterator.py | d620bee09bebc387ba2e493048ec1f2d2c782158 | [] | no_license | EnjoyLifeFund/macSierra-py36-pkgs | 1e7eeb9b55415da6eb12465d67730d76e9cc619a | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | refs/heads/master | 2021-01-20T10:23:50.044019 | 2017-09-05T02:53:26 | 2017-09-05T02:53:26 | 90,333,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | from collections import namedtuple
from lhc.itertools.chunked_iterator import ChunkedIterator
class FastqEntry(namedtuple('FastqEntry', ('hdr', 'seq', 'qual_hdr', 'qual'))):
def __str__(self):
return '@{}\n{}\n{}+\n{}\n'.format(self.hdr, self.seq, self.qual_hdr, self.qual)
class FastqEntryIterator(object):
def __init__(self, iterator):
self.iterator = iterator
self.it = ChunkedIterator(self.iterator, 4)
def __iter__(self):
return self
def __next__(self):
seq_id, seq, qual_id, qual = next(self.it)
return FastqEntry(seq_id.strip()[1:],
seq.strip(),
qual_id.strip()[1:],
qual.strip())
def __del__(self):
if hasattr(self.iterator, 'close'):
self.iterator.close()
| [
"[email protected]"
] | |
cac7b511c6a80009d9336e269018d5ebaff5f0fc | bce492a540213327e524a528a0bde3fe13e4cbdc | /hospital/get_data.py | b6f86359acb215f804128dae3f84c1350a97b5b7 | [] | no_license | RympeR/hospital | e8277ce895b321f3fcc434cbddc388b07887458f | 7a682a1a9a936f6257e9f7f28de0376f88447cf9 | refs/heads/master | 2021-04-08T13:16:50.716211 | 2020-04-17T13:46:42 | 2020-04-17T13:46:42 | 248,779,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py |
with open('Hospinfo.txt') as f:
data = f.read().split('"')
# print(data[:200])
for row, row_data in enumerate(data):
if row == 1:
# print(row_data)
print(len(row_data.split(',')))
cols = """Provider ID,Hospital Name,Address,City,State,ZIP Code,County Name,Phone Number,Hospital Type,Hospital Ownership,Emergency Services,Meets criteria for meaningful use of EHRs,Hospital overall rating,Hospital overall rating footnote,Mortality national comparison,Mortality national comparison footnote,Safety of care national comparison,Safety of care national comparison footnote,Readmission national comparison,Readmission national comparison footnote,Patient experience national comparison,Patient experience national comparison footnote,Effectiveness of care national comparison,Effectiveness of care national comparison footnote,Timeliness of care national comparison,Timeliness of care national comparison footnote,Efficient use of medical imaging national comparison,Efficient use of medical imaging national comparison footnote,Location
""".split(',')
for i in cols:
print(i)
| [
"[email protected]"
] | |
0a43eb71f8d79b57e3e6eeac7e51f86e76c91464 | 47deebe6fefedb01fdce5d4e82f58bb08f8e1e92 | /python core/Lesson_10/matrix_13.py | bfaf79444f2715dffed23a8b328deda5812089c2 | [] | no_license | developeryuldashev/python-core | 5bb162603bdb5782acf05e3fb25ca5dd6347067a | 08fca77c9cfde69d93a7875b3fb65b98f3dabd78 | refs/heads/main | 2023-08-21T03:33:12.160133 | 2021-10-19T04:56:53 | 2021-10-19T04:56:53 | 393,383,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from methods import makeMatrix,Print
n=5
a=makeMatrix(n)
Print(a)
for i in range(n):
for j in range(n-i):
print(a[i][j],end=' ')
print()
for k in range(1+i,n):
print(a[k][n-1-i],end=' ')
print()
| [
"[email protected]"
] | |
0baeae710bb003d24d9f5571745cc95246b97e50 | 5b5a49643c75aa43d5a876608383bc825ae1e147 | /python99/arithmetic/p206.py | 052c679c71877459f7b22b02df25cd6179ae3ee8 | [] | no_license | rscai/python99 | 281d00473c0dc977f58ba7511c5bcb6f38275771 | 3fa0cb7683ec8223259410fb6ea2967e3d0e6f61 | refs/heads/master | 2020-04-12T09:08:49.500799 | 2019-10-06T07:47:17 | 2019-10-06T07:47:17 | 162,393,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | # A list of Goldbach compositions
from python99.arithmetic.p201 import is_prime
from python99.arithmetic.p204 import prime_generator
import math
def goldbach_list(lower, upper, min_prime_factor=0):
return [x for x in
[goldbach(even, min_prime_factor) for even in
even_nums(lower, upper, min_prime_factor*2)]
if x != None]
def even_nums(lower, upper, min_even):
for num in range(lower, upper+1):
if num > min_even and num % 2 == 0:
yield num
def goldbach(n, min_prime_factor):
for first_prime in prime_generator(max(2, min_prime_factor+1), n//2):
if n-first_prime < min_prime_factor:
return None
if is_prime(n-first_prime):
return [first_prime, n-first_prime]
return None
| [
"[email protected]"
] | |
f15c94f47d7cf0fbb915e73bca37f4000fee172b | 8fc999f5262b5a2dadc830f1cc345f51b6dde862 | /samples/conceptual_samples/functions/enumerate_function.py | 11fdfb523aa3b9139a195ffe0c46779ab406a3cd | [] | no_license | pandiyan07/python_2.x_tutorial_for_beginners_and_intermediate | 5ca5cb5fcfe7ce08d109fb32cdf8138176ac357a | a4c14deaa518fea1f8e95c2cc98783c8ca3bd4ae | refs/heads/master | 2022-04-09T20:33:28.527653 | 2020-03-27T06:35:50 | 2020-03-27T06:35:50 | 250,226,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # this is a sample python script program which is used to demonstrate the concept of enumerate built in function in the python scripting
# language.
for i,v in enumerate(['tic','tack','toe']):
print i,'-',v
# this is the end of the program file. happy coding..!! | [
"[email protected]"
] | |
85242539aeb51faefb57164d622aa34a9f448586 | 89812f6ab80008222bcf93a9b2ca614a60291738 | /river/time_series/holt_winters.py | 11afd133036663f6c9e404b62eaa8a6edfe84a8a | [
"BSD-3-Clause"
] | permissive | Pandinosaurus/river | 47135f5b7e612f83d96f4a50f9d746dec834b16d | 09a24d35c1f548239c54c1244973241bfe5c4edc | refs/heads/master | 2023-08-27T21:08:12.553115 | 2021-11-09T22:10:17 | 2021-11-09T22:10:17 | 409,610,355 | 0 | 0 | BSD-3-Clause | 2021-11-10T04:13:30 | 2021-09-23T13:47:27 | Python | UTF-8 | Python | false | false | 6,329 | py | import operator
import statistics
from collections import deque
from .base import Forecaster
__all__ = ["HoltWinters"]
class Component(deque):
...
class AdditiveLevel(Component):
def __init__(self, alpha):
super().__init__([], maxlen=2)
self.alpha = alpha
def update(self, y, trend, season):
self.append(
self.alpha * (y - (season[-season.seasonality] if season else 0))
+ (1 - self.alpha) * (self[-1] + (trend[-1] if trend else 0))
)
class MultiplicativeLevel(Component):
def __init__(self, alpha):
super().__init__([], maxlen=2)
self.alpha = alpha
def update(self, y, trend, season):
self.append(
self.alpha * (y / (season[-season.seasonality] if season else 1))
+ (1 - self.alpha) * (self[-1] + (trend[-1] if trend else 0))
)
class Trend(Component):
def __init__(self, beta):
super().__init__([], maxlen=2)
self.beta = beta
def update(self, y, level):
self.append(self.beta * (level[-1] - level[-2]) + (1 - self.beta) * self[-1])
class AdditiveSeason(Component):
def __init__(self, gamma, seasonality):
super().__init__([], maxlen=seasonality + 1)
self.gamma = gamma
self.seasonality = seasonality
def update(self, y, level, trend):
self.append(
self.gamma * (y - level[-2] - trend[-2])
+ (1 - self.gamma) * self[-self.seasonality]
)
class MultiplicativeSeason(Component):
def __init__(self, gamma, seasonality):
super().__init__([], maxlen=seasonality + 1)
self.gamma = gamma
self.seasonality = seasonality
def update(self, y, level, trend):
self.append(
self.gamma * y / (level[-2] + trend[-2])
+ (1 - self.gamma) * self[-self.seasonality]
)
class HoltWinters(Forecaster):
r"""Holt-Winters forecaster.
This is a standard implementation of the Holt-Winters forecasting method. Certain
parametrisations result in special cases, such as simple exponential smoothing.
Optimal parameters and initialisation values can be determined in a batch setting. However, in
an online setting, it is necessary to wait and observe enough values. The first
`k = max(2, seasonality)` values are indeed used to initialize the components.
**Level initialization**
$$l = \frac{1}{k} \sum_{i=1}{k} y_i$$
**Trend initialization**
$$t = \frac{1}{k - 1} \sum_{i=2}{k} y_i - y_{i-1}$$
**Trend initialization**
$$s_i = \frac{y_i}{k}$$
Parameters
----------
alpha
Smoothing parameter for the level.
beta
Smoothing parameter for the trend.
gamma
Smoothing parameter for the seasonality.
seasonality
The number of periods in a season. For instance, this should be 4 for quarterly data,
and 12 for yearly data.
multiplicative
Whether or not to use a multiplicative formulation.
Examples
--------
>>> from river import datasets
>>> from river import metrics
>>> from river import time_series
>>> dataset = datasets.AirlinePassengers()
>>> model = time_series.HoltWinters(
... alpha=0.3,
... beta=0.1,
... gamma=0.6,
... seasonality=12,
... multiplicative=True
... )
>>> metric = metrics.MAE()
>>> time_series.evaluate(
... dataset,
... model,
... metric,
... horizon=12,
... grace_period=12
... )
+1 MAE: 25.899087
+2 MAE: 26.26131
+3 MAE: 25.735903
+4 MAE: 25.625678
+5 MAE: 26.093842
+6 MAE: 26.90249
+7 MAE: 28.634398
+8 MAE: 29.284769
+9 MAE: 31.018351
+10 MAE: 32.252349
+11 MAE: 33.518946
+12 MAE: 33.975057
References
----------
[^1]: [Exponential smoothing — Wikipedia](https://www.wikiwand.com/en/Exponential_smoothing)
[^2]: [Exponential smoothing — Forecasting: Principles and Practice](https://otexts.com/fpp2/expsmooth.html)
[^3]: [What is Exponential Smoothing? — Engineering statistics handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc43.htm)
"""
def __init__(
self, alpha, beta=None, gamma=None, seasonality=0, multiplicative=False,
):
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.seasonality = seasonality
self.multiplicative = multiplicative
self.level = (
MultiplicativeLevel(alpha) if multiplicative else AdditiveLevel(alpha)
)
self.trend = Trend(beta) if beta else None
self.season = (
(
MultiplicativeSeason(gamma, seasonality)
if multiplicative
else AdditiveSeason(gamma, seasonality)
)
if (gamma or seasonality)
else None
)
self._first_values = []
self._initialized = False
def learn_one(self, y, x=None):
if self._initialized:
self.level.update(y, self.trend, self.season)
if self.trend:
self.trend.update(y, self.level)
if self.season:
self.season.update(y, self.level, self.trend)
return self
self._first_values.append(y)
if len(self._first_values) < max(2, self.seasonality):
return self
# The components can be initialized now that enough values have been observed
self.level.append(statistics.mean(self._first_values))
diffs = [b - a for a, b in zip(self._first_values[:-1], self._first_values[1:])]
self.trend.append(statistics.mean(diffs))
self.season.extend([y / self.level[-1] for y in self._first_values])
self._initialized = True
return self
def forecast(self, horizon, xs=None):
op = operator.mul if self.multiplicative else operator.add
return [
op(
self.level[-1] + ((h + 1) * self.trend[-1] if self.trend else 0),
(
self.season[-self.seasonality + h % self.seasonality]
if self.seasonality
else 0
),
)
for h in range(horizon)
]
| [
"[email protected]"
] | |
c933e757285db1011743a3f267fcb8c3576b6e2f | bec6d5e1dc94c7229530aefd33a801953637acb5 | /3ch/mnist.py | 0d69dfebcb128690eec0c0861219e6ec2427fbfe | [] | no_license | WebAppEngineer/hands_on_ml | 6b3f3c5b0ce69289c975dc42db9fc82766bc9beb | 62a19624466d7d00caa47f760172efcb04b6dea1 | refs/heads/master | 2020-06-19T00:36:37.720951 | 2018-10-12T01:56:07 | 2018-10-12T01:56:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,517 | py | import os, pdb
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score, StratifiedKFold, cross_val_predict
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, precision_recall_curve, roc_curve, roc_auc_score
from sklearn.base import clone, BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
PNG_PATH = '/home/ubuntu/workspace/hands_on_ml/png/3ch/'
def setup():
mnist = fetch_mldata('MNIST original')
X, y = mnist["data"], mnist["target"]
# some_digit = X[36000]
# some_digit_image = some_digit.reshape(28, 28)
# plt.imshow(some_digit_image, cmap = mpl.cm.binary, interpolation="nearest")
# plt.axis("off")
# plt.savefig(PNG_PATH + "some_digit_plot.png", dpi=300)
# print(y[36000])
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# binary_classifier(X_train, X_test, y_train, y_test)
# roc_curve_demo(X_train, X_test, y_train, y_test)
# multiclass(X_train, X_test, y_train, y_test)
multiclass_output(X_train, X_test, y_train, y_test)
def multiclass_output(X_train, X_test, y_train, y_test):
    """Multioutput-classification demo: denoise MNIST digits with KNN.

    Adds random pixel noise to the images and trains a KNeighborsClassifier
    to map each noisy image back to its clean original (one output per
    pixel), then saves a before/after example figure under ``PNG_PATH``.

    Fix: ``tight_layout`` is not a valid ``plt.savefig`` keyword (it came
    from the book's custom ``save_fig`` helper); recent Matplotlib raises
    ``TypeError`` on unknown savefig kwargs, so it is removed here.
    """
    # Multilabel variant (digit >= 7? digit odd?) kept for reference:
    # y_multilabel = np.c_[(y_train >= 7), (y_train % 2 == 1)]
    # knn_clf.fit(X_train, y_multilabel)
    # print(knn_clf.predict([X_train[20000]]))
    # y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1)
    # print(f1_score(y_multilabel, y_train_knn_pred, average="macro"))
    knn_clf = KNeighborsClassifier()
    # Corrupt both sets with uniform pixel noise; the clean images become the
    # (multioutput) regression-style targets.
    noise = np.random.randint(0, 100, (len(X_train), 784))
    X_train_mod = X_train + noise
    noise = np.random.randint(0, 100, (len(X_test), 784))
    X_test_mod = X_test + noise
    y_train_mod = X_train
    y_test_mod = X_test
    some_index = 5500
    plt.subplot(121); plot_digit(X_test_mod[some_index])
    plt.subplot(122); plot_digit(y_test_mod[some_index])
    plt.savefig(PNG_PATH + "noisy_digit_example_plot.png", dpi=300)
    plt.close()
    knn_clf.fit(X_train_mod, y_train_mod)
    clean_digit = knn_clf.predict([X_test_mod[some_index]])
    plot_digit(clean_digit)
    plt.savefig(PNG_PATH + "cleaned_digit_example_plot.png", dpi=300)
    plt.close()
def multiclass(X_train, X_test, y_train, y_test):
    """Multiclass SGD demo: print the confusion matrix and save a 3-vs-5
    error-analysis gallery (true label vs. predicted label) to PNG_PATH."""
    some_digit = X_train[20000]
    sgd_clf = SGDClassifier(max_iter=5, random_state=42)
    # Out-of-fold predictions over the whole training set.
    y_train_pred = cross_val_predict(sgd_clf, X_train, y_train, cv=3)
    conf_mx = confusion_matrix(y_train, y_train_pred)
    print(conf_mx)
    # 2x2 gallery: rows = actual class, columns = predicted class.
    cl_a, cl_b = 3, 5
    plt.figure(figsize=(8, 8))
    cells = [
        (cl_a, cl_a, 221),
        (cl_a, cl_b, 222),
        (cl_b, cl_a, 223),
        (cl_b, cl_b, 224),
    ]
    for actual, predicted, position in cells:
        mask = (y_train == actual) & (y_train_pred == predicted)
        plt.subplot(position)
        plot_digits(X_train[mask][:25], images_per_row=5)
    plt.savefig(PNG_PATH + "error_analysis_digits_plot.png", dpi=300)
    plt.close()
def roc_curve_demo(X_train, X_test, y_train, y_test):
    """Compare ROC curves of an SGD classifier and a random forest on the
    binary "is this digit a 5?" task, saving both figures under PNG_PATH
    and printing AUC / precision / recall scores."""
    is_five_train = y_train == 5
    is_five_test = y_test == 5
    sgd_clf = SGDClassifier(max_iter=5, random_state=42)
    sgd_clf.fit(X_train, is_five_train)
    # Out-of-fold decision scores for the SGD model.
    sgd_scores = cross_val_predict(sgd_clf, X_train, is_five_train, cv=3,
                                   method="decision_function")
    # tpr = recall; fpr = FP / (FP + TN)
    sgd_fpr, sgd_tpr, _ = roc_curve(is_five_train, sgd_scores)
    plt.figure(figsize=(8, 6))
    plot_roc_curve(sgd_fpr, sgd_tpr)
    plt.savefig(PNG_PATH + "roc_curve_plot.png", dpi=300)
    plt.close()
    print(roc_auc_score(is_five_train, sgd_scores))
    forest_clf = RandomForestClassifier(random_state=42)
    # The forest has no decision_function; use P(class=True) as the score.
    forest_probas = cross_val_predict(forest_clf, X_train, is_five_train, cv=3,
                                      method="predict_proba")
    forest_scores = forest_probas[:, 1]
    forest_fpr, forest_tpr, _ = roc_curve(is_five_train, forest_scores)
    plt.figure(figsize=(8, 6))
    plt.plot(sgd_fpr, sgd_tpr, "b:", linewidth=2, label="SGD")
    plot_roc_curve(forest_fpr, forest_tpr, "Random Forest")
    plt.legend(loc="lower right", fontsize=16)
    plt.savefig(PNG_PATH + "roc_curve_comparison_plot.png", dpi=300)
    plt.close()
    print(roc_auc_score(is_five_train, forest_scores))
    forest_preds = cross_val_predict(forest_clf, X_train, is_five_train, cv=3)
    print(precision_score(is_five_train, forest_preds))
    print(recall_score(is_five_train, forest_preds))
def binary_classifier(X_train, X_test, y_train, y_test):
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
sgd_clf = SGDClassifier(max_iter=5, random_state=42)
sgd_clf.fit(X_train, y_train_5)
# print(sgd_clf.predict([X_train[36000]]))
# print(cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy"))
# never_5_clf = Never5Classifier()
# print(cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy"))
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
print(confusion_matrix(y_train_5, y_train_pred))
# [[true neg, false pos]
# [false neg, true pos]]
# NOTES
# precision = TP / (TP + FP)
# recall = TP / (TP + FN)
# F1 score = harmonic mean of precision and recall
print("precision: " + str(precision_score(y_train_5, y_train_pred)))
print("AKA when it predicts a 5 it is only right this percent")
print("recall: " + str(recall_score(y_train_5, y_train_pred)))
print("AKA only accurately detects this percent of the 5s")
print("f1 score: " + str(f1_score(y_train_5, y_train_pred)))
# Setting custom threshold
# y_scores = sgd_clf.decision_function([some_digit])
# threshold = 0
# y_some_digit_pred = (y_scores > threshold)
# print(y_some_digit_pred)
# threshold = 200000
# y_some_digit_pred = (y_scores > threshold)
# print(y_some_digit_pred)
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function")
# hack to work around issue #9589 introduced in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
y_scores = y_scores[:, 1]
# precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
# plt.figure(figsize=(8, 4))
# plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
# plt.xlim([-700000, 700000])
# plt.savefig(PNG_PATH + "precision_recall_vs_threshold_plot" + ".png", dpi=300)
# plt.close()
# plt.figure(figsize=(8, 6))
# plot_precision_vs_recall(precisions, recalls)
# plt.savefig(PNG_PATH + "precision_vs_recall_plot" + ".png", dpi=300)
# plt.close()
y_train_pred_90 = (y_scores > 70000)
print(precision_score(y_train_5, y_train_pred_90))
print(recall_score(y_train_5, y_train_pred_90))
def custom_folds(X_train, y_train_5):
# pseudo custom way to have more control over the kfolds process
skfolds = StratifiedKFold(n_splits=3, random_state=42)
for train_index, test_index in skfolds.split(X_train, y_train_5):
clone_clf = clone(sgd_clf)
X_train_folds = X_train[train_index]
y_train_folds = (y_train_5[train_index])
X_test_fold = X_train[test_index]
y_test_fold = (y_train_5[test_index])
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_fold)
n_correct = sum(y_pred == y_test_fold)
print(n_correct / len(y_pred))
class Never5Classifier(BaseEstimator):
def fit(self, X, y=None):
pass
def predict(self, X):
return np.zeros((len(X), 1), dtype=bool)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2)
plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
plt.xlabel("Threshold", fontsize=16)
plt.legend(loc="upper left", fontsize=16)
plt.ylim([0, 1])
def plot_precision_vs_recall(precisions, recalls):
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall", fontsize=16)
plt.ylabel("Precision", fontsize=16)
plt.axis([0, 1, 0, 1])
def plot_roc_curve(fpr, tpr, label=None):
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([0, 1, 0, 1])
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
def plot_confusion_matrix(matrix):
"""If you prefer color and a colorbar"""
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
cax = ax.matshow(matrix)
fig.colorbar(cax)
def plot_digit(data):
image = data.reshape(28, 28)
plt.imshow(image, cmap = mpl.cm.binary, interpolation="nearest")
plt.axis("off")
def plot_digits(instances, images_per_row=10, **options):
size = 28
images_per_row = min(len(instances), images_per_row)
images = [instance.reshape(size,size) for instance in instances]
n_rows = (len(instances) - 1) // images_per_row + 1
row_images = []
n_empty = n_rows * images_per_row - len(instances)
images.append(np.zeros((size, size * n_empty)))
for row in range(n_rows):
rimages = images[row * images_per_row : (row + 1) * images_per_row]
row_images.append(np.concatenate(rimages, axis=1))
image = np.concatenate(row_images, axis=0)
plt.imshow(image, cmap = mpl.cm.binary, **options)
plt.axis("off")
if __name__ == '__main__':
setup() | [
"[email protected]"
] | |
12d9546549889ef6154b5507be8052e83c67feb5 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_2508.py | c688c4238de404a6dfd37ac60813eb1cd942d180 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,330 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((952, 290, 680), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((894, 105, 458), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((2, 731, 654), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((673, 305, 895), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((698, 112, 782), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((338, 192, 144), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((129, 617, 486), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((655, 740, 828), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((107, 904, 348), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((365, 980, 742), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((110, 223, 36), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((377, 465, 805), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((12, 813, 34), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((152, 995, 987), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((344, 857, 79), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((109, 422, 328), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((74, 800, 387), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((162, 116, 380), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((891, 372, 877), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((803, 934, 758), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((876, 616, 557), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
6f55f23df459c4493cdb78acf117563e94831ef7 | b8755b5f0b5a3c1bba1270cc8f20dc172abb0634 | /02-10分钟如何创建一个可以管理职位的后台/recruitment/recruitment/settings.py | a58ce1e0e7660f068ae68e04c0dd88a341fa0dff | [] | no_license | AndersonHJB/Django_Leraning | bf44af05b0e604342fd97cb8699385461cbbb965 | 95c34057f643b234478e72665c6454ebd99cb6cd | refs/heads/main | 2023-07-12T02:47:34.289089 | 2021-08-15T10:17:32 | 2021-08-15T10:17:32 | 367,765,444 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | """
Django settings for recruitment project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i$1k$c0im$0_&8l8^$u&!(jwg^49-!4+j=gvx&ds%7^6wv%lbj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jobs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'recruitment.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'recruitment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
3bae43f7bd877325511010cbed8dd0ed6d313794 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/ps8.py | 12394cdb453ca7d6f1fee3301fb9bd900bc94f8c | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'pS8':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
b405cb126d3e976154d3534a211d7c924676e808 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02682/s857771361.py | d205ccdcbde00b6bd04ef1defef77f456a20c864 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | a, b, c, k = map(int, input().split())
max_val = 0
if k <= a:
max_val += k
else:
max_val += a
k -= a
if k <= b:
pass
# break
else:
k -= b
max_val -= k
print(max_val) | [
"[email protected]"
] | |
b43322d399185982d5b64b3d61a54d2011130ca6 | bdfdd067b98b0c93fab49dc0b61eb6160edc1175 | /Day 1 sum of numbers in a string.py | 23477d8ba2ef4eeaa9dac87ba223c44c656a2d6c | [] | no_license | AprajitaChhawi/365DaysOfCode.FEBRUARY | 7b049ac92df68cb7162f68a66cfdb014a0bb45ba | 6f9619e33e200247473543b0f9cbaa17b55782a2 | refs/heads/main | 2023-03-09T16:54:21.545637 | 2021-03-01T17:58:02 | 2021-03-01T17:58:02 | 336,838,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #User function Template for python3
'''
Your task is to return the sum of all the
numbers appearing in the given string.
Function Arguments: s (given string)
Return Type: integer
'''
import re
def findSum(s):
a=[]
a=[int(i) for i in re.split("[a-z]",s) if i.isdigit()]
return sum(a)
#code here
#{
# Driver Code Starts
#Initial Template for Python 3
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
if __name__=='__main__':
t = int(input())
for i in range(t):
s=str(input())
print(findSum(s))
# } Driver Code Ends
| [
"[email protected]"
] | |
3d41bb59afbe0048d46debc82e42718ccb3d96d5 | 2c7f40ad997de27ef13c368f84da6df2d2f3a565 | /oci/auth.py | e71268e72f6333d2d3e553114b7b7349603e3d44 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | mliepold/cc-utils | ff88ba9f95064b598f33649005c8b6764a0dce3a | 3f8c4b0d11d6a52d1605026f478371411daab81e | refs/heads/master | 2023-06-01T18:36:44.089480 | 2021-06-14T14:42:47 | 2021-06-14T14:42:47 | 377,459,403 | 0 | 0 | Apache-2.0 | 2021-06-16T10:43:02 | 2021-06-16T10:35:30 | null | UTF-8 | Python | false | false | 3,538 | py | import dataclasses
import enum
import operator
import typing
import oci.util
class AuthType(enum.Enum):
BASIC_AUTH = 'basic_auth'
class Privileges(enum.Enum):
READONLY = 'readonly'
READWRITE = 'readwrite'
def _asint(self, privileges):
if privileges is self.READONLY:
return 0
elif privileges is self.READWRITE:
return 1
elif privileges is None:
return 2
else:
raise NotImplementedError(privileges)
def __hash__(self):
return self._asint(self).__hash__()
def __lt__(self, other):
o = self._asint(other)
return self._asint(self).__lt__(o)
def __le__(self, other):
o = self._asint(other)
return self._asint(self).__le__(o)
def __eq__(self, other):
o = self._asint(other)
return self._asint(self).__eq__(o)
def __ne__(self, other):
o = self._asint(other)
return self._asint(self).__ne__(o)
def __gt__(self, other):
o = self._asint(other)
return self._asint(self).__gt__(o)
def __ge__(self, other):
o = self._asint(other)
return self._asint(self).__ge__(o)
@dataclasses.dataclass(frozen=True)
class OciCredentials:
pass
@dataclasses.dataclass(frozen=True)
class OciConfig:
privileges: Privileges
credentials: OciCredentials
url_prefixes: typing.Sequence[str] = dataclasses.field(default_factory=tuple)
def valid_for(self, image_reference: str, privileges: Privileges=Privileges.READONLY):
if privileges and privileges > self.privileges:
return False
if not self.url_prefixes:
return True
unmodified_ref = image_reference.lower()
image_reference = oci.util.normalise_image_reference(image_reference=image_reference).lower()
for prefix in self.url_prefixes:
prefix = prefix.lower()
if image_reference.startswith(oci.util.normalise_image_reference(prefix)):
return True
if image_reference.startswith(prefix.lower()):
return True
if unmodified_ref.startswith(prefix):
return True
return False
@dataclasses.dataclass(frozen=True)
class OciBasicAuthCredentials(OciCredentials):
username: str
password: str
# typehint-alias
image_reference = str
credentials_lookup = typing.Callable[[image_reference, Privileges, bool], OciCredentials]
def mk_credentials_lookup(
cfgs: typing.Union[OciCredentials, typing.Sequence[OciCredentials]],
) -> typing.Callable[[image_reference, Privileges, bool], OciConfig]:
'''
returns a callable that can be queried for matching OciCredentials for requested
privileges and image-references
'''
if isinstance(cfgs, OciConfig):
cfgs = (cfgs,)
def lookup_credentials(
image_reference: str,
privileges: Privileges=Privileges.READONLY,
absent_ok: bool=False,
):
valid_cfgs = sorted(
(
c for c in cfgs
if c.valid_for(image_reference=image_reference, privileges=privileges)
),
key=operator.attrgetter('privileges'),
)
if not valid_cfgs and absent_ok:
return None
if not valid_cfgs:
raise ValueError(f'no valid cfg found: {image_reference=}, {privileges=}')
# first element contains cfg with least required privileges
return valid_cfgs[0].credentials
return lookup_credentials
| [
"[email protected]"
] | |
01bde579ac5e8282b572898002630a3b05d69be0 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/N/NinaC/obesity_scraper_3_us.py | d60baf370cc2cca6e73d62152bc797eb7f588e6f | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | ###################################################################################
# Twitter API scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Get results from the Twitter API! Change QUERY to your search term of choice.
# Examples: 'newsnight', '#newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = '"I am obese"'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 100
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=exclude:retweets+%s&rpp=%s&lang=%s&page=%s&country_code=US' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['geo'] = result['geo']
data['profile_image_url'] = result['profile_image_url']
data['from_user_id_str'] = result['from_user_id_str']
data['created_at'] = result['created_at']
data['from_user'] = result['from_user']
data['id_str'] = result['id_str']
data['metadata'] = result['metadata']
data['to_user_id'] = result['to_user_id']
print data['from_user'], data['text'], data['geo']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Twitter API scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Get results from the Twitter API! Change QUERY to your search term of choice.
# Examples: 'newsnight', '#newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = '"I am obese"'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 100
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=exclude:retweets+%s&rpp=%s&lang=%s&page=%s&country_code=US' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['geo'] = result['geo']
data['profile_image_url'] = result['profile_image_url']
data['from_user_id_str'] = result['from_user_id_str']
data['created_at'] = result['created_at']
data['from_user'] = result['from_user']
data['id_str'] = result['id_str']
data['metadata'] = result['metadata']
data['to_user_id'] = result['to_user_id']
print data['from_user'], data['text'], data['geo']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"[email protected]"
] | |
1b85cda45f6371da032a7c90d41f74ee9956e9d4 | bfa100593b7fc67ae65593bdddb357fa3d9e27cf | /quotes/migrations/0001_initial.py | 9b07bac95358e974f2fd108e3f0c319e3ee86360 | [] | no_license | wall-e-08/medigap-wagtail | e2342631004de047a4b3d09571dd88f2a6fc2286 | 1d7b77759f071eec89e29591e814523d4c433655 | refs/heads/master | 2020-05-20T17:27:43.966094 | 2019-04-30T06:25:58 | 2019-04-30T06:25:58 | 185,688,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | # Generated by Django 2.1.7 on 2019-04-07 09:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=100, null=True)),
('middle_name', models.CharField(blank=True, max_length=100, null=True)),
('last_name', models.CharField(blank=True, max_length=100, null=True)),
('email', models.EmailField(blank=True, max_length=200, null=True)),
('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='Phone number')),
('phone2', models.CharField(blank=True, max_length=100, null=True, verbose_name='Alternate Phone number')),
('zip_code', models.CharField(blank=True, max_length=100, null=True)),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='Quote Time')),
],
),
]
| [
"[email protected]"
] | |
980b3f033059374e13f3f5fe9614b19c68e86ac1 | 51d0377511a5da902033fb9d80184db0e096fe2c | /18-linear-classifiers-in-python/2-loss-functions/03-comparing-the-logistic-and-hinge-losses.py | 69c140b9a46811245c800484fcf7d7e5197fd13d | [] | no_license | sashakrasnov/datacamp | c28c6bda178163337baed646220b2f7dcc36047d | 759f4cec297883907e21118f24a3449d84c80761 | refs/heads/master | 2021-12-07T02:54:51.190672 | 2021-09-17T21:05:29 | 2021-09-17T21:05:29 | 157,093,632 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | '''
Comparing the logistic and hinge losses
In this exercise you'll create a plot of the logistic and hinge losses using their mathematical expressions, which are provided to you. The loss function diagram from the video is shown on the right.
'''
import numpy as np
import matplotlib.pyplot as plt
'''
INSTRUCTIONS
* Evaluate the log_loss and hinge_loss functions at the grid points, so that they are plotted.
'''
# Mathematical functions for logistic and hinge losses
# Feel free to ignore if you're not interested
def log_loss(raw_model_output):
return np.log(1 + np.exp(-raw_model_output))
def hinge_loss(raw_model_output):
return np.maximum(0, 1-raw_model_output)
# Create a grid of values and plot
grid = np.linspace(-2, 2, 1000)
plt.plot(grid, log_loss(grid), label='logistic')
plt.plot(grid, hinge_loss(grid), label='hinge')
plt.legend()
plt.show() | [
"[email protected]"
] | |
f49f2e35c58478490b43d51f112079f4c750f693 | 2194b6c17f3153c5976d6ac4a9ab78211027adab | /otoroshi_admin_api_client/api/templates/otoroshicontrollersadminapi_templates_controllerinitiate_api_key_templates.py | fcb0e751d7ce49d5a2c84428e89ff712f4d09b14 | [] | no_license | krezreb/otoroshi-admin-api-client | 7fab5e873c9c5950d77fffce6bcf80d3fdf4c319 | 9b3156c11eac227024cfe4a26c0129618deb2c4d | refs/heads/master | 2023-05-08T08:32:00.982987 | 2021-05-27T09:55:00 | 2021-05-27T09:55:00 | 371,324,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | from typing import Any, Dict, Optional, Union
import httpx
from ...client import AuthenticatedClient
from ...models.error_response import ErrorResponse
from ...models.otoroshimodels_api_key import OtoroshimodelsApiKey
from ...types import Response
def _get_kwargs(
*,
client: AuthenticatedClient,
) -> Dict[str, Any]:
url = "{}/api/new/apikey".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
}
def _parse_response(*, response: httpx.Response) -> Optional[Union[ErrorResponse, OtoroshimodelsApiKey]]:
if response.status_code == 401:
response_401 = ErrorResponse.from_dict(response.json())
return response_401
if response.status_code == 400:
response_400 = ErrorResponse.from_dict(response.json())
return response_400
if response.status_code == 404:
response_404 = ErrorResponse.from_dict(response.json())
return response_404
if response.status_code == 200:
response_200 = OtoroshimodelsApiKey.from_dict(response.json())
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[Union[ErrorResponse, OtoroshimodelsApiKey]]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
*,
client: AuthenticatedClient,
) -> Response[Union[ErrorResponse, OtoroshimodelsApiKey]]:
kwargs = _get_kwargs(
client=client,
)
response = httpx.get(
**kwargs,
)
return _build_response(response=response)
def sync(
*,
client: AuthenticatedClient,
) -> Optional[Union[ErrorResponse, OtoroshimodelsApiKey]]:
""" """
return sync_detailed(
client=client,
).parsed
async def asyncio_detailed(
*,
client: AuthenticatedClient,
) -> Response[Union[ErrorResponse, OtoroshimodelsApiKey]]:
kwargs = _get_kwargs(
client=client,
)
async with httpx.AsyncClient() as _client:
response = await _client.get(**kwargs)
return _build_response(response=response)
async def asyncio(
*,
client: AuthenticatedClient,
) -> Optional[Union[ErrorResponse, OtoroshimodelsApiKey]]:
""" """
return (
await asyncio_detailed(
client=client,
)
).parsed
| [
"[email protected]"
] | |
2dfa3b839998cf326bc38e466629f82853dae124 | 0a9cfe3d7c07e2a5997647ddbc04a73e7a0dc69d | /hivetools/lost_bee.py | 23763356df196decad810b15952f20c16d07b8e0 | [
"MIT"
] | permissive | brianoflondon/hivetools | 7120ec87bb608ea17daae395c42f637fc8e9fe44 | dbf97370503d2891cc953e136d226b098defa5ee | refs/heads/master | 2022-04-21T01:30:15.289781 | 2020-04-16T15:30:14 | 2020-04-16T15:30:14 | 255,656,686 | 0 | 0 | null | 2020-04-14T16:03:25 | 2020-04-14T16:03:24 | null | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python3
from getpass import getpass
from beemgraphenebase.account import PasswordKey
from tabulate import tabulate
hive_id = "thecrazygm"
hive_id = input("Hive User ID: ")
brain_key = getpass(prompt='Master Password: ')
roles = ["owner", "active", "posting", "memo"]
data = []
for role in roles:
keys = PasswordKey(hive_id, brain_key, role=role, prefix="STM")
priv_key = keys.get_private()
# priv = keys.get_private()
pub_key = keys.get_public()
# pub = keys.get_public()
data += [[role, str(pub_key), str(priv_key)]]
print(tabulate(data, headers=["Role", "Public Key", "Private Key"]))
| [
"[email protected]"
] | |
68b80d00366ec28ca9861a1364f27fda71ec94d4 | b137493f8d8167acfd4cedeb39fbc4abb8edf147 | /tensor2tensor/models/research/next_frame_sv2p.py | 910993303093ec38a0e537e21eaeb05e7afbdd6b | [
"Apache-2.0"
] | permissive | yufengm/tensor2tensor | 254cd8600b68f92d8567ec775426a8aac33115ac | c88edeae7c5578b487f234ca8842d3580a082d99 | refs/heads/master | 2020-03-25T12:43:49.747320 | 2018-08-06T22:00:10 | 2018-08-06T22:00:32 | 143,790,807 | 1 | 0 | null | 2018-08-06T22:42:26 | 2018-08-06T22:42:25 | null | UTF-8 | Python | false | false | 28,785 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SV2P: Stochastic Variational Video Prediction.
based on the following paper:
https://arxiv.org/abs/1710.11252
by Mohammad Babaeizadeh, Chelsea Finn, Dumitru Erhan,
Roy H. Campbell and Sergey Levine
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import next_frame
from tensor2tensor.models.research import next_frame_params # pylint: disable=unused-import
from tensor2tensor.utils import registry
import tensorflow as tf
# Short aliases for frequently used TF namespaces in this module.
tfl = tf.layers
tfcl = tf.contrib.layers

# NOTE(review): a large sentinel step count; its use is not visible in this
# part of the file, and `get_iteration_num` hard-codes 1000000 instead of
# reusing it — confirm which value is intended.
_LARGE_STEP_NUMBER = 100000
@registry.register_model
class NextFrameStochastic(next_frame.NextFrameBasic):
"""Stochastic Variational Video Prediction."""
@property
def is_training(self):
return self.hparams.mode == tf.estimator.ModeKeys.TRAIN
def tinyify(self, array):
if self.hparams.tiny_mode:
return [1 for _ in array]
return array
def get_gaussian_latent(self, latent_mean, latent_std):
latent = tf.random_normal(tf.shape(latent_mean), 0, 1, dtype=tf.float32)
latent = latent_mean + tf.exp(latent_std / 2.0) * latent
return latent
def get_iteration_num(self):
step_num = tf.train.get_global_step()
# TODO(lukaszkaiser): what should it be if it"s undefined?
if step_num is None:
step_num = 1000000
return step_num
  def get_beta(self):
    """Get KL multiplier (beta) based on the schedule.

    Supported values of hparams.latent_loss_multiplier_schedule:
      "constant": beta is 0.0 until num_iterations_2nd_stage, then jumps to
        hparams.latent_loss_multiplier.
      "linear_anneal": beta is 0.0 until num_iterations_2nd_stage, ramps
        linearly up to hparams.latent_loss_multiplier at hparams.anneal_end,
        and stays there afterwards.

    Returns:
      A scalar tensor, also logged to the "beta" summary.

    Raises:
      ValueError: if anneal_end < num_iterations_2nd_stage, or the schedule
        name is unknown.
    """
    step_num = self.get_iteration_num()
    schedule = self.hparams.latent_loss_multiplier_schedule
    second_stage = self.hparams.num_iterations_2nd_stage
    # TODO(mechcoder): Add log_annealing schedule.
    if schedule == "constant":
      beta = tf.cond(tf.greater(step_num, second_stage),
                     lambda: self.hparams.latent_loss_multiplier,
                     lambda: 0.0)
    elif schedule == "linear_anneal":
      # Linearly anneal beta from 0.0 to self.hparams.latent_loss_multiplier.
      # between self.hparams.num_iterations_2nd_stage to anneal_end.
      # beta = latent_loss * (1 - (global_step - 2nd_stage) / (anneal_end - 2nd_stage))  # pylint:disable=line-too-long
      anneal_end = self.hparams.anneal_end
      latent_multiplier = self.hparams.latent_loss_multiplier
      if anneal_end < second_stage:
        raise ValueError("Expected hparams.num_iterations_2nd_stage < "
                         "hparams.anneal_end %d, got %d." %
                         (second_stage, anneal_end))

      def anneal_loss(step_num):
        # Fraction goes 1 -> 0 as step_num goes second_stage -> anneal_end,
        # so the returned beta ramps 0 -> latent_loss_multiplier.
        step_num = tf.cast(step_num, dtype=tf.float32)
        fraction = (float(anneal_end) - step_num) / (anneal_end - second_stage)
        return self.hparams.latent_loss_multiplier * (1 - fraction)

      # Piecewise: 0 before the ramp, the constant after it, the ramp between.
      beta = tf.case(
          pred_fn_pairs={
              tf.less(step_num, second_stage): lambda: 0.0,
              tf.greater(step_num, anneal_end): lambda: latent_multiplier},
          default=lambda: anneal_loss(step_num))
    else:
      raise ValueError("Unknown beta schedule.")

    tf.summary.scalar("beta", beta)
    return beta
  def get_scheduled_sample_func(self, batch_size):
    """Creates a function for scheduled sampling based on given hparams.

    In "prob" mode the probability of picking ground truth decays
    polynomially from 1.0 to 0.0 over scheduled_sampling_decay_steps.
    Otherwise the number of ground-truth frames per batch decays following
    k / (k + exp(iter / k)) with k = hparams.scheduled_sampling_k.

    Args:
      batch_size: batch size of the frames being sampled.

    Returns:
      A partial of the chosen common_video scheduled-sampling function with
      batch_size and the decayed sampling variable already bound.
    """
    with tf.variable_scope("scheduled_sampling_func", reuse=False):
      iter_num = self.get_iteration_num()

      if self.hparams.scheduled_sampling_mode == "prob":
        decay_steps = self.hparams.scheduled_sampling_decay_steps
        probability = tf.train.polynomial_decay(
            1.0, iter_num, decay_steps, 0.0)
        scheduled_sampling_func = common_video.scheduled_sample_prob
        scheduled_sampling_func_var = probability
      else:
        # Calculate number of ground-truth frames to pass in.
        k = self.hparams.scheduled_sampling_k
        num_ground_truth = tf.to_int32(
            tf.round(
                tf.to_float(batch_size) *
                (k / (k + tf.exp(tf.to_float(iter_num) / tf.to_float(k))))))
        scheduled_sampling_func = common_video.scheduled_sample_count
        scheduled_sampling_func_var = num_ground_truth

      tf.summary.scalar("scheduled_sampling_var", scheduled_sampling_func_var)
      partial_func = partial(scheduled_sampling_func,
                             batch_size=batch_size,
                             scheduled_sample_var=scheduled_sampling_func_var)
      return partial_func
  def get_scheduled_sample_inputs(self,
                                  done_warm_start,
                                  groundtruth_items,
                                  generated_items,
                                  scheduled_sampling_func):
    """Scheduled sampling.

    During warm start, ground truth is always used; outside of training,
    generated items are always used; otherwise scheduled_sampling_func
    mixes the two.

    Args:
      done_warm_start: whether we are done with warm start or not.
      groundtruth_items: list of ground truth items.
      generated_items: list of generated items.
      scheduled_sampling_func: scheduled sampling function to choose between
        groundtruth items and generated items.

    Returns:
      A mix list of ground truth and generated items.
    """
    def sample():
      """Calculate the scheduled sampling params based on iteration number."""
      with tf.variable_scope("scheduled_sampling", reuse=tf.AUTO_REUSE):
        output_items = []
        for item_gt, item_gen in zip(groundtruth_items, generated_items):
          output_items.append(scheduled_sampling_func(item_gt, item_gen))
        return output_items

    # The first matching predicate wins; sampling is only reached when we are
    # past warm start AND in training mode.
    cases = [
        (tf.logical_not(done_warm_start), lambda: groundtruth_items),
        (tf.logical_not(self.is_training), lambda: generated_items),
    ]
    output_items = tf.case(cases, default=sample, strict=True)

    return output_items
def get_input_if_exists(self, features, key, batch_size, num_frames):
if key in features:
x = features[key]
else:
x = tf.zeros((batch_size, num_frames, 1, self.hparams.hidden_size))
return common_video.swap_time_and_batch_axes(x)
  def bottom_part_tower(self, input_image, input_reward, action, latent,
                        lstm_state, lstm_size, conv_size, concat_latent=False):
    """The bottom part of predictive towers.

    With the current (early) design, the main prediction tower and
    the reward prediction tower share the same architecture. TF Scope can be
    adjusted as required to either share or not share the weights between
    the two towers.

    Args:
      input_image: the current image.
      input_reward: the current reward.
      action: the action taken by the agent.
      latent: the latent vector.
      lstm_state: the current internal states of conv lstms (mutated in
        place: entries 0-4 are updated).
      lstm_size: the size of lstms.
      conv_size: the size of convolutions.
      concat_latent: whether or not to concatenate the latent at every step.

    Returns:
      - the output of the partial network (hidden5).
      - intermediate outputs (enc0, enc1) for skip connections.
    """
    lstm_func = common_video.conv_lstm_2d
    tile_and_concat = common_video.tile_and_concat

    input_image = common_layers.make_even_size(input_image)
    concat_input_image = tile_and_concat(
        input_image, latent, concat_latent=concat_latent)

    # Downsampling stage 1: strided 5x5 conv.
    enc0 = tfl.conv2d(
        concat_input_image,
        conv_size[0], [5, 5],
        strides=(2, 2),
        activation=tf.nn.relu,
        padding="SAME",
        name="scale1_conv1")
    enc0 = tfcl.layer_norm(enc0, scope="layer_norm1")

    hidden1, lstm_state[0] = lstm_func(
        enc0, lstm_state[0], lstm_size[0], name="state1")
    hidden1 = tile_and_concat(hidden1, latent, concat_latent=concat_latent)
    hidden1 = tfcl.layer_norm(hidden1, scope="layer_norm2")
    hidden2, lstm_state[1] = lstm_func(
        hidden1, lstm_state[1], lstm_size[1], name="state2")
    hidden2 = tfcl.layer_norm(hidden2, scope="layer_norm3")
    hidden2 = common_layers.make_even_size(hidden2)

    # Downsampling stage 2.
    enc1 = tfl.conv2d(hidden2, hidden2.get_shape()[3], [3, 3], strides=(2, 2),
                      padding="SAME", activation=tf.nn.relu, name="conv2")
    enc1 = tile_and_concat(enc1, latent, concat_latent=concat_latent)
    hidden3, lstm_state[2] = lstm_func(
        enc1, lstm_state[2], lstm_size[2], name="state3")
    hidden3 = tile_and_concat(hidden3, latent, concat_latent=concat_latent)
    hidden3 = tfcl.layer_norm(hidden3, scope="layer_norm4")
    hidden4, lstm_state[3] = lstm_func(
        hidden3, lstm_state[3], lstm_size[3], name="state4")
    hidden4 = tile_and_concat(hidden4, latent, concat_latent=concat_latent)
    hidden4 = tfcl.layer_norm(hidden4, scope="layer_norm5")
    hidden4 = common_layers.make_even_size(hidden4)

    # Downsampling stage 3; action/reward/latent are injected at this depth.
    enc2 = tfl.conv2d(hidden4, hidden4.get_shape()[3], [3, 3], strides=(2, 2),
                      padding="SAME", activation=tf.nn.relu, name="conv3")

    # Pass in action if exists.
    if action is not None:
      emb_action = common_video.encode_to_shape(
          action, enc2.get_shape(), "action_enc")
      enc2 = tf.concat(values=[enc2, emb_action], axis=3)

    # Pass in reward if exists.
    if input_reward is not None:
      emb_reward = common_video.encode_to_shape(
          input_reward, enc2.get_shape(), "reward_enc")
      enc2 = tf.concat(values=[enc2, emb_reward], axis=3)

    # When not concatenating at every step, the latent is appended once here.
    if latent is not None and not concat_latent:
      with tf.control_dependencies([latent]):
        enc2 = tf.concat([enc2, latent], axis=3)

    # 1x1 conv to mix the concatenated channels back down.
    enc3 = tfl.conv2d(enc2, hidden4.get_shape()[3], [1, 1], strides=(1, 1),
                      padding="SAME", activation=tf.nn.relu, name="conv4")

    hidden5, lstm_state[4] = lstm_func(
        enc3, lstm_state[4], lstm_size[4], name="state5")  # last 8x8
    hidden5 = tfcl.layer_norm(hidden5, scope="layer_norm6")
    hidden5 = tile_and_concat(hidden5, latent, concat_latent=concat_latent)
    return hidden5, (enc0, enc1)
  def construct_latent_tower(self, images):
    """Builds convolutional latent tower for stochastic model.

    At training time this tower generates a latent distribution (mean and std)
    conditioned on the entire video. This latent variable will be fed to the
    main tower as an extra variable to be used for future frames prediction.
    At inference time, the tower is disabled and only returns zeros (the main
    tower then effectively samples from N(0, 1)).

    If the multi_latent flag is on, a different latent for every timestep would
    be generated.

    Args:
      images: tensor of ground truth image sequences (time-major).

    Returns:
      latent_mean: predicted latent mean (zeros outside TRAIN mode).
      latent_std: predicted latent standard deviation (zeros outside TRAIN
        mode).
    """
    conv_size = self.tinyify([32, 64, 64])
    with tf.variable_scope("latent", reuse=tf.AUTO_REUSE):
      # this allows more predicted frames at inference time
      latent_num_frames = self.hparams.latent_num_frames
      if latent_num_frames == 0:  # use all frames by default.
        latent_num_frames = (self.hparams.video_num_input_frames +
                             self.hparams.video_num_target_frames)
      tf.logging.info("Creating latent tower with %d frames."%latent_num_frames)
      # Stack the conditioning frames along the channel axis.
      latent_images = tf.unstack(images[:latent_num_frames], axis=0)
      images = tf.concat(latent_images, 3)

      x = images
      x = common_layers.make_even_size(x)
      x = tfl.conv2d(x, conv_size[0], [3, 3], strides=(2, 2),
                     padding="SAME", activation=tf.nn.relu, name="latent_conv1")
      x = tfcl.batch_norm(x, updates_collections=None,
                          is_training=self.is_training, scope="latent_bn1")
      x = common_layers.make_even_size(x)
      x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),
                     padding="SAME", activation=tf.nn.relu, name="latent_conv2")
      x = tfcl.batch_norm(x, updates_collections=None,
                          is_training=self.is_training, scope="latent_bn2")
      x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(1, 1),
                     padding="SAME", activation=tf.nn.relu, name="latent_conv3")
      x = tfcl.batch_norm(x, updates_collections=None,
                          is_training=self.is_training, scope="latent_bn3")

      nc = self.hparams.latent_channels
      mean = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
                        padding="SAME", activation=None, name="latent_mean")
      # ReLU keeps std non-negative; latent_std_min sets the floor.
      std = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
                       padding="SAME", activation=tf.nn.relu, name="latent_std")
      std += self.hparams.latent_std_min

      # No latent tower at inference time, just standard gaussian.
      if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return tf.zeros_like(mean), tf.zeros_like(std)

      return mean, std
  def reward_prediction(self, input_image, input_reward, action, latent):
    """Builds a reward prediction network.

    A small conv stack over the (predicted) frame, decoded back to the shape
    of input_reward. `action` and `latent` are currently unused.
    """
    del action
    del latent

    # NOTE(review): conv_size[0] is never used below -- presumably leftover;
    # the three convs use indices 1..3.
    conv_size = self.tinyify([32, 32, 16, 4])

    with tf.variable_scope("reward_pred", reuse=tf.AUTO_REUSE):
      x = input_image
      x = tfcl.batch_norm(x, updates_collections=None,
                          is_training=self.is_training, scope="reward_bn0")
      x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),
                     padding="SAME", activation=tf.nn.relu, name="reward_conv1")
      x = tfcl.batch_norm(x, updates_collections=None,
                          is_training=self.is_training, scope="reward_bn1")
      x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(2, 2),
                     padding="SAME", activation=tf.nn.relu, name="reward_conv2")
      x = tfcl.batch_norm(x, updates_collections=None,
                          is_training=self.is_training, scope="reward_bn2")
      x = tfl.conv2d(x, conv_size[3], [3, 3], strides=(2, 2),
                     padding="SAME", activation=tf.nn.relu, name="reward_conv3")

      pred_reward = common_video.decode_to_shape(
          x, input_reward.shape, "reward_dec")

      return pred_reward
  def construct_predictive_tower(
      self, input_image, input_reward, action, lstm_state, latent,
      concat_latent=False):
    """Predicts the next frame from the current frame/reward/action/latent.

    Runs bottom_part_tower, upsamples back to frame resolution with skip
    connections, then composites the output from masked pixel transformations
    (CDNA or DNA) plus a generated-from-scratch layer.

    Returns:
      output: the predicted frame.
      lstm_state: the updated conv-lstm states (entries 5 and 6 are updated
        here; 0-4 inside bottom_part_tower).
    """
    # Main tower
    lstm_func = common_video.conv_lstm_2d
    frame_shape = common_layers.shape_list(input_image)
    batch_size, img_height, img_width, color_channels = frame_shape
    # the number of different pixel motion predictions
    # and the number of masks for each of those predictions
    num_masks = self.hparams.num_masks
    upsample_method = self.hparams.upsample_method
    tile_and_concat = common_video.tile_and_concat

    lstm_size = self.tinyify([32, 32, 64, 64, 128, 64, 32])
    conv_size = self.tinyify([32])

    with tf.variable_scope("main", reuse=tf.AUTO_REUSE):
      hidden5, skips = self.bottom_part_tower(
          input_image, input_reward, action, latent,
          lstm_state, lstm_size, conv_size, concat_latent=concat_latent)
      enc0, enc1 = skips

      with tf.variable_scope("upsample1", reuse=tf.AUTO_REUSE):
        enc4 = common_layers.cyclegan_upsample(
            hidden5, num_outputs=hidden5.shape.as_list()[-1],
            stride=[2, 2], method=upsample_method)

      enc1_shape = common_layers.shape_list(enc1)
      enc4 = enc4[:, :enc1_shape[1], :enc1_shape[2], :]  # Cut to shape.
      enc4 = tile_and_concat(enc4, latent, concat_latent=concat_latent)

      hidden6, lstm_state[5] = lstm_func(
          enc4, lstm_state[5], lstm_size[5], name="state6",
          spatial_dims=enc1_shape[1:-1])  # 16x16
      hidden6 = tile_and_concat(hidden6, latent, concat_latent=concat_latent)
      hidden6 = tfcl.layer_norm(hidden6, scope="layer_norm7")
      # Skip connection.
      hidden6 = tf.concat(axis=3, values=[hidden6, enc1])  # both 16x16

      with tf.variable_scope("upsample2", reuse=tf.AUTO_REUSE):
        enc5 = common_layers.cyclegan_upsample(
            hidden6, num_outputs=hidden6.shape.as_list()[-1],
            stride=[2, 2], method=upsample_method)

      enc0_shape = common_layers.shape_list(enc0)
      enc5 = enc5[:, :enc0_shape[1], :enc0_shape[2], :]  # Cut to shape.
      enc5 = tile_and_concat(enc5, latent, concat_latent=concat_latent)

      hidden7, lstm_state[6] = lstm_func(
          enc5, lstm_state[6], lstm_size[6], name="state7",
          spatial_dims=enc0_shape[1:-1])  # 32x32
      hidden7 = tfcl.layer_norm(hidden7, scope="layer_norm8")

      # Skip connection.
      hidden7 = tf.concat(axis=3, values=[hidden7, enc0])  # both 32x32

      with tf.variable_scope("upsample3", reuse=tf.AUTO_REUSE):
        enc6 = common_layers.cyclegan_upsample(
            hidden7, num_outputs=hidden7.shape.as_list()[-1],
            stride=[2, 2], method=upsample_method)
      enc6 = tfcl.layer_norm(enc6, scope="layer_norm9")
      enc6 = tile_and_concat(enc6, latent, concat_latent=concat_latent)

      if self.hparams.model_options == "DNA":
        # Using largest hidden state for predicting untied conv kernels.
        enc7 = tfl.conv2d_transpose(
            enc6,
            self.hparams.dna_kernel_size**2,
            [1, 1],
            strides=(1, 1),
            padding="SAME",
            name="convt4",
            activation=None)
      else:
        # Using largest hidden state for predicting a new image layer.
        enc7 = tfl.conv2d_transpose(
            enc6,
            color_channels,
            [1, 1],
            strides=(1, 1),
            padding="SAME",
            name="convt4",
            activation=None)

      # This allows the network to also generate one image from scratch,
      # which is useful when regions of the image become unoccluded.
      # (For DNA this initial value is replaced below.)
      transformed = [tf.nn.sigmoid(enc7)]

      if self.hparams.model_options == "CDNA":
        # cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
        cdna_input = tfcl.flatten(hidden5)
        transformed += common_video.cdna_transformation(
            input_image, cdna_input, num_masks, int(color_channels),
            self.hparams.dna_kernel_size, self.hparams.relu_shift)
      elif self.hparams.model_options == "DNA":
        # Only one mask is supported (more should be unnecessary).
        if num_masks != 1:
          raise ValueError("Only one mask is supported for DNA model.")
        transformed = [
            common_video.dna_transformation(
                input_image, enc7,
                self.hparams.dna_kernel_size, self.hparams.relu_shift)]

      # Per-pixel compositing masks: one per transformation plus one for the
      # untransformed input image; softmax makes them sum to 1 per pixel.
      masks = tfl.conv2d(
          enc6, filters=num_masks + 1, kernel_size=[1, 1],
          strides=(1, 1), name="convt7", padding="SAME")
      masks = tf.reshape(
          tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
          [batch_size,
           int(img_height),
           int(img_width), num_masks + 1])
      mask_list = tf.split(
          axis=3, num_or_size_splits=num_masks + 1, value=masks)
      output = mask_list[0] * input_image
      for layer, mask in zip(transformed, mask_list[1:]):
        output += layer * mask

      return output, lstm_state
def construct_model(self,
images,
actions,
rewards):
"""Build convolutional lstm video predictor using CDNA, or DNA.
Args:
images: list of tensors of ground truth image sequences
there should be a 4D image ?xWxHxC for each timestep
actions: list of action tensors
each action should be in the shape ?x1xZ
rewards: list of reward tensors
each reward should be in the shape ?x1xZ
Returns:
gen_images: predicted future image frames
gen_rewards: predicted future rewards
latent_mean: mean of approximated posterior
latent_std: std of approximated posterior
Raises:
ValueError: if more than 1 mask specified for DNA model.
"""
context_frames = self.hparams.video_num_input_frames
batch_size = common_layers.shape_list(images)[1]
ss_func = self.get_scheduled_sample_func(batch_size)
def process_single_frame(prev_outputs, inputs):
"""Process a single frame of the video."""
cur_image, cur_reward, action = inputs
time_step, prev_image, prev_reward, lstm_states = prev_outputs
generated_items = [prev_image, prev_reward]
groundtruth_items = [cur_image, cur_reward]
done_warm_start = tf.greater(time_step, context_frames - 1)
input_image, input_reward = self.get_scheduled_sample_inputs(
done_warm_start, groundtruth_items, generated_items, ss_func)
# Prediction
pred_image, lstm_states = self.construct_predictive_tower(
input_image, input_reward, action, lstm_states, latent)
if self.hparams.reward_prediction:
reward_input_image = pred_image
if self.hparams.reward_prediction_stop_gradient:
reward_input_image = tf.stop_gradient(reward_input_image)
pred_reward = self.reward_prediction(
reward_input_image, input_reward, action, latent)
else:
pred_reward = input_reward
time_step += 1
outputs = (time_step, pred_image, pred_reward, lstm_states)
return outputs
# Latent tower
latent = None
if self.hparams.stochastic_model:
latent_mean, latent_std = self.construct_latent_tower(images)
latent = self.get_gaussian_latent(latent_mean, latent_std)
# HACK: Do first step outside to initialize all the variables
lstm_states = [None] * 7
inputs = images[0], rewards[0], actions[0]
prev_outputs = (tf.constant(0),
tf.zeros_like(images[0]),
tf.zeros_like(rewards[0]),
lstm_states)
initializers = process_single_frame(prev_outputs, inputs)
first_gen_images = tf.expand_dims(initializers[1], axis=0)
first_gen_rewards = tf.expand_dims(initializers[2], axis=0)
inputs = (images[1:-1], rewards[1:-1], actions[1:-1])
outputs = tf.scan(process_single_frame, inputs, initializers)
gen_images, gen_rewards = outputs[1:3]
gen_images = tf.concat((first_gen_images, gen_images), axis=0)
gen_rewards = tf.concat((first_gen_rewards, gen_rewards), axis=0)
return gen_images, gen_rewards, [latent_mean], [latent_std]
  def get_extra_loss(self, latent_means=None, latent_stds=None,
                     true_frames=None, gen_frames=None, beta=1.0):
    """Losses in addition to the default modality losses.

    Sums the KL divergence of every posterior (mean, std) pair, scaled by
    `beta`. Outside of training no KL is computed and 0.0 is returned.
    """
    # true_frames/gen_frames are unused here -- presumably kept so overrides
    # with frame-based auxiliary losses can share this signature; verify.
    kl_loss = 0.0
    if self.is_training:
      for i, (mean, std) in enumerate(zip(latent_means, latent_stds)):
        kl_loss += common_layers.kl_divergence(mean, std)
        tf.summary.histogram("posterior_mean_%d" % i, mean)
        tf.summary.histogram("posterior_std_%d" % i, std)
      tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss))
    return beta * kl_loss
  def body(self, features):
    """T2T model body: runs the video predictor over input+target frames.

    Returns predictions for the target frames (and target rewards when
    present) together with the extra KL loss.
    """
    hparams = self.hparams
    batch_size = common_layers.shape_list(features["inputs"])[0]

    # Swap time and batch axes.
    input_frames = common_video.swap_time_and_batch_axes(features["inputs"])
    target_frames = common_video.swap_time_and_batch_axes(features["targets"])

    # Get actions if exist otherwise use zeros
    input_actions = self.get_input_if_exists(
        features, "input_action", batch_size, hparams.video_num_input_frames)
    target_actions = self.get_input_if_exists(
        features, "target_action", batch_size, hparams.video_num_target_frames)

    # Get rewards if exist otherwise use zeros
    input_rewards = self.get_input_if_exists(
        features, "input_reward", batch_size, hparams.video_num_input_frames)
    target_rewards = self.get_input_if_exists(
        features, "target_reward", batch_size, hparams.video_num_target_frames)

    # Concatenate along the (leading) time axis.
    all_actions = tf.concat([input_actions, target_actions], axis=0)
    all_rewards = tf.concat([input_rewards, target_rewards], axis=0)
    all_frames = tf.concat([input_frames, target_frames], axis=0)

    # Each image is being used twice, in latent tower and main tower.
    # This is to make sure we are using the *same* image for both, ...
    # ... given how TF queues work.
    # NOT sure if this is required at all. Doesn't hurt though! :)
    all_frames = tf.identity(all_frames)

    gen_images, gen_rewards, latent_means, latent_stds = self.construct_model(
        images=all_frames,
        actions=all_actions,
        rewards=all_rewards,
    )

    tf.summary.histogram("input_action", tf.argmax(input_actions, axis=3))
    tf.summary.histogram("target_action", tf.argmax(target_actions, axis=3))
    tf.summary.histogram("input_reward", tf.argmax(input_rewards, axis=3))
    tf.summary.histogram("target_reward", tf.argmax(target_rewards, axis=3))
    tf.summary.histogram("gen_rewards", tf.argmax(gen_rewards, axis=3))

    beta = self.get_beta()
    extra_loss = self.get_extra_loss(
        latent_means=latent_means,
        latent_stds=latent_stds, beta=beta, true_frames=all_frames,
        gen_frames=gen_images)

    # Ignore the predictions from the input frames.
    # This is NOT the same as original paper/implementation.
    predictions = gen_images[hparams.video_num_input_frames-1:]
    reward_pred = gen_rewards[hparams.video_num_input_frames-1:]
    reward_pred = tf.squeeze(reward_pred, axis=2)  # Remove unneeded dimension.

    # TODO(mbz): clean this up!
    def fix_video_dims_and_concat_on_x_axis(x):
      # NOTE(review): hardcodes 64x? frames with 3 channels for the summary
      # image -- only correct for 64-pixel RGB video; confirm.
      x = tf.transpose(x, [1, 3, 4, 0, 2])
      x = tf.reshape(x, [batch_size, 64, 3, -1])
      x = tf.transpose(x, [0, 3, 1, 2])
      return x

    frames_gd = fix_video_dims_and_concat_on_x_axis(target_frames)
    frames_pd = fix_video_dims_and_concat_on_x_axis(predictions)
    side_by_side_video = tf.concat([frames_gd, frames_pd], axis=2)
    tf.summary.image("full_video", side_by_side_video)

    # Swap back time and batch axes.
    predictions = common_video.swap_time_and_batch_axes(predictions)
    reward_pred = common_video.swap_time_and_batch_axes(reward_pred)

    return_targets = predictions
    if "target_reward" in features:
      return_targets = {"targets": predictions, "target_reward": reward_pred}

    return return_targets, extra_loss
@registry.register_model
class NextFrameStochasticTwoFrames(NextFrameStochastic):
  """Stochastic next-frame model with 2 frames posterior."""

  def construct_model(self, images, actions, rewards):
    """Per-step variant: a fresh latent is inferred from each frame pair.

    Unlike the parent, which conditions one latent on the whole video, here
    the latent tower is run on (frame_t, frame_t+1) at every timestep and
    the loop is a plain Python loop over unstacked tensors.
    """
    images = tf.unstack(images, axis=0)
    actions = tf.unstack(actions, axis=0)
    rewards = tf.unstack(rewards, axis=0)

    batch_size = common_layers.shape_list(images[0])[0]
    context_frames = self.hparams.video_num_input_frames

    # Predicted images and rewards.
    gen_rewards, gen_images, latent_means, latent_stds = [], [], [], []

    # LSTM states.
    lstm_state = [None] * 7

    # Create scheduled sampling function
    ss_func = self.get_scheduled_sample_func(batch_size)

    pred_image = tf.zeros_like(images[0])
    pred_reward = tf.zeros_like(rewards[0])
    latent = None
    for timestep, image, action, reward in zip(
        range(len(images)-1), images[:-1], actions[:-1], rewards[:-1]):
      # Scheduled Sampling
      done_warm_start = timestep > context_frames - 1
      groundtruth_items = [image, reward]
      generated_items = [pred_image, pred_reward]
      input_image, input_reward = self.get_scheduled_sample_inputs(
          done_warm_start, groundtruth_items, generated_items, ss_func)

      # Latent
      # TODO(mbz): should we use input_image instead of image?
      latent_images = [image, images[timestep+1]]
      latent_mean, latent_std = self.construct_latent_tower(latent_images)
      latent = self.get_gaussian_latent(latent_mean, latent_std)
      latent_means.append(latent_mean)
      latent_stds.append(latent_std)

      # Prediction
      pred_image, lstm_state = self.construct_predictive_tower(
          input_image, input_reward, action, lstm_state, latent)

      if self.hparams.reward_prediction:
        pred_reward = self.reward_prediction(
            pred_image, input_reward, action, latent)
      else:
        pred_reward = input_reward

      gen_images.append(pred_image)
      gen_rewards.append(pred_reward)

    gen_images = tf.stack(gen_images, axis=0)
    gen_rewards = tf.stack(gen_rewards, axis=0)

    return gen_images, gen_rewards, latent_means, latent_stds
| [
"[email protected]"
] | |
c1810bdae8eb260c21b70432f9e0091da1d8ee3a | c61798997614f4430a6a56b16e8d17fe75fb2f9c | /Yurii_Khomych/l_6_files/csv_examples/csv_read_dictionary.py | 521b595e29954d34abd71fca21273664147b2280 | [] | no_license | YuriiKhomych/ITEA_AC | ad944bbe74be88f306a45f38efa70765c5286162 | f9eb147da1135a978929ae370d9c9fcd8dc59d21 | refs/heads/master | 2022-12-18T14:55:56.162451 | 2020-05-03T12:45:02 | 2020-05-03T12:45:02 | 234,373,863 | 0 | 9 | null | 2022-12-08T03:46:33 | 2020-01-16T17:26:50 | Python | UTF-8 | Python | false | false | 480 | py | import csv
# Read employee records with DictReader and echo them, counting all lines
# (header plus data rows) as we go.
with open("employee_birthday.csv", mode="r") as csv_file:
    reader = csv.DictReader(csv_file)
    total_lines = 0
    for row in reader:
        if total_lines == 0:
            # DictReader already consumed the header; the first data row's
            # keys are the column names, so print them once up front.
            print(f'Column names are {", ".join(row)}')
            total_lines += 1
        print(
            f'\t{row["name"]} works in the {row["department"]} department, and was born in {row["birthday month"]}.'
        )
        total_lines += 1
    print(f"Processed {total_lines} lines.")
| [
"[email protected]"
] | |
ac382ddbbaef11f5a3db3f6a26cb1703eeac2af9 | 949908be7a522279bc5947ee0be436ef058767a9 | /code/generate_mcts_games.py | c6a85e7062e56d3eeeb9474ee28d5d3435bb39d0 | [] | no_license | maxpumperla/deep_learning_and_the_game_of_go | 3bd2bddce228b6696fb716eb0f18a2e9c82bb20c | c70cfe4a03dd2365dcb4295236755cca7a7178b7 | refs/heads/master | 2023-08-21T14:48:53.899001 | 2022-09-17T13:03:09 | 2022-09-17T13:03:09 | 108,328,408 | 955 | 402 | null | 2022-09-17T13:03:10 | 2017-10-25T21:32:32 | Python | UTF-8 | Python | false | false | 3,187 | py | # tag::generate_mcts_imports[]
import argparse
import numpy as np
from dlgo.encoders import get_encoder_by_name
from dlgo import goboard_fast as goboard
from dlgo import mcts
from dlgo.utils import print_board, print_move
# end::generate_mcts_imports[]
# tag::generate_mcts[]
def generate_game(board_size, rounds, max_moves, temperature):
    """Self-play one game with an MCTS bot and record (board, move) pairs.

    Args:
        board_size: size of the (square) Go board.
        rounds: number of MCTS rounds per move.
        max_moves: hard cap on the number of moves played.
        temperature: exploration temperature for the MCTS agent.

    Returns:
        Two numpy arrays: encoded board states and the matching
        one-hot-encoded moves (passes/resigns are not recorded).
    """
    boards, moves = [], []  # <1>

    encoder = get_encoder_by_name('oneplane', board_size)  # <2>

    game = goboard.GameState.new_game(board_size)  # <3>

    bot = mcts.MCTSAgent(rounds, temperature)  # <4>

    num_moves = 0
    while not game.is_over():
        print_board(game.board)
        move = bot.select_move(game)  # <5>
        if move.is_play:
            # Only actual stone placements are encoded as training pairs.
            boards.append(encoder.encode(game))  # <6>

            move_one_hot = np.zeros(encoder.num_points())
            move_one_hot[encoder.encode_point(move.point)] = 1
            moves.append(move_one_hot)  # <7>

        print_move(game.next_player, move)
        game = game.apply_move(move)  # <8>
        num_moves += 1
        if num_moves > max_moves:  # <9>
            break

    return np.array(boards), np.array(moves)  # <10>
# <1> In `boards` we store encoded board state, `moves` is for encoded moves.
# <2> We initialize a OnePlaneEncoder by name with given board size.
# <3> An new game of size `board_size` is instantiated.
# <4> A Monte Carlo tree search agent with specified number of rounds and temperature will serve as our bot.
# <5> The next move is selected by the bot.
# <6> The encoded board situation is appended to `boards`.
# <7> The one-hot-encoded next move is appended to `moves`.
# <8> Afterwards the bot move is applied to the board.
# <9> We continue with the next move, unless the maximum number of moves has been reached.
# end::generate_mcts[]
# tag::generate_mcts_main[]
def main():
    """CLI entry point: generate games and save features/labels as .npy."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--board-size', '-b', type=int, default=9)
    parser.add_argument('--rounds', '-r', type=int, default=1000)
    parser.add_argument('--temperature', '-t', type=float, default=0.8)
    parser.add_argument('--max-moves', '-m', type=int, default=60,
                        help='Max moves per game.')
    parser.add_argument('--num-games', '-n', type=int, default=10)
    parser.add_argument('--board-out')
    parser.add_argument('--move-out')
    args = parser.parse_args()  # <1>

    xs = []
    ys = []

    for i in range(args.num_games):
        print('Generating game %d/%d...' % (i + 1, args.num_games))
        x, y = generate_game(args.board_size, args.rounds, args.max_moves, args.temperature)  # <2>
        xs.append(x)
        ys.append(y)

    # Stack all games into single feature/label arrays.
    x = np.concatenate(xs)  # <3>
    y = np.concatenate(ys)

    np.save(args.board_out, x)  # <4>
    np.save(args.move_out, y)


if __name__ == '__main__':
    main()
main()
# <1> This application allows some customization via command line arguments.
# <2> For the specified number of games we generate game data.
# <3> After all games have been generated, we concatenate features and labels, respectively.
# <4> We store feature and label data to separate files, as specified by the command line options.
# end::generate_mcts_main[]
| [
"[email protected]"
] | |
d845487c0e8cfd54401601e8139b2f3acf4ad17a | 3e63608e1cad90bc845c4580723e57ae7ca3f61d | /cartography/intel/oci/utils.py | e92ab4552703c97a1d2b485e2a3694912bb35905 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | lyft/cartography | 06dcbf13907cbb9a31b75cd8b21f5721f7cc1b01 | 830b8944879a01f52b21ee12b6fddf245f9733cb | refs/heads/master | 2023-08-31T12:27:59.752452 | 2023-08-28T20:42:12 | 2023-08-28T20:42:12 | 172,811,550 | 2,778 | 334 | Apache-2.0 | 2023-09-13T04:59:46 | 2019-02-27T00:16:29 | Python | UTF-8 | Python | false | false | 3,223 | py | # Copyright (c) 2020, Oracle and/or its affiliates.
# OCI intel module - utility functions
import json
from typing import Any
from typing import Dict
from typing import List
import neo4j
# Generic way to turn a OCI python object into the json response that you would see from calling the REST API.
def oci_object_to_json(in_obj: Any) -> List[Dict[str, Any]]:
    """Convert an OCI SDK object into REST-API-style JSON dicts.

    OCI SDK objects stringify to the JSON list the REST API would return,
    except that key names use ``_`` where the API uses ``-``; normalize each
    entry with replace_char_in_dict.
    """
    # The original loop variable shadowed the builtin `dict`; rewritten as a
    # comprehension with a proper name.
    return [replace_char_in_dict(entry) for entry in json.loads(str(in_obj))]
# Have to replace _ with - in dictionary keys, since _ is substituted for - in OCI object variables.
def replace_char_in_dict(in_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Recursively rewrite dict keys, replacing '_' with '-'.

    OCI object variables use underscores where the REST API uses hyphens;
    this restores the REST spelling. Recurses into nested dicts and — a fix
    over the original, which skipped them — into dicts inside list values.
    """
    out_dict: Dict[str, Any] = {}
    for dict_key, dict_val in in_dict.items():
        if isinstance(dict_val, dict):
            dict_val = replace_char_in_dict(dict_val)
        elif isinstance(dict_val, list):
            # JSON arrays may contain nested objects; normalize those too.
            dict_val = [
                replace_char_in_dict(item) if isinstance(item, dict) else item
                for item in dict_val
            ]
        out_dict[dict_key.replace('_', '-')] = dict_val
    return out_dict
# Grab list of all compartments and sub-compartments in neo4j already populated by iam.
def get_compartments_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    """Return distinct name/ocid/compartmentid of every OCICompartment
    reachable from the given tenancy node (populated earlier by the iam sync)."""
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(compartment:OCICompartment) " \
            "return DISTINCT compartment.name as name, compartment.ocid as ocid, " \
            "compartment.compartmentid as compartmentid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)
# Grab list of all groups in neo4j already populated by iam.
def get_groups_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    """Return distinct name/ocid of every OCIGroup reachable from the given
    tenancy node (populated earlier by the iam sync)."""
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(group:OCIGroup)" \
            "return DISTINCT group.name as name, group.ocid as ocid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)
# Grab list of all policies in neo4j already populated by iam.
def get_policies_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    """Return distinct name/ocid/statements/compartmentid of every OCIPolicy
    reachable from the given tenancy node (populated earlier by the iam sync)."""
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(policy:OCIPolicy)" \
            "return DISTINCT policy.name as name, policy.ocid as ocid, policy.statements as statements, " \
            "policy.compartmentid as compartmentid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)
# Grab list of all regions in neo4j already populated by iam.
def get_regions_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    """Return distinct name/key of every OCIRegion directly attached to the
    given tenancy node (populated earlier by the iam sync)."""
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-->(region:OCIRegion)" \
            "return DISTINCT region.name as name, region.key as key;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)
# Grab list of all security groups in neo4j already populated by network. Need to handle regions for this one.
def get_security_groups_in_tenancy(
    neo4j_session: neo4j.Session,
    tenancy_id: str, region: str,
) -> neo4j.Result:
    """Return distinct name/ocid/compartmentid of every OCINetworkSecurityGroup
    in the given region reachable from the tenancy node (populated earlier by
    the network sync)."""
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(security_group:OCINetworkSecurityGroup)-[OCI_REGION]->" \
            "(region:OCIRegion{name: $OCI_REGION})" \
            "return DISTINCT security_group.name as name, security_group.ocid as ocid, security_group.compartmentid " \
            "as compartmentid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id, OCI_REGION=region)
| [
"[email protected]"
] | |
1aaca2801000e12f5206239db1426efe9c79af26 | e3bdb7844f634efd89109079d22cade713c4899d | /test/test_tele_check_cbp_payment_method.py | bc5d46fb45107545f525634ef2eb3c58cb505e6f | [] | no_license | pc-coholic/Python | 5170c27da09b066c353e09539e404961f7ad50b7 | b7251c31339b579f71fb7ee9db05be51e9e43361 | refs/heads/master | 2023-04-19T02:42:02.914726 | 2021-04-26T16:07:37 | 2021-04-26T16:07:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.2.0.20210406.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.tele_check_cbp_payment_method import TeleCheckCBPPaymentMethod # noqa: E501
from openapi_client.rest import ApiException
class TestTeleCheckCBPPaymentMethod(unittest.TestCase):
    """TeleCheckCBPPaymentMethod unit test stubs"""

    # Generated stubs: no fixtures are required yet, so setUp/tearDown are
    # intentionally empty.
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testTeleCheckCBPPaymentMethod(self):
        """Test TeleCheckCBPPaymentMethod"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.tele_check_cbp_payment_method.TeleCheckCBPPaymentMethod()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
4f90609902c001000dd8541fb9265dbecca5a894 | 7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14 | /airbyte-integrations/connectors/source-shopify/main.py | 583c32023bc0f39e94d299da40e63db51016e230 | [
"MIT",
"Elastic-2.0"
] | permissive | Velocity-Engineering/airbyte | b6e1fcead5b9fd7c74d50b9f27118654604dc8e0 | 802a8184cdd11c1eb905a54ed07c8732b0c0b807 | refs/heads/master | 2023-07-31T15:16:27.644737 | 2021-09-28T08:43:51 | 2021-09-28T08:43:51 | 370,730,633 | 0 | 1 | MIT | 2021-06-08T05:58:44 | 2021-05-25T14:55:43 | Java | UTF-8 | Python | false | false | 248 | py | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import sys
from airbyte_cdk.entrypoint import launch
from source_shopify import SourceShopify
if __name__ == "__main__":
source = SourceShopify()
launch(source, sys.argv[1:])
| [
"[email protected]"
] | |
7f438642a7db1531b61af45a6b30465cec4404e4 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/116_13.py | c7ed19f6baf638e4c0f6144bd59a54b0499a494f | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,694 | py | Python | Retain K consecutive elements
Sometimes while working with data, we can have a problem in which we need to
select some of the elements that occur K times consecutively. This problem can
occur in many domains. Let’s discuss certain ways in which this problem can be
solved.
**Method #1 : Usinggroupby() \+ list comprehension**
This task can be performed using above functionalities. In this, we group all
the numbers that are occurring K consecutively. We iterate the list using list
comprehension.
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Retain K consecutive elements
# using groupby() + list comprehension
from itertools import groupby
# initialize list
test_list = [1, 1, 4, 5, 5, 6, 7, 7,
8]
# printing original list
print("The original list : " + str(test_list))
# initialize K
K = 2
# Retain K consecutive elements
# using groupby() + list comprehension
res = [i for i, j in groupby(test_list) if len(list(j))
== K]
# printing result
print("The K consecutive elements are : " + str(res))
---
__
__
**Output :**
The original list : [1, 1, 4, 5, 5, 6, 7, 7, 8]
The K consecutive elements are : [1, 5, 7]
**Method #2 : Using list comprehension \+ islice() \+ groupby()**
This task can also be performed using above functions. In this, we just
perform grouping in similar way as above but the way we extract consecutive
elements is by slice().
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Retain K consecutive elements
# using groupby() + list comprehension + islice()
from itertools import groupby, islice
# initialize list
test_list = [1, 1, 4, 5, 5, 6, 7, 7,
8]
# printing original list
print("The original list : " + str(test_list))
# initialize K
K = 2
# Retain K consecutive elements
# using groupby() + list comprehension + islice()
res = [i for i, j in groupby(test_list) if
len(list(islice(j, 0, K))) == K]
# printing result
print("The K consecutive elements are : " + str(res))
---
__
__
**Output :**
The original list : [1, 1, 4, 5, 5, 6, 7, 7, 8]
The K consecutive elements are : [1, 5, 7]
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"[email protected]"
] | |
b2b118c86d4de3a1e077197eb9735bf522e54fbc | 5ddcaa63a665b91b4928517a8463db497d581e79 | /run.py | efa32721e6a61deed9332f8194da40c53460c644 | [] | no_license | vgoklani/aiohttpvsgrequests | 5c1144977a94dfad7fe1f5866004b37d69a232e0 | ef260649ff16c886a8d0e7f0d1a85dee89af3e15 | refs/heads/master | 2021-01-19T20:47:00.745877 | 2016-12-08T08:03:05 | 2016-12-08T08:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,650 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Compare aiohttp and grequests
'''
import logging
import hashlib
import asyncio
import time
import aiohttp
import grequests
from hdx.data.resource import Resource
from hdx.facades.simple import facade
from requests import HTTPError
from requests import Session
from requests.adapters import HTTPAdapter
logger = logging.getLogger(__name__)
NUMBER_OF_URLS_TO_PROCESS = 100
async def fetch(metadata, session):
    """Fetch one resource URL and classify it.

    Returns a ``(resource_id, url, status, result)`` tuple where ``status`` is:
      0 -> request failed, ``result`` is the error message;
      1 -> server sent a Last-Modified header, ``result`` is that header value;
      2 -> no Last-Modified header, ``result`` is the MD5 hex digest of the body.
    """
    url, resource_id = metadata
    md5hash = hashlib.md5()
    try:
        # Outer 300 s cap over the whole request+stream; inner 10 s request timeout.
        with aiohttp.Timeout(300, loop=session.loop):
            async with session.get(url, timeout=10) as response:
                last_modified = response.headers.get('Last-Modified', None)
                if last_modified:
                    # The header alone is enough to detect changes; skip the body.
                    response.close()
                    return resource_id, url, 1, last_modified
                logger.info('Hashing %s' % url)
                # Stream in 1 KiB chunks so large files are never fully in memory.
                async for chunk in response.content.iter_chunked(1024):
                    if chunk:
                        md5hash.update(chunk)
                return resource_id, url, 2, md5hash.hexdigest()
    except Exception as e:
        # Any network/parse error is reported as a failure record, not raised.
        return resource_id, url, 0, str(e)
async def bound_fetch(sem, metadata, session):
    # Wrap fetch() in a semaphore so only a bounded number run concurrently.
    async with sem:
        return await fetch(metadata, session)
async def aiohttp_check_resources_for_last_modified(last_modified_check, loop):
    """Run fetch() over all (url, resource_id) pairs with bounded concurrency."""
    tasks = list()
    # create instance of Semaphore: at most 100 fetches in flight at once
    sem = asyncio.Semaphore(100)
    # keep-alive connections; pool limit matches the semaphore bound
    conn = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=conn, loop=loop) as session:
        for metadata in last_modified_check:
            task = bound_fetch(sem, metadata, session)
            tasks.append(task)
        # Gather preserves input order of results.
        return await asyncio.gather(*tasks)
def set_metadata(metadata):
    """Build a grequests response hook that tags the response with *metadata*."""
    def _attach(response, **_kwargs):
        # Stash the (url, resource_id) pair so imap consumers can recover it.
        response.metadata = metadata
        return response
    return _attach
def grequests_check_resources_for_last_modified(last_modified_check):
    """grequests counterpart of the aiohttp benchmark.

    Takes a list of (url, resource_id) pairs and returns a list of
    ``(resource_id, url, status, result)`` tuples with the same status codes
    as fetch(): 0 failure, 1 Last-Modified header, 2 MD5 hash of the body.
    """
    results = list()
    reqs = list()

    def exception_handler(req, exc):
        # Connection-level failures never reach the imap loop; record them here.
        url, res_id = req.metadata
        results.append((res_id, url, 0, str(exc)))

    with Session() as session:
        # One shared session with pools sized to the imap concurrency below.
        session.mount('http://', HTTPAdapter(pool_connections=100, pool_maxsize=100))
        session.mount('https://', HTTPAdapter(pool_connections=100, pool_maxsize=100))
        for metadata in last_modified_check:
            # The callback copies metadata onto the *response*; the direct
            # attribute set makes it available on the request for the handler.
            req = grequests.get(metadata[0], timeout=10, session=session, callback=set_metadata(metadata))
            req.metadata = metadata
            reqs.append(req)
        for response in grequests.imap(reqs, size=100, stream=True, exception_handler=exception_handler):
            url, resource_id = response.metadata
            try:
                response.raise_for_status()
            except HTTPError as e:
                # HTTP-level failure: record the numeric status code.
                results.append((resource_id, url, 0, response.status_code))
                response.close()
                continue
            last_modified = response.headers.get('Last-Modified', None)
            if last_modified:
                # Header is enough to detect changes; skip downloading the body.
                results.append((resource_id, url, 1, last_modified))
                response.close()
                continue
            logger.info('Hashing %s' % url)
            md5hash = hashlib.md5()
            try:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        md5hash.update(chunk)
                results.append((resource_id, url, 2, md5hash.hexdigest()))
            except Exception as e:
                results.append((resource_id, url, 0, str(e)))
            finally:
                response.close()
    return results
def print_results(results):
    """Log every failure message and a one-line summary of the three tallies."""
    tallies = {0: 0, 1: 0, 2: 0}  # failed / has Last-Modified / hashed
    for resource_id, url, status, result in results:
        if status not in tallies:
            raise ValueError('Invalid status returned!')
        if status == 0:
            logger.error(result)
        tallies[status] += 1
    summary = 'Have Last-Modified: %d, Hashed: %d, ' % (tallies[1], tallies[2])
    summary += 'Number Failed: %d' % tallies[0]
    logger.info(summary)
def run_aiohttp(last_modified_check):
    """Time and run the asyncio/aiohttp implementation, then report results."""
    start_time = time.time()
    loop = asyncio.get_event_loop()
    future = asyncio.ensure_future(aiohttp_check_resources_for_last_modified(last_modified_check, loop))
    results = loop.run_until_complete(future)
    logger.info('Execution time: %s seconds' % (time.time() - start_time))
    print_results(results)
def run_grequests(last_modified_check):
    """Time and run the grequests implementation, then report results."""
    start_time = time.time()
    results = grequests_check_resources_for_last_modified(last_modified_check)
    logger.info('Execution time: %s seconds' % (time.time() - start_time))
    print_results(results)
def main(configuration):
    """Benchmark entry point: build the URL list from HDX resources, run it.

    Searches all HDX resources, filters out hosts not suited to this check,
    and runs the aiohttp variant over a deterministic subset of URLs.
    """
    resources = Resource.search_in_hdx(configuration, 'name:')
    last_modified_check = list()
    for resource in resources:
        resource_id = resource['id']
        url = resource['url']
        # Skip HDX-internal hosts and known proxy/scraper domains.
        if 'data.humdata.org' in url or 'manage.hdx.rwlabs.org' in url or 'proxy.hxlstandard.org' in url or \
                'scraperwiki.com' in url or 'ourairports.com' in url:
            continue
        last_modified_check.append((url, resource_id))
    # Sort first so the first N URLs are the same on every run.
    last_modified_check = sorted(last_modified_check)[:NUMBER_OF_URLS_TO_PROCESS]
    # run_grequests(last_modified_check)
    run_aiohttp(last_modified_check)
| [
"[email protected]"
] | |
ed607b1844d38659474ab6f087fdead0907d0fe9 | 4a6d784fd44b57d6b2aabae9d2381884cc880aea | /form_cuotas_vencidas_30dias.py | 120d8721442c011b7db999e92002e91e49ab82c2 | [] | no_license | blueautomatic/Slam_Sistema_creditos | 0e46c2f23d396793122739f838073eff77df88e3 | 7eb20a90abce53f10dcd18e3d47e9a5f330acbbd | refs/heads/master | 2020-03-26T19:13:36.634824 | 2018-02-05T15:46:42 | 2018-02-05T15:46:42 | 145,254,325 | 0 | 0 | null | 2018-08-18T21:37:23 | 2018-08-18T21:37:23 | null | UTF-8 | Python | false | false | 8,848 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form_cuotas_vencidas_30dias.ui'
#
# Created: Fri Feb 24 11:25:32 2017
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_form_cuotas_vencidas_30dias(object):
    """pyuic5-generated UI for the overdue-instalment ("morosos") report window.

    The original generator emitted ~75 duplicated brush/setBrush lines for the
    tab widget palette; they are collapsed into the data-driven loop in
    ``_build_palette`` below.  NOTE(review): this module came from
    form_cuotas_vencidas_30dias.ui — prefer regenerating from the .ui file when
    the layout itself changes.
    """

    # One (role name, rgb) entry per palette role the designer customised.
    # The generated code applied the very same colours to the Active, Inactive
    # and Disabled colour groups, so a single table covers all three.
    _PALETTE_ROLES = (
        ('WindowText', (0, 0, 0)),
        ('Button', (239, 235, 231)),
        ('Text', (0, 0, 0)),
        ('ButtonText', (0, 0, 0)),
        ('Base', (239, 235, 231)),
        ('Window', (239, 235, 231)),
        ('Highlight', (255, 170, 127)),
    )

    def _build_palette(self):
        """Return the QPalette the generated code built brush-by-brush."""
        palette = QtGui.QPalette()
        for group in (QtGui.QPalette.Active, QtGui.QPalette.Inactive, QtGui.QPalette.Disabled):
            for role_name, rgb in self._PALETTE_ROLES:
                brush = QtGui.QBrush(QtGui.QColor(*rgb))
                brush.setStyle(QtCore.Qt.SolidPattern)
                palette.setBrush(group, getattr(QtGui.QPalette, role_name), brush)
        return palette

    def setupUi(self, form_cuotas_vencidas_30dias):
        """Create all widgets, layouts and styles for the report window."""
        form_cuotas_vencidas_30dias.setObjectName("form_cuotas_vencidas_30dias")
        form_cuotas_vencidas_30dias.resize(488, 282)
        form_cuotas_vencidas_30dias.setStyleSheet("font: 75 11pt \"KacstOne\";\n"
"selection-background-color: rgb(255, 170, 127);\n"
"color: rgb(0, 0, 0);\n"
"background-color: rgba(136, 3, 3, 100);\n"
"\n"
"")
        self.gridLayout_2 = QtWidgets.QGridLayout(form_cuotas_vencidas_30dias)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.tabWidget = QtWidgets.QTabWidget(form_cuotas_vencidas_30dias)
        self.tabWidget.setPalette(self._build_palette())
        self.tabWidget.setStyleSheet("background-color: rgb(239, 235, 231);\n"
"\n"
"")
        self.tabWidget.setObjectName("tabWidget")
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.gridLayout = QtWidgets.QGridLayout(self.tab)
        self.gridLayout.setObjectName("gridLayout")
        # Row 0: 30-day overdue report (label + button).
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.lineEdit = QtWidgets.QLineEdit(self.tab)
        self.lineEdit.setEnabled(False)
        self.lineEdit.setStyleSheet("background-color: rgb(222, 175, 153);\n"
"\n"
"")
        self.lineEdit.setObjectName("lineEdit")
        self.verticalLayout.addWidget(self.lineEdit)
        self.boton_generar = QtWidgets.QPushButton(self.tab)
        self.boton_generar.setStyleSheet("background-color: rgb(251, 204, 193);")
        self.boton_generar.setObjectName("boton_generar")
        self.verticalLayout.addWidget(self.boton_generar)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        # Row 1: 60-day overdue report.
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.label_2 = QtWidgets.QLabel(self.tab)
        self.label_2.setStyleSheet("background-color: rgb(222, 175, 153);\n"
"\n"
"")
        self.label_2.setObjectName("label_2")
        self.verticalLayout_2.addWidget(self.label_2)
        self.boton_generar_60_dias = QtWidgets.QPushButton(self.tab)
        self.boton_generar_60_dias.setStyleSheet("background-color: rgb(251, 204, 193);")
        self.boton_generar_60_dias.setObjectName("boton_generar_60_dias")
        self.verticalLayout_2.addWidget(self.boton_generar_60_dias)
        self.gridLayout.addLayout(self.verticalLayout_2, 1, 0, 1, 1)
        # Row 2: 90-day overdue report.
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.label_3 = QtWidgets.QLabel(self.tab)
        self.label_3.setStyleSheet("background-color: rgb(222, 175, 153);\n"
"\n"
"")
        self.label_3.setObjectName("label_3")
        self.verticalLayout_3.addWidget(self.label_3)
        self.boton_generar_90_dias = QtWidgets.QPushButton(self.tab)
        self.boton_generar_90_dias.setStyleSheet("background-color: rgb(251, 204, 193);")
        self.boton_generar_90_dias.setObjectName("boton_generar_90_dias")
        self.verticalLayout_3.addWidget(self.boton_generar_90_dias)
        self.gridLayout.addLayout(self.verticalLayout_3, 2, 0, 1, 1)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("Íconos/maquina-de-facturacion-electronica-con-escaner.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.tabWidget.addTab(self.tab, icon, "")
        self.gridLayout_2.addWidget(self.tabWidget, 0, 0, 1, 1)
        self.retranslateUi(form_cuotas_vencidas_30dias)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(form_cuotas_vencidas_30dias)

    def retranslateUi(self, form_cuotas_vencidas_30dias):
        """Apply all user-visible (Spanish) strings; kept verbatim from the .ui."""
        _translate = QtCore.QCoreApplication.translate
        form_cuotas_vencidas_30dias.setWindowTitle(_translate("form_cuotas_vencidas_30dias", "Reportes Morosos"))
        self.lineEdit.setText(_translate("form_cuotas_vencidas_30dias", "LISTADO DE MOROSOS CON CUOTAS VENCIDAS A 30 DÍAS"))
        self.boton_generar.setText(_translate("form_cuotas_vencidas_30dias", "Generar"))
        self.label_2.setText(_translate("form_cuotas_vencidas_30dias", " LISTADO DE MOROSOS CON CUOTAS VENCIDAS A 60 DÍAS"))
        self.boton_generar_60_dias.setText(_translate("form_cuotas_vencidas_30dias", "Generar"))
        self.label_3.setText(_translate("form_cuotas_vencidas_30dias", " LISTADO DE MOROSOS CON CUOTAS VENCIDAS A 90 DÍAS"))
        self.boton_generar_90_dias.setText(_translate("form_cuotas_vencidas_30dias", "Generar"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("form_cuotas_vencidas_30dias", "Generador por Vencimientos"))
| [
"[email protected]"
] | |
e7bd2854db85a4f829ca05755bd0a9ded7ee7c71 | a79b734bec4bb0dacfee46f0fb8f33f2872581a9 | /p_gen_data.py | dc876af9fb474224758838e2b4821abbead64689 | [] | no_license | ryosuke071111/cnn_seq2seq | 530d27e0efa96fe9181c0708000897261ca489b6 | fda5ffa68b37d3f537ccb8b5ec142c1904c455a8 | refs/heads/master | 2020-07-02T15:53:20.137133 | 2019-08-12T11:23:49 | 2019-08-12T11:23:49 | 201,579,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | import glob
from tensorflow.core.example import example_pb2
import struct
from nltk.tokenize import word_tokenize, sent_tokenize
from utils import *
from tqdm import tqdm
PATH="/home/ryosuke/desktop/data_set/cnn_stories_tokenized/"
def sent_split(text):
    """Tokenize *text* into sentences, whitespace-split each sentence, and
    return the flattened list of word tokens."""
    return [token for sentence in sent_tokenize(text) for token in sentence.split()]
def data_generate(vocab, num_of_data):
    """Read up to `num_of_data` (article, abstract) pairs from the train.bin dump.

    Returns five parallel lists: articles and abstracts encoded with the fixed
    vocabulary, the per-article OOV word lists, and the article/abstract id
    sequences encoded with the article-extended vocabulary (pointer-generator
    style, where in-article OOVs get temporary ids).
    """
    file = open(PATH+"train.bin","rb")
    # file = open(PATH+"val.bin","rb")
    # (swap in val.bin above to run against the validation split instead)
    articles = []
    abstracts = []
    articles_extend = []
    abstracts_extend = []
    oovs = []
    print('# of data', num_of_data)
    i=0
    pbar = tqdm(total=num_of_data)
    while i<num_of_data:
        len_bytes = file.read(8)
        if not len_bytes:
            print('finishied reading this files')
            break
        # Carve one length-prefixed record out of the continuous byte stream.
        str_len = struct.unpack('q',len_bytes)[0]
        example_str = struct.unpack('%ds' % str_len, file.read(str_len))[0]
        # Extract the article/abstract features from the serialized Example.
        data = example_pb2.Example.FromString(example_str)
        article = data.features.feature["article"].bytes_list.value[0]
        if len(article)<=0:
            # Empty article: drop the record (the stream has already advanced).
            continue
        abstract = data.features.feature["abstract"].bytes_list.value[0]
        # Decode bytes to str and truncate; abstract2sents handles the
        # per-sentence <s>...</s> markers embedded in the abstract.
        article = sent_split(article.decode())[:MAX_INPUT_LENGTH]
        abstract = " ".join(vocab.abstract2sents(abstract.decode())).split()[:MAX_OUTPUT_LENGTH]
        # Token ids with the article-extended vocabulary (tracks this article's OOVs).
        article_vocab_extend, oov = vocab.article2ids(article)
        abstract_vocab_extend = [START_DECODING_NO]+vocab.abstract2ids(abstract, oov)+[STOP_DECODING_NO]
        # Token ids with the fixed vocabulary (OOVs collapse to the UNK id).
        article = [vocab._word2id(word) for word in article]
        abstract = [START_DECODING_NO]+[vocab._word2id(word) for word in abstract]+[STOP_DECODING_NO]
        articles.append(article)
        abstracts.append(abstract)
        oovs.append(oov)
        articles_extend.append(article_vocab_extend)
        abstracts_extend.append(abstract_vocab_extend)
        i+=1
        pbar.update(1)
    print('data successfully constructed!')
    print()
    return articles, abstracts, oovs, articles_extend, abstracts_extend
"[email protected]"
] | |
ea43039889c71780bfb652cd23a7ffd233c9b35a | 81fe7f2faea91785ee13cb0297ef9228d832be93 | /AdventOfCode/19/day09.py | 2aa42d5b34b1bbbd2b3ff0b8293a3fedf050b62b | [] | no_license | blegloannec/CodeProblems | 92349c36e1a35cfc1c48206943d9c2686ea526f8 | 77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e | refs/heads/master | 2022-05-16T20:20:40.578760 | 2021-12-30T11:10:25 | 2022-04-22T08:11:07 | 54,330,243 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | #!/usr/bin/env python3
import sys
from collections import deque
P = list(map(int,sys.stdin.readline().strip().split(',')))
class IntcodeComputer:
    """Advent of Code 2019 Intcode virtual machine (day 9 feature set).

    Supports position (0), immediate (1) and relative (2) parameter modes,
    opcode 9 (adjust relative base), and pauses on input starvation so a
    caller can feed more input and call run() again.  Outputs accumulate in
    ``self.Out`` and are also printed as they are produced.
    """

    def __init__(self, P, In=None):
        self.P = P[:] + [0]*10**3  # program copy & padding (extra zeroed memory)
        self.i = 0  # instruction pointer
        if In is None:
            self.In = deque()
        elif isinstance(In, list):
            self.In = deque(In)
        else:
            assert isinstance(In, deque)
            self.In = In
        self.Out = []  # could be deque too
        self.halt = False
        self.base = 0  # relative base for mode-2 addressing

    def run(self):
        assert not self.halt
        # value(k): raw k-th word after the opcode.
        # mode(k): parameter mode of the k-th operand, decoded from the digits
        #          of the opcode word (0 position, 1 immediate, 2 relative).
        # addr(k): resolved address for a write operand.
        # param(k): resolved value for a read operand.
        value = lambda k: self.P[self.i+k]
        mode = lambda k: (value(0)//10**(1+k))%10
        addr = lambda k: self.base+value(k) if mode(k)==2 else value(k)
        param = lambda k: value(k) if mode(k)==1 else self.P[addr(k)]
        while True:
            op = value(0) % 100
            if op==1: # add
                self.P[addr(3)] = param(1) + param(2)
                self.i += 4
            elif op==2: # mul
                self.P[addr(3)] = param(1) * param(2)
                self.i += 4
            elif op==3: # input
                if self.In:
                    x = self.In.popleft()
                    #print('input %d' % x)
                    self.P[addr(1)] = x
                    self.i += 2
                else:
                    # No input available: pause without halting so the caller
                    # can push to self.In and resume with another run() call.
                    break
            elif op==4: # output
                self.Out.append(param(1))
                print(self.Out[-1])
                self.i += 2
            elif op==5: # jnz (jump-if-true)
                if param(1)!=0:
                    self.i = param(2)
                else:
                    self.i += 3
            elif op==6: # jz (jump-if-false)
                if param(1)==0:
                    self.i = param(2)
                else:
                    self.i += 3
            elif op==7: # lt (store 1 if param1 < param2 else 0)
                self.P[addr(3)] = 1 if param(1)<param(2) else 0
                self.i += 4
            elif op==8: # eq (store 1 if param1 == param2 else 0)
                self.P[addr(3)] = 1 if param(1)==param(2) else 0
                self.i += 4
            elif op==9: # incr base (adjust relative base)
                self.base += param(1)
                self.i += 2
            else:
                assert op==99  # halt
                self.halt = True
                break
# Part 1
IntcodeComputer(P,[1]).run()
# Part 2
IntcodeComputer(P,[2]).run()
| [
"[email protected]"
] | |
b15823ed5db74a6e8478495832ba2993301dad62 | 73e147e1d49656fafba5d4bf84df5ded2c4dca73 | /team_9/cocos/test/test_tmx_autotest.py | bd9efdc51d8246157010fa676a115abee804a8b4 | [
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause"
] | permissive | Donnyvdm/dojo19 | 2278747366c57bfc80eb9ee28ca617ec0a79bae3 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | refs/heads/master | 2020-07-26T12:22:15.882800 | 2019-09-15T20:34:36 | 2019-09-15T20:34:36 | 208,642,183 | 1 | 0 | BSD-3-Clause | 2019-09-15T18:57:53 | 2019-09-15T18:57:52 | null | UTF-8 | Python | false | false | 1,628 | py | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 1.1, s, t 2.1, s, t 3.1, s, t 4.1, s, t 5.1, s, t 6.1, s, q"
tags = "scrolling, ScrollingManager, TMX"
import pyglet
pyglet.resource.path.append(pyglet.resource.get_script_home())
pyglet.resource.reindex()
import cocos
from cocos import tiles, layer
from cocos.actions import CallFunc, ScaleTo, Delay
from cocos.director import director
class TestScene(cocos.scene.Scene):
    """Scene that scrolls a TMX map through scripted focus points, zooms out,
    enables the tile debug overlay, then resizes the window — one step per
    second, matching the "t N.1, s" screenshot schedule in ``testinfo``."""

    def __init__(self):
        super(TestScene, self).__init__()
        scroller = layer.ScrollingManager()
        scrollable = tiles.load('road-map.tmx')['map0']
        scroller.add(scrollable)
        self.add(scroller)
        # Sequence of focus moves / zoom / debug toggle / resize, 1 s apart.
        template_action = ( CallFunc(scroller.set_focus, 0, 0) + Delay(1) +
                            CallFunc(scroller.set_focus, 768, 0) + Delay(1) +
                            CallFunc(scroller.set_focus, 768, 768) +Delay(1) +
                            CallFunc(scroller.set_focus, 1500, 768) +Delay(1) +
                            ScaleTo(0.75, 1) + Delay(1) +
                            CallFunc(scrollable.set_debug, True) + Delay(1) +
                            CallFunc(director.window.set_size, 800, 600)
                            )
        scroller.do(template_action)
def main():
    """Open a non-autoscaling, resizable window and run the scrolling test scene."""
    director.init(width=600, height=300, autoscale=False, resizable=True)
    main_scene = TestScene()
    director.run(main_scene)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ff895b5631505fd586f59614875869e82b3a902e | 3468fe20cd1128eb8e18354c30490421e504e4af | /portal/settings.py | 8b7e702f2ea68b929c9a21e3e46a15e97cb220eb | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | djpeluca/utopia-cms | 7da45422ffc4f1f397f385ea37243f2745a758de | 1e444afea565fdc734abf449b8ebe9b7c2c47d80 | refs/heads/main | 2023-08-19T23:04:44.666527 | 2021-10-27T01:55:11 | 2021-10-27T01:55:11 | 387,323,009 | 0 | 0 | BSD-3-Clause | 2021-07-19T03:03:48 | 2021-07-19T03:03:48 | null | UTF-8 | Python | false | false | 14,263 | py | # -*- coding: utf-8 -*-
import sys
from os.path import abspath, basename, dirname, join, realpath
from datetime import datetime
import mimetypes
from freezegun import freeze_time
from django.contrib.messages import constants as messages
LAST_OLD_DAY = datetime(2014, 7, 22)
FIRST_DAY = datetime(2009, 8, 1)
PROJECT_ABSOLUTE_DIR = dirname(abspath(__file__))
PROJECT_NAME = basename(PROJECT_ABSOLUTE_DIR)
APPS_DIR = join(PROJECT_ABSOLUTE_DIR, "apps")
if APPS_DIR not in sys.path:
sys.path.insert(0, APPS_DIR)
SITE_ROOT = dirname(realpath(__file__))
STATIC_URL = '/static/'
STATIC_ROOT = '%s/static/' % SITE_ROOT
SITE_DOMAIN = 'example.com'
URL_SCHEME = "https"
DEFAULT_URL_SCHEME = URL_SCHEME
# django-mobile
FLAVOURS = ('full', 'mobile', 'amp')
FLAVOURS_GET_PARAMETER = u'display'
FLAVOURS_COOKIE_SECURE = True
# Multi sub-domain secure cookie
SESSION_COOKIE_DOMAIN = ".ladiaria.com.uy"
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_AGE = 2592000 # 30 days
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SAMESITE = 'None'
SESSION_COOKIE_SAMESITE_FORCE_ALL = True
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
AMP_DEBUG = False
RAW_SQL_DEBUG = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
INSTALLED_APPS = (
'django_mobile',
'django.contrib.staticfiles',
'admin_shortcuts',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.sites',
'background_task',
'subdomains',
'audiologue',
'tagging',
'core.config.CoreConfig',
'core.attachments',
'django_extensions',
'generator',
'memcached',
'shoutbox',
'thedaily',
'videologue',
'short',
'adzone',
'exchange',
'faq',
'captcha',
'photologue',
'sortedm2m',
'photologue_ladiaria',
'robots',
'search',
'sorl.thumbnail',
'shorturls',
'less',
'django_user_agents',
'updown',
'materialize',
'crispy_forms',
'crispy_forms_materialize',
'actstream',
'django.contrib.messages',
'signupwall',
'homev3',
'cartelera.config.CarteleraConfig',
'markdown',
'django_bleach',
'django_markdown',
'django_markup',
'comunidad',
'appconf',
'star_ratings',
'tagging_autocomplete_tagit',
'avatar',
'endless_pagination',
'notification',
'django.contrib.flatpages',
'epubparser',
'dashboard',
'django_filters',
'rest_framework',
'compressor',
'favit',
'social_django',
)
SITE_ID = 1
# photologue app need to add a custom migration
MIGRATION_MODULES = {'photologue': 'photologue_ladiaria.photologue_migrations'}
ADMIN_SHORTCUTS = [
{
'title': u'Edición',
'shortcuts': [
{
'url_name': 'admin:core_edition_changelist',
'title': 'Ediciones',
},
{
'url_name': 'admin:core_edition_add',
'title': u'Crear edición',
},
{
'url_name': 'admin:core_article_add',
'title': u'Crear Artículo',
},
],
},
{
'title': 'Reportes',
'shortcuts': [
{
'url': '/dashboard/',
'title': u'Estadísticas de usuarios',
},
],
},
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGINATE_BY': 20,
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
}
ACTSTREAM_SETTINGS = {'FETCH_RELATIONS': False, 'USE_PREFETCH': True}
CRISPY_ALLOWED_TEMPLATE_PACKS = ('bootstrap', 'uni_form', 'bootstrap3', 'bootstrap4', 'materialize_css_forms')
CRISPY_TEMPLATE_PACK = 'materialize_css_forms'
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'django_cookies_samesite.middleware.CookiesSameSite',
'core.middleware.AMP.FlavoursCookieSecure',
'django_mobile.cache.middleware.UpdateCacheFlavourMiddleware',
'django.middleware.cache.UpdateCacheMiddleware', # runs during the response phase (top -> last)
'core.middleware.cache.AnonymousResponse', # hacks cookie header for anon users (resp phase)
'django.contrib.sessions.middleware.SessionMiddleware',
'subdomains.middleware.SubdomainMiddleware',
'subdomains.middleware.SubdomainURLRoutingMiddleware',
'libs.middleware.url.UrlMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'core.middleware.threadlocals.ThreadLocals',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_user_agents.middleware.UserAgentMiddleware',
'signupwall.middleware.SignupwallMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'django_mobile.middleware.MobileDetectionMiddleware',
'django_mobile.middleware.SetFlavourMiddleware',
'core.middleware.cache.AnonymousRequest', # hacks cookie header for anon users (req phase)
'django_mobile.cache.middleware.FetchFromCacheFlavourMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware', # runs during the request phase (top -> first)
'social_django.middleware.SocialAuthExceptionMiddleware',
'core.middleware.AMP.OnlyArticleDetail',
)
LANGUAGES = (
('es', 'Español'),
)
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'es'
LOCAL_LANG = 'es'
DEFAULT_CHARSET = 'utf-8'
LOCAL_COUNTRY = 'UY'
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # 2006-10-25, 25/10/2006, 25/10/06
)
DATETIME_FORMAT = 'j N, Y, P'
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M', # '10/25/2006 14:30:59'
)
ROOT_URLCONF = 'urls'
DATA_UPLOAD_MAX_NUMBER_FIELDS = 2000

# Base
BASE_SUB = None
DEFAULT_PUB = 'default'
FIRST_DAY_OF_WEEK = 0 # 0 is Sunday
# Convert to calendar module, where 0 is Monday :/
FIRST_DAY_OF_WEEK_CAL = (FIRST_DAY_OF_WEEK - 1) % 7
HOME_PUBLICATIONS = []
# NOTE(review): secret value hardcoded in settings -- should be overridden in
# local_settings.py (imported below) before deploying.
HASHIDS_SALT = 'top_secret_salt_phrase'

# A dictionary of urlconf module paths, keyed by their subdomain.
SUBDOMAIN_URLCONFS = {
    None: 'urls', # no subdomain, e.g. ``example.com``
}

# MEDIA
MEDIA_ROOT = PROJECT_ABSOLUTE_DIR + '/media/'
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Convenience URL prefixes derived from MEDIA_URL.
CSS_URL = '%scss/' % MEDIA_URL
IMG_URL = '%simg/' % MEDIA_URL
JS_URL = '%sjs/' % MEDIA_URL
SWF_URL = '%sswf/' % MEDIA_URL
# Register SVG mime types so they are served with the correct content type.
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)

# AVATAR
AVATAR_DEFAULT_IMAGE = 'identicon'

# Default cache: memcached on localhost.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': ['127.0.0.1:11211']
    }
}

# required for django mobile.
# TODO: search for a django-mobile replacement because last version is not compatible with new "TEMPLATE" setting.
TEMPLATE_LOADERS = (
    (
        'django_mobile.loader.CachedLoader',
        (
            'django_mobile.loader.Loader',
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        ),
    ),
)

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [join(PROJECT_ABSOLUTE_DIR, 'templates'), join(PROJECT_ABSOLUTE_DIR, 'apps')],
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'context_processors.urls',
                'context_processors.site',
                'context_processors.publications',
                'context_processors.gtm',
                'context_processors.main_menus',
                'django.template.context_processors.static',
                'apps.core.context_processors.aniosdias',
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
                'django.contrib.messages.context_processors.messages',
                "django.template.context_processors.i18n",
                'django.template.context_processors.tz',
                'adzone.context_processors.get_source_ip',
                'django_mobile.context_processors.flavour',
                'apps.thedaily.context_processors.permissions',
                'django.template.context_processors.csrf',
            ],
            # Custom loaders defined above (django-mobile flavour-aware loading).
            'loaders': TEMPLATE_LOADERS,
        },
    }
]
FIXTURE_DIRS = (join(PROJECT_ABSOLUTE_DIR, 'fixtures'), )

# EMAIL
EMAIL_FAIL_SILENTLY = False
NOTIFICATIONS_FROM_NAME = 'utopia cms'
NOTIFICATIONS_FROM_ADDR1 = '[email protected]'
NOTIFICATIONS_FROM_ADDR2 = '[email protected]'
NOTIFICATIONS_TO_ADDR = '[email protected]'
NOTIFICATIONS_FROM_MX = NOTIFICATIONS_FROM_ADDR1
NEWSLETTER_IMG_FORMAT = 'jpg'
# Log file path template; the two %s placeholders are filled at send time.
SENDNEWSLETTER_LOGFILE = '/var/log/utopiacms/sendnewsletter/%s-%s.log'

# apps
# background tasks
MAX_ATTEMPTS = 1
# core
# publications that use the root url as their home page
CORE_PUBLICATIONS_USE_ROOT_URL = [DEFAULT_PUB]
# slugs of the categories to update their modules after some modifications
CORE_UPDATE_CATEGORY_HOMES = []
# log user visits, disable on critical performance issues
CORE_LOG_ARTICLE_VIEWS = True
# enable related articles in article detail
CORE_ENABLE_RELATED_ARTICLES = True
# mongodb databases for user and anon article visits
CORE_MONGODB_ARTICLEVIEWEDBY = 'ldsocial_core_articleviewedby'
CORE_MONGODB_ARTICLEVISITS = 'ldsocial_core_articlevisits'
SIGNUPWALL_MONGODB_VISITOR = 'ldsocial_signupwall_visitor'
# Change to false if the signupwall middleware is removed
SIGNUPWALL_ENABLED = True

# thedaily
SUBSCRIPTION_EMAIL_SUBJECT = u'Nueva suscripción'
PROMO_EMAIL_SUBJECT = u'Nueva promoción'
SUBSCRIPTION_EMAIL_TO = [NOTIFICATIONS_TO_ADDR]
SUBSCRIPTION_BY_PHONE_EMAIL_TO = SUBSCRIPTION_EMAIL_TO
MAX_USERS_API_SESSIONS = 3
# (code, human-readable label) pairs for subscription types.
THEDAILY_SUBSCRIPTION_TYPE_CHOICES = (
    ('DDIGM', u'Suscripción Ilimitada'),
    ('PAPYDIM', u'Suscripción papel'),
)
THEDAILY_PROVINCE_CHOICES = []
THEDAILY_WELCOME_TEMPLATE = 'welcome.html'
THEDAILY_DEFAULT_CATEGORY_NEWSLETTERS = [] # category slugs for add default category newsletters in new accounts

# photologue
DEFAULT_BYLINE = 'Difusión, S/D de autor.'

# django-tagging and autocomplete-taggit
FORCE_LOWERCASE_TAGS = False
TAGGING_AUTOCOMPLETE_JS_BASE_URL = '%sjs/jquery-tag-it-utopia/' % STATIC_URL
TAGGING_AUTOCOMPLETE_JQUERY_UI_FILE = 'jquery-ui.min.js'

# home
PUBLISHING_TIME = '05:00' # 'HH:MM'
# default logos
HOMEV3_LOGO = HOMEV3_LOGO_FOOTER = 'img/logo-utopia.png'
HOMEV3_SECONDARY_LOGO = 'img/logo-utopia-secondary.png'
HOMEV3_LOGO_PRINTABLE = 'img/logo-utopia-printable.png'
HOMEV3_LOGO_ALT_TEXT = 'utopia logo'
# default footer template
HOMEV3_FOOTER_TEMPLATE = 'footer.html'

# django reCaptcha
NOCAPTCHA = True
RECAPTCHA_USE_SSL = True

# exchange
EXCHANGE_UPDATE_MODULE = 'exchange.brou'

# adzone
ADZONE_LOG_AD_IMPRESSIONS = True
ADZONE_LOG_AD_CLICKS = True

# Short-URL model registry: one-letter prefix -> app_label.model.
SHORTEN_MODELS = {
    'A': 'core.article',
    'U': 'short.url',
}

TINYMCE_DEFAULT_CONFIG = {
    'plugins': "table,spellchecker,paste,searchreplace",
    'theme': "advanced",
}
AUTH_USER_EMAIL_UNIQUE = True
AUTH_PROFILE_MODULE = 'thedaily.Subscriber'

# login_required decorator redirects here
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/logged-in/'
LOGIN_ERROR_URL = '/usuarios/error/login/'
# NOTE(review): Django's built-in setting is spelled MESSAGE_TAGS -- confirm
# this custom name is actually read somewhere before relying on it.
MESSAGETAGS = {messages.ERROR: 'danger', }

AUTHENTICATION_BACKENDS = (
    'social_core.backends.google.GoogleOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)

# Opciones de django-social-auth (django-social-auth options)
SOCIAL_ADMIN_EMAIL_TO = ['[email protected]']
SOCIAL_AUTH_GOOGLE_OAUTH2_STRATEGY = 'social_django.strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage'
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = [
    'https://www.googleapis.com/auth/plus.me',
    'https://www.googleapis.com/auth/userinfo.email',
    'https://www.googleapis.com/auth/userinfo.profile']
# Pipeline order matters: custom steps (associate_by_email, get_phone_number)
# are interleaved with the stock social_core steps.
SOCIAL_AUTH_PIPELINE = (
    'social_core.pipeline.social_auth.social_details',
    'social_core.pipeline.social_auth.social_uid',
    'social_core.pipeline.social_auth.auth_allowed',
    'social_core.pipeline.social_auth.social_user',
    'social_core.pipeline.user.get_username',
    'social_core.pipeline.social_auth.associate_by_email',
    'social_core.pipeline.user.create_user',
    'libs.social_auth_pipeline.get_phone_number',
    'social_core.pipeline.social_auth.associate_user',
    'social_core.pipeline.social_auth.load_extra_data',
    'social_core.pipeline.user.user_details',
)
SOCIAL_AUTH_URL_NAMESPACE = 'social'

# django-compressor: external compilers for LESS and SCSS assets.
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
    ('text/x-scss', 'sass --scss {infile} {outfile}'),
)

BLEACH_STRIP_TAGS = True

# Online sync User fields with CRM (empty, using hardcoded fields only)
CRM_UPDATE_SUBSCRIBER_FIELDS = {}
# Online sync User fields with CRM enabled by default
CRM_UPDATE_USER_ENABLED = True

# PWA
PWA_SERVICE_WORKER_TEMPLATE = 'core/templates/sw/serviceworker.js'
PWA_SERVICE_WORKER_VERSION = 1

# Best-effort import of a project-level utils module; a failure is only printed.
try:
    UTILS_MODULE = __import__('utils', fromlist=[PROJECT_ABSOLUTE_DIR])
except ImportError as e:
    print(e)

FREEZE_TIME = None

# Override previous settings with values in local_settings.py settings file.
# URL_SCHEME, SITE_DOMAIN, LOCAL_LANG, LOCAL_COUNTRY (and optionally
# FREEZE_TIME / freeze_time) are expected to be provided by local_settings.
from local_settings import *

SITE_URL = '%s://%s/' % (URL_SCHEME, SITE_DOMAIN)
ROBOTS_SITEMAP_URLS = [SITE_URL + 'sitemap.xml']
LOCALE_NAME = LOCAL_LANG + '_' + LOCAL_COUNTRY + '.UTF8'

# Optionally freeze "now" for the whole process (useful for demos/tests).
if FREEZE_TIME:
    freezer = freeze_time(FREEZE_TIME)
    freezer.start()

ABSOLUTE_URL_OVERRIDES = {'auth.user': SITE_URL + "usuarios/perfil/editar/"}
| [
"[email protected]"
] | |
f19cac3711c4c978703670b4f20e4a32000bc39d | 573a66e4f4753cc0f145de8d60340b4dd6206607 | /JS-CS-Detection-byExample/Dataset (ALERT 5 GB)/362764/shogun-2.0.0/shogun-2.0.0/examples/undocumented/python_modular/kernel_linear_byte_modular.py | b06ecaf888f155e9958d54c81554c292997b5de8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | mkaouer/Code-Smells-Detection-in-JavaScript | 3919ec0d445637a7f7c5f570c724082d42248e1b | 7130351703e19347884f95ce6d6ab1fb4f5cfbff | refs/heads/master | 2023-03-09T18:04:26.971934 | 2022-03-23T22:04:28 | 2022-03-23T22:04:28 | 73,915,037 | 8 | 3 | null | 2023-02-28T23:00:07 | 2016-11-16T11:47:44 | null | UTF-8 | Python | false | false | 974 | py | #!/usr/bin/env python
###########################################################################
# linear kernel on byte features
###########################################################################
from tools.load import LoadMatrix
from numpy import ubyte

lm=LoadMatrix()
# Train/test matrices loaded from disk and cast to unsigned bytes.
traindat = ubyte(lm.load_numbers('../data/fm_train_byte.dat'))
testdat = ubyte(lm.load_numbers('../data/fm_test_byte.dat'))
# Parameter sets consumed via parameter_list[i] by the example runner below.
parameter_list=[[traindat,testdat],[traindat,testdat]]
def kernel_linear_byte_modular (fm_train_byte=traindat,fm_test_byte=testdat):
    """Build a linear kernel over byte features.

    Computes the train-vs-train kernel matrix, re-initializes the kernel for
    train-vs-test, computes that matrix as well, and returns the kernel object.
    """
    from shogun.Kernel import LinearKernel
    from shogun.Features import ByteFeatures

    train_feats = ByteFeatures(fm_train_byte)
    test_feats = ByteFeatures(fm_test_byte)

    lin_kernel = LinearKernel(train_feats, train_feats)
    lin_kernel.get_kernel_matrix()      # train-vs-train (computed, not returned)
    lin_kernel.init(train_feats, test_feats)
    lin_kernel.get_kernel_matrix()      # train-vs-test (computed, not returned)
    return lin_kernel
if __name__=='__main__':
    # Run the example with the first parameter set when invoked directly.
    print('LinearByte')
    kernel_linear_byte_modular(*parameter_list[0])
| [
"[email protected]"
] | |
c172f69311e43071b174976da7a5783ee9d8d304 | e7fcc1d64cd95805918ab1b5786bf81a92f973ef | /2016/day01/day01.py | 7d9cd2be2ff4f76e40d4eb42d46a8370f43b0be6 | [] | no_license | trolen/advent-of-code | 8145c1e36fea04e53d4b7a885efcc2da71fbfe57 | 0a4e022a6a810d86e044a15036a2f5778f0d38af | refs/heads/master | 2023-02-26T13:11:58.341006 | 2023-02-20T23:22:27 | 2023-02-20T23:22:27 | 54,579,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | #! /usr/bin/env python3
DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)]  # unit steps for N, E, S, W


class Position:
    """Walker on a grid: heading, coordinates and (optionally) visit history.

    Used for Advent of Code 2016 day 1: follow "R<steps>"/"L<steps>"
    instructions and report the Manhattan distance from the origin.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Return to the origin, facing north, with an empty visit history."""
        self._x = 0
        self._y = 0
        self._direction = 0
        self._positions = []

    def get_distance(self):
        """Manhattan distance from the origin to the current position."""
        return abs(self._x) + abs(self._y)

    def apply_instruction(self, instruction, unique):
        """Apply one turn-and-walk instruction such as ``R5`` or ``L3``.

        With ``unique`` set, the walk is taken one step at a time and stops
        as soon as a previously-visited point is reached; returns True in
        that case, False otherwise.
        """
        if instruction[0].upper() == 'R':
            self._direction = (self._direction + 1) % 4
        else:
            self._direction = (self._direction - 1) % 4
        steps = int(instruction[1:])
        dx, dy = DIRECTIONS[self._direction]

        if not unique:
            # Jump straight to the end of the walk.
            self._x += dx * steps
            self._y += dy * steps
            return False

        # Step point by point so revisits can be detected mid-walk.
        for _ in range(steps):
            self._x += dx
            self._y += dy
            here = (self._x, self._y)
            if here in self._positions:
                return True
            self._positions.append(here)
        return False

    def apply_instructions(self, instructions, unique=False):
        """Apply a comma-separated instruction string, e.g. ``"R2, L3"``.

        When ``unique`` is True, stops at the first revisited point.
        """
        for token in instructions.split(','):
            if self.apply_instruction(token.strip(), unique):
                break
if __name__ == '__main__':
    # Read the puzzle input and solve both parts with the same instructions.
    with open('input.txt', 'rt') as infile:
        route = infile.read()
    tracker = Position()
    tracker.apply_instructions(route)
    print('Part One: {0}'.format(tracker.get_distance()))
    tracker.reset()
    tracker.apply_instructions(route, unique=True)
    print('Part Two: {0}'.format(tracker.get_distance()))
"[email protected]"
] | |
be73b3b89032e500668e954d0d7cbf1e4e038763 | ba0e07b34def26c37ee22b9dac1714867f001fa5 | /azure-graphrbac/azure/graphrbac/models/password_credential_paged.py | f4d5ef494856e72642ca0a714abfe73012a38552 | [
"MIT"
] | permissive | CharaD7/azure-sdk-for-python | b11a08ac7d24a22a808a18203072b4c7bd264dfa | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | refs/heads/master | 2023-05-12T12:34:26.172873 | 2016-10-26T21:35:20 | 2016-10-26T21:35:20 | 72,448,760 | 1 | 0 | MIT | 2023-05-04T17:15:01 | 2016-10-31T15:14:09 | Python | UTF-8 | Python | false | false | 914 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class PasswordCredentialPaged(Paged):
    """
    A paging container for iterating over a list of PasswordCredential object
    """

    # msrest (de)serialization map: the Paged attribute 'next_link' comes from
    # the response field 'nextLink' and 'current_page' from 'value', a list of
    # PasswordCredential items.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[PasswordCredential]'}
    }

    def __init__(self, *args, **kwargs):
        # All paging behaviour is inherited from msrest's Paged base class.
        super(PasswordCredentialPaged, self).__init__(*args, **kwargs)
"[email protected]"
] | |
8e39496238ae7abc1a45eade812fa000e74ef3bb | 11e81ec279ec17196bdbc75ce334305e95929b52 | /8주차 이분탐색,그래프/파티/김승욱.py | 526773d77fb4084503893cb8d8223ccb823b07d7 | [] | no_license | wheejoo/PythonCodeStudy | 70992e1723d621fec933786dd2b3faa5d2518763 | 9e324f9deee1be044c07b64e7480c6bfac42876c | refs/heads/main | 2023-07-28T23:11:19.335117 | 2021-10-03T11:37:09 | 2021-10-03T11:37:09 | 382,866,256 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # x를 시작으로 가장 먼곳 간다음 그 곳에서 다시 x까지 거리 구하려햇는데
# 왕복 거리가 다르므로 이렇게 풀면 안됨
# 모든 정점에서 x까지 최단거리 구하고
# 다시 x에서 모든 거리까지 최단거리 구해서 서로 더한다음
# 가장 큰 값구함
import heapq
import sys
input = sys.stdin.readline
n, m, x = map(int,input().split())
INF = sys.maxsize
grpah = [[] for _ in range(n+1)]
maxx = 0
for i in range(m):
a,b,c = map(int, input().split())
grpah[a].append((c,b))
def dijstra(index):
queue = []
distance[index] = 0
heapq.heappush(queue, (0, index))
while queue:
wei, now = heapq.heappop(queue)
if distance[now] < wei:
continue
for w, next in grpah[now]:
next_wei = w + wei
if next_wei < distance[next]:
distance[next] = next_wei
heapq.heappush(queue, (next_wei, next))
result = []
for i in range(1, n+1):
distance = [INF for _ in range(n+1)]
dijstra(i)
result.append(distance[x])
distance = [INF for _ in range(n+1)]
dijstra(x)
for i in range(len(result)):
maxx = max(maxx, result[i] + distance[i+1])
print(maxx)
| [
"[email protected]"
] | |
ddc056b2b37e8b40bdb6264c89407197605144ef | 566ce57c0a56a492895dc5b2e4b7ae1b49b301dd | /job/urls.py | 2ca32cf10b8d4220993e331027bcaabdfa5591c6 | [] | no_license | ingafter60/job-portal | 178e81c0f1d3fc4527242cf8c9bcc29c5d413ac9 | 6af2460927a29c914df74ea10172a731fcd528c6 | refs/heads/master | 2022-11-26T09:27:23.543390 | 2020-07-15T03:40:47 | 2020-07-15T03:40:47 | 279,142,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # job/urls.py
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('users/', include('users.urls')),
path('', include('jobs.urls')),
path('admin/', admin.site.urls),
] | [
"[email protected]"
] | |
68adf7e197ced3869d0d0a33c74355024394305e | 0beaf9d78d03100b2aebaaac38fb343d425f2b6a | /tests/regression/gsheet/test_chrome_gsheet_100r_number_chars_image.py | bc85a9757ae1b561082d29b318d73a1b600482a8 | [] | no_license | digitarald/Hasal | 462fc044bb4a754c8d76c0bfb0df519f1786fdcc | c496afae6ec2e3743148f3a6288b78f120100513 | refs/heads/master | 2021-01-13T14:29:44.471037 | 2016-11-04T10:49:19 | 2016-11-04T10:49:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from lib.perfBaseTest import PerfBaseTest
class TestSikuli(PerfBaseTest):
    """Hasal regression test: Chrome + Google Sheet (100 rows, numbers/EN chars/images)."""

    def setUp(self):
        # Standard per-test environment preparation from PerfBaseTest.
        super(TestSikuli, self).setUp()

    def test_chrome_gsheet_100r_number_chars_image(self):
        """Run the Sikuli script against the configured 100-row test sheet."""
        # Build the target sheet URL from the configured URL spec and target id.
        self.test_url = self.env.GSHEET_TEST_URL_SPEC % self.env.TEST_TARGET_ID_100R_NUMBER_ENCHAR_IMAGE
        # Run the Sikuli script and record its status.
        self.sikuli_status = self.sikuli.run_test(self.env.test_name, self.env.output_name, test_target=self.test_url, script_dp=self.env.test_script_py_dp)
| [
"[email protected]"
] | |
f89f2605057672e195be30599e8b17bd6843fffa | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/command_lib/container/gkeonprem/flags.py | 65a11cb83a4f743b2ab1aef526415180dfa7ce70 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 3,400 | py | # -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for flags in commands for Anthos GKE On-Prem clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import parser_arguments
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import resources
def GetAdminClusterMembershipResource(membership_name):
  """Parses a membership relative name into a resource reference.

  Args:
    membership_name: str, relative resource name of the membership, e.g.
      projects/PROJECT/locations/global/memberships/MEMBERSHIP.

  Returns:
    The parsed membership resource reference.
  """
  membership_collection = 'gkehub.projects.locations.memberships'
  return resources.REGISTRY.ParseRelativeName(
      membership_name, collection=membership_collection
  )
def AdminClusterMembershipAttributeConfig():
  """Builds the resource attribute config for an admin cluster membership."""
  membership_help = (
      'admin cluster membership of the {resource}, in the form of'
      ' projects/PROJECT/locations/global/memberships/MEMBERSHIP. '
  )
  return concepts.ResourceParameterAttributeConfig(
      name='admin_cluster_membership',
      help_text=membership_help,
  )
def LocationAttributeConfig():
  """Builds the resource attribute config for a Google Cloud location."""
  location_kwargs = {
      'name': 'location',
      'help_text': 'Google Cloud location for the {resource}.',
  }
  return concepts.ResourceParameterAttributeConfig(**location_kwargs)
def GetAdminClusterMembershipResourceSpec():
  """Builds the resource spec for an admin cluster membership."""
  membership_collection = 'gkehub.projects.locations.memberships'
  return concepts.ResourceSpec(
      membership_collection,
      resource_name='admin_cluster_membership',
      membershipsId=AdminClusterMembershipAttributeConfig(),
      locationsId=LocationAttributeConfig(),
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
  )
def AddAdminClusterMembershipResourceArg(
    parser: parser_arguments.ArgumentInterceptor, positional=True, required=True
):
  """Adds a resource argument for a VMware admin cluster membership.

  Args:
    parser: The argparse parser to add the resource arg to.
    positional: bool, whether the argument is positional or not.
    required: bool, whether the argument is required or not.
  """
  if positional:
    name = 'admin_cluster_membership'
  else:
    name = '--admin-cluster-membership'
  # TODO(b/227667209): Add fallthrough from cluster location when regional
  # membership is implemented.
  membership_help = (
      'membership of the admin cluster. Membership can be the membership ID or'
      ' the full resource name.'
  )
  resource_arg = concept_parsers.ConceptParser.ForResource(
      name,
      GetAdminClusterMembershipResourceSpec(),
      membership_help,
      required=required,
      flag_name_overrides={
          'location': '--admin-cluster-membership-location',
      },
  )
  resource_arg.AddToParser(parser)
  parser.set_defaults(admin_cluster_membership_location='global')
def AddBinauthzEvaluationMode(parser):
  """Adds the hidden Binary Authorization evaluation mode flag.

  Args:
    parser: The argparse parser to add the flag to.
  """
  parser.add_argument(
      '--binauthz-evaluation-mode',
      default=None,
      hidden=True,
      choices=['DISABLED', 'PROJECT_SINGLETON_POLICY_ENFORCE'],
      help='Set Binary Authorization evaluation mode for this cluster.',
  )
| [
"[email protected]"
] | |
02cc868019621649a62b17392c2f8891804a69a6 | 696e35ccdf167c3f6b1a7f5458406d3bb81987c9 | /mash/DEPS | 681fd163d9adfc6508b88a7df485d548ae8c59da | [
"BSD-3-Clause"
] | permissive | mgh3326/iridium-browser | 064e91a5e37f4e8501ea971483bd1c76297261c3 | e7de6a434d2659f02e94917be364a904a442d2d0 | refs/heads/master | 2023-03-30T16:18:27.391772 | 2019-04-24T02:14:32 | 2019-04-24T02:14:32 | 183,128,065 | 0 | 0 | BSD-3-Clause | 2019-11-30T06:06:02 | 2019-04-24T02:04:51 | null | UTF-8 | Python | false | false | 279 | include_rules = [
"+ash/public",
"+components/prefs",
"+components/viz/common",
"+mojo/converters",
"+mojo/public",
"+services/catalog/public",
"+services/service_manager",
"+services/ws/common",
"+services/ws/public",
"+third_party/skia/include",
"+ui",
]
| [
"[email protected]"
] | ||
40ce2d0ef01f6a45b190fa3ad50ad6e90dda63f5 | b500996a0b29829fde6afe8b23178ca9df4a239d | /rydinfap/src/apps/vehfifthwkday.py | f0553cf45ea5c4557c83129f4448663dcbf8cd3f | [] | no_license | eocampo2000/test-code | 48c4d444e323eef5e6fe7e61b018952ef3cd4134 | 49328664243e1a9daf9c567d1aaaa19fd4654c02 | refs/heads/master | 2016-08-11T07:35:31.346464 | 2016-02-13T12:33:55 | 2016-02-13T12:33:55 | 51,642,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,311 | py | '''
Created on June 18, 2015
@author: eocampo
Vehicle DB2 Conversion 5th workday
Following is the list of wkf that need to be ran in sequential order:
wkf_part_dimension_monthly
wkf_rfp_parts_us_can_monthly
wkf_rfp_compliance_monthly
wkf_jobr_compliance_monthly
wkf_ser_compliance_monthly
wkf_prt_compliance_monthly
self.wPartDimMthly,
self.wRFPPartUsCanMthly,
self.wRFPComplMthly,
self.wJobrComplMthly,
self.wSerComplMthly,
self.wPrtComplMthly,
FTP FILES:
wPartDimMthly RFP_Part_Price.csv,RFP_Part_Xref.csv,MFG_Parts_Match.CSV
RFPPartUsCanMthly RFP_Part_Price.csv,RFP_Part_Xref.csv,MFG_Parts_Match.CSV,RFP_Part_Price_CAN.csv,RFP_Part_Xref_CAN.csv,MFG_Parts_Match_CAN.CSV,
RFP_CMPL_PART_EXCLUDE.csv
ComplMthly RFP_ADJ.csv,RFP_NAVISTAR.txt
20150909 Added wkf_rfp_comp_us_can_monthly.
'''
__version__ = '20150909'

import sys
import os
from datetime import datetime

import utils.fileutils as fu
import utils.strutils as su
import utils.filetransf as ft
import datastore.dbapp as da
import procdata.procinfa as pi
from apps.infbaseapp import _InfaBaseApp

# Mandatory to define self.cmdStep
# cur_dayr: today's date string (YYYYMMDD format, per the strutils helper name).
cur_dayr = su.getTodayDtStr('%Y%m%d')
class VehFifthWkday(_InfaBaseApp):
    """Vehicle DB2 conversion, 5th-workday batch application.

    Fetches the monthly RFP/parts file set via FTP, stages it for
    Informatica, archives it, and runs the monthly Vehicle and Asset
    workflows in the sequential order defined by ``cmdStep``.
    """

    exitOnError = True

    def __init__(self):
        super(VehFifthWkday, self).__init__()
        self.landDir = 'SrcFiles/vehicle'
        self.incFileSet = []    # Incoming files. Contains full path name.
        self.incFiles = []      # Will need to re-initialize.
        self.workFiles = []     # Files that were moved to the working dir.
        self.RowCnt = -1
        # Files that Informatica expects, alphabetical. Must stay in sync with
        # self.ib.fileName and os.environ['FILE'] below.
        self.srcFile = ('mfg_parts_match.csv', 'mfg_parts_match_can.csv', 'rfp_adj.csv', 'rfp_cmpl_part_exclude.csv', 'rfp_navistar.txt', 'rfp_part_price.csv', 'rfp_part_price_can.csv', 'rfp_part_xref.csv', 'rfp_part_xref_can.csv')
        # Source file names as a single comma-separated string.
        self.ib.fileName = "MFG_Parts_Match.CSV,MFG_Parts_Match_CAN.CSV,RFP_ADJ.csv,RFP_CMPL_PART_EXCLUDE.csv,RFP_NAVISTAR.txt,RFP_Part_Price.csv,RFP_Part_Price_CAN.csv,RFP_Part_Xref.csv,RFP_Part_Xref_CAN.csv"
        self.checkNextRunFlg = False
        self.runWkfFlowFlg = False
        self.fileDate = ''
        self.FILE_SET_LEN = 1
        self.ts = su.getTimeSTamp()

        # Allowable commands for this application (mandatory to set).
        self.cmdStep = {
            'A': self.getLock,
            'B': self.isWorkDayWarn,
            'C': self.getFtpFiles,          # Sets self.incFileSet.
            'D': self.getIncSetFiles,
            'E': self.copyFilesWorkDir,
            'F': self.archFilesTS,
            'G': self.wPartDimMthly,
            'H': self.wRFPPartUsCanMthly,
            'I': self.wRFPComplMthly,
            'J': self.wJobrComplMthly,
            'K': self.wSerComplMthly,
            'L': self.wPrtComplMthly,
            'M': self.wkfRFPCompUSCanMthly,
            'N': self.wkfAsstRFPCompCanMthly,
            'O': self.wkfAsstRFPCompMthly,
            'P': self.wkfAsstPurchCompMthly,
            'Q': self.wkfAsstRankResMthly,
            'R': self.wkfAsstPartPriceMonMthly,
        }

        # Informatica environment variable map (values are evaluated names).
        self.infaEnvVar = {
            'PMCMD': 'mg.pmcmd',
            'INFA_USER': 'self.ib.rep_user',
            'INFA_XPWD': 'self.ib.rep_xpwd',
            'DOMAIN': 'self.ib.dom_name',
            'INT_SERV': 'self.ib.IS',
            'INFA_SHARE': 'self.ib.shareDir',
            'INFA_APP_CFG': 'self.ib.cfgDir',
            'INFA_APP_LCK': 'self.ib.lckDir',
            'INFA_APP_CTL': 'self.ib.ctlDir',
        }

        # The FTP step expects the following env variables, which should not
        # live in a config file.
        os.environ['RXFILE'] = 'None'
        os.environ['FILE'] = 'MFG_Parts_Match.CSV,MFG_Parts_Match_CAN.CSV,RFP_ADJ.csv,RFP_CMPL_PART_EXCLUDE.csv,RFP_NAVISTAR.txt,RFP_Part_Price.csv,RFP_Part_Price_CAN.csv,RFP_Part_Xref.csv,RFP_Part_Xref_CAN.csv'

    def getFtpFiles(self):
        """Fetch the monthly file set via FTP; populates self.incFileSet.

        NOTE(review): 'VehFithtWkday' (sic) is a lookup key that must match
        the FTP configuration section name -- confirm before "fixing" the
        spelling.
        """
        return ft.get('VehFithtWkday', self.log)

    def copyFilesWorkDir(self):
        """Queue one incoming file per expected source file, then copy to the work dir."""
        for i in range(len(self.srcFile)):
            self.incFiles.append('%s' % self.incFileSet[i][0])
        return self.cpSrcToTgtFiles()

    def archFilesTS(self):
        """Archive the staged files using this run's timestamp."""
        return self.archGenFiles(self.incFiles, self.ts, True)

    def _runWorkflow(self, folder, workflow):
        """Run one Informatica workflow and wait for completion.

        Sets self.ib.fld/self.ib.wkf, logs the outcome (error level on a
        non-zero return code, info otherwise) and returns the code.
        """
        self.ib.fld = folder
        self.ib.wkf = workflow
        rc = pi.runWkflWait(self.ib, self.log)
        msg = 'Running %s.%s rc = %s' % (self.ib.fld, self.ib.wkf, rc)
        if rc != 0:
            self.log.error(msg)
        else:
            self.log.info(msg)
        return rc

    # --- Monthly workflows, to be run in sequential order (G..R) ----------

    def wPartDimMthly(self):
        """Uses RFP_Part_Price.csv, RFP_Part_Xref.csv, MFG_Parts_Match.CSV."""
        return self._runWorkflow('Vehicle', 'wkf_part_dimension_monthly')

    def wRFPPartUsCanMthly(self):
        """Uses the US and CAN part price/xref/match files plus RFP_CMPL_PART_EXCLUDE.csv."""
        return self._runWorkflow('Vehicle', 'wkf_rfp_parts_us_can_monthly')

    def wRFPComplMthly(self):
        """Uses RFP_ADJ.csv and RFP_NAVISTAR.txt."""
        return self._runWorkflow('Vehicle', 'wkf_rfp_compliance_monthly')

    def wJobrComplMthly(self):
        """Uses RFP_ADJ.csv."""
        return self._runWorkflow('Vehicle', 'wkf_jobr_compliance_monthly')

    def wSerComplMthly(self):
        return self._runWorkflow('Vehicle', 'wkf_ser_compliance_monthly')

    def wPrtComplMthly(self):
        return self._runWorkflow('Vehicle', 'wkf_prt_compliance_monthly')

    def wkfRFPCompUSCanMthly(self):
        """Added 20150909 (see module history)."""
        return self._runWorkflow('Vehicle', 'wkf_rfp_comp_us_can_monthly')

    def wkfAsstRFPCompCanMthly(self):
        return self._runWorkflow('Asset', 'wkf_ppc_rfp_comp_can_monthly')

    def wkfAsstRFPCompMthly(self):
        return self._runWorkflow('Asset', 'wkf_ppc_rfp_compliance_monthly')

    def wkfAsstPurchCompMthly(self):
        return self._runWorkflow('Asset', 'wkf_ppc_purchasing_compliance_monthly')

    def wkfAsstRankResMthly(self):
        return self._runWorkflow('Asset', 'wkf_ppc_ranking_research_monthly')

    def wkfAsstPartPriceMonMthly(self):
        return self._runWorkflow('Asset', 'wkf_part_price_monitor_monthly')
def main(Args):
    """Build the VehFifthWkday application and delegate to its main entry point."""
    app = VehFifthWkday()
    return app.main(Args)
if __name__ == '__main__':
    # Exit with the application's return code.
    sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
6a83f1a2c02e53428cf6622fc3cc28737370842f | c47340ae6bcac6002961cc2c6d2fecb353c1e502 | /test/test_known_hosts.py | 1c2c8b6867e85b61f8a35c508543f3fccb4a802f | [
"MIT"
] | permissive | rafaeldelrey/controlm_py | 6d9f56b8b6e72750f329d85b932ace6c41002cbd | ed1eb648d1d23e587321227217cbfcc5065535ab | refs/heads/main | 2023-04-23T09:01:32.024725 | 2021-05-19T00:25:53 | 2021-05-19T00:25:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.115
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_py
from controlm_py.models.known_hosts import KnownHosts # noqa: E501
from controlm_py.rest import ApiException
class TestKnownHosts(unittest.TestCase):
    """Unit-test scaffold for the KnownHosts model (no assertions yet)."""

    def setUp(self):
        """No fixtures needed for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testKnownHosts(self):
        """Placeholder test for KnownHosts.

        FIXME: construct a ``controlm_py.models.known_hosts.KnownHosts``
        instance with example values for its mandatory attributes.
        """
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"[email protected]"
] | |
c0367113441a8db2c3f75729fd8ff004260f1624 | fdbcbb95a34e05e015c3ece6a071b04915c70346 | /code_interview/settings.py | abae8f9ec0cbf5a01bc576629629e91621ec3687 | [
"Apache-2.0"
] | permissive | nonbeing/codeinterview-backend | ed0e8f4d4dddbc25e235a817cfbc22e491c5c5c9 | f44a592640a4a663daebef06561063c062c8acb0 | refs/heads/master | 2022-09-04T11:28:04.105984 | 2020-06-01T05:13:22 | 2020-06-01T05:13:22 | 268,828,769 | 1 | 0 | Apache-2.0 | 2020-06-02T14:49:35 | 2020-06-02T14:49:35 | null | UTF-8 | Python | false | false | 3,870 | py | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# NOTE(review): hardcoded SECRET_KEY and DEBUG=True are development-only
# values -- override both for any real deployment.
SECRET_KEY = 'j%4s9n79np!^nrq3&h4=6a8r2c^ex9s)gg3s(zsx((o@qll2yj'

DEBUG = True

ALLOWED_HOSTS = []

# Celery config
CELERY_BROKER_URL = 'pyamqp://'
CELERY_RESULT_BACKEND = 'redis://'
CELERY_TASK_ROUTES = {
    # WARNING: room.tasks still need explicit queue name when chaining.
    # see rooms.signals.dispatch_run_task. Help?
    'rooms.tasks.*': {'queue': 'callbacks'},
    'tasks.sandbox.run_user_code': {'queue': 'sandbox'}
}

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'rest_framework',
    'django_filters',
    'channels',
    'rooms',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'code_interview.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'code_interview.wsgi.application'
ASGI_APPLICATION = "code_interview.routing.application"

##### Channels-specific settings
redis_url = 'redis://'

# Channel layer definitions
# http://channels.readthedocs.io/en/latest/topics/channel_layers.html
CHANNEL_LAYERS = {
    # "default": {
    #     # This example app uses the Redis channel layer implementation channels_redis
    #     "BACKEND": "channels_redis.core.RedisChannelLayer",
    #     "CONFIG": {
    #         "hosts": [redis_url,],
    #     },
    # },
    "default": {
        "BACKEND": "channels.layers.InMemoryChannelLayer"
    }
}

CORS_ORIGIN_WHITELIST = [
    'http://localhost:8080' # front-end
]

# REST framework
default_renderers = []
if DEBUG:
    # Browsable API renderer only while developing.
    default_renderers = ['rest_framework.renderers.BrowsableAPIRenderer']
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
    ] + default_renderers,
}

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
"[email protected]"
] | |
3e02711859000bf568d3065860caad09b02d70f6 | 8aa5b087176a5de567664a9377eda56641cda628 | /binaryapi/ws/chanels/contract_update_history.py | 3a6a664953571745700447201f3bdf44426e7027 | [] | no_license | pabloapdz/binaryapi | ce55d2f4d125b2725ae0c5b23b953521c792cf27 | c2cca1ec144d10b885df7aeda03c7c63dbe673de | refs/heads/master | 2022-11-19T16:10:04.567328 | 2020-07-23T03:00:40 | 2020-07-23T03:00:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | """Module for Binary contract_update_history websocket chanel."""
from binaryapi.ws.chanels.base import Base
# https://developers.binary.com/api/#contract_update_history
class ContractUpdateHistory(Base):
"""Class for Binary contract_update_history websocket chanel."""
name = "contract_update_history"
def __call__(self, contract_id: int, limit=None, passthrough=None, req_id: int = None):
"""Method to send message to contract_update_history websocket chanel.
Update Contract History (request)
Request for contract update history.
:param contract_id: Internal unique contract identifier.
:type contract_id: int
:param limit: [Optional] Maximum number of historical updates to receive.
:type limit:
:param passthrough: [Optional] Used to pass data through the websocket, which may be retrieved via the `echo_req` output field.
:type passthrough:
:param req_id: [Optional] Used to map request to response.
:type req_id: int
"""
data = {
"contract_update_history": int(1),
"contract_id": int(contract_id)
}
if limit:
data['limit'] = limit
return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)
| [
"[email protected]"
] | |
b91f7696711118f9d042b44bc9aa22203a40a6ba | ef3d4130f28c7c589c646b15d19010cf426fc0f6 | /doc/src/LectureNotes/_build/jupyter_execute/chapter3.py | d14bfd161844b30e6a3a7b6032fbca948f26603f | [
"CC0-1.0"
] | permissive | CompPhysics/MachineLearning | 54e1123a96060d824307f270415646494783cff5 | 00a2bd1a7efde5fbfd9b9d6d6c365dcd82fe8baf | refs/heads/master | 2023-09-06T02:34:28.559589 | 2023-09-05T13:16:27 | 2023-09-05T13:16:27 | 103,986,991 | 154 | 136 | CC0-1.0 | 2022-11-18T11:01:02 | 2017-09-18T20:11:45 | null | UTF-8 | Python | false | false | 58,293 | py | # Getting started, our first data and Machine Learning encounters
## Introduction
Our emphasis throughout this series of lectures
is on understanding the mathematical aspects of
different algorithms used in the fields of data analysis and machine learning.
However, where possible we will emphasize the
importance of using available software. We start thus with a hands-on
and top-down approach to machine learning. The aim is thus to start with
relevant data or data we have produced
and use these to introduce statistical data analysis
concepts and machine learning algorithms before we delve into the
algorithms themselves. The examples we will use in the beginning, start with simple
polynomials with random noise added. We will use the Python
software package [Scikit-Learn](http://scikit-learn.org/stable/) and
introduce various machine learning algorithms to make fits of
the data and predictions. We move thereafter to more interesting
cases such as data from say experiments (below we will look at experimental nuclear binding energies as an example).
These are examples where we can easily set up the data and
then use machine learning algorithms included in for example
**Scikit-Learn**.
These examples will serve us the purpose of getting
started. Furthermore, they allow us to catch more than two birds with
a stone. They will allow us to bring in some programming specific
topics and tools as well as showing the power of various Python
libraries for machine learning and statistical data analysis.
Here, we will mainly focus on two
specific Python packages for Machine Learning, Scikit-Learn and
Tensorflow (see below for links etc). Moreover, the examples we
introduce will serve as inputs to many of our discussions later, as
well as allowing you to set up models and produce your own data and
get started with programming.
## What is Machine Learning?
Statistics, data science and machine learning form important fields of
research in modern science. They describe how to learn and make
predictions from data, as well as allowing us to extract important
correlations about physical process and the underlying laws of motion
in large data sets. The latter, big data sets, appear frequently in
essentially all disciplines, from the traditional Science, Technology,
Mathematics and Engineering fields to Life Science, Law, education
research, the Humanities and the Social Sciences.
It has become more
and more common to see research projects on big data in for example
the Social Sciences where extracting patterns from complicated survey
data is one of many research directions. Having a solid grasp of data
analysis and machine learning is thus becoming central to scientific
computing in many fields, and competences and skills within the fields
of machine learning and scientific computing are nowadays strongly
requested by many potential employers. The latter cannot be
overstated, familiarity with machine learning has almost become a
prerequisite for many of the most exciting employment opportunities,
whether they are in bioinformatics, life science, physics or finance,
in the private or the public sector. This author has had several
students or met students who have been hired recently based on their
skills and competences in scientific computing and data science, often
with marginal knowledge of machine learning.
Machine learning is a subfield of computer science, and is closely
related to computational statistics. It evolved from the study of
pattern recognition in artificial intelligence (AI) research, and has
made contributions to AI tasks like computer vision, natural language
processing and speech recognition. Many of the methods we will study are also
strongly rooted in basic mathematics and physics research.
Ideally, machine learning represents the science of giving computers
the ability to learn without being explicitly programmed. The idea is
that there exist generic algorithms which can be used to find patterns
in a broad class of data sets without having to write code
specifically for each problem. The algorithm will build its own logic
based on the data. You should however always keep in mind that
machines and algorithms are to a large extent developed by humans. The
insights and knowledge we have about a specific system, play a central
role when we develop a specific machine learning algorithm.
Machine learning is an extremely rich field, in spite of its young
age. The increases we have seen during the last three decades in
computational capabilities have been followed by developments of
methods and techniques for analyzing and handling large date sets,
relying heavily on statistics, computer science and mathematics. The
field is rather new and developing rapidly. Popular software packages
written in Python for machine learning like
[Scikit-learn](http://scikit-learn.org/stable/),
[Tensorflow](https://www.tensorflow.org/),
[PyTorch](http://pytorch.org/) and [Keras](https://keras.io/), all
freely available at their respective GitHub sites, encompass
communities of developers in the thousands or more. And the number of
code developers and contributors keeps increasing. Not all the
algorithms and methods can be given a rigorous mathematical
justification, opening up thereby large rooms for experimenting and
trial and error and thereby exciting new developments. However, a
solid command of linear algebra, multivariate theory, probability
theory, statistical data analysis, understanding errors and Monte
Carlo methods are central elements in a proper understanding of many
of algorithms and methods we will discuss.
## Types of Machine Learning
The approaches to machine learning are many, but are often split into
two main categories. In *supervised learning* we know the answer to a
problem, and let the computer deduce the logic behind it. On the other
hand, *unsupervised learning* is a method for finding patterns and
relationship in data sets without any prior knowledge of the system.
Some authours also operate with a third category, namely
*reinforcement learning*. This is a paradigm of learning inspired by
behavioral psychology, where learning is achieved by trial-and-error,
solely from rewards and punishment.
Another way to categorize machine learning tasks is to consider the
desired output of a system. Some of the most common tasks are:
* Classification: Outputs are divided into two or more classes. The goal is to produce a model that assigns inputs into one of these classes. An example is to identify digits based on pictures of hand-written ones. Classification is typically supervised learning.
* Regression: Finding a functional relationship between an input data set and a reference data set. The goal is to construct a function that maps input data to continuous output values.
* Clustering: Data are divided into groups with certain common traits, without knowing the different groups beforehand. It is thus a form of unsupervised learning.
The methods we cover have three main topics in common, irrespective of
whether we deal with supervised or unsupervised learning. The first
ingredient is normally our data set (which can be subdivided into
training and test data), the second item is a model which is normally a
function of some parameters. The model reflects our knowledge of the system (or lack thereof). As an example, if we know that our data show a behavior similar to what would be predicted by a polynomial, fitting our data to a polynomial of some degree would then determine our model.
The last ingredient is a so-called **cost**
function which allows us to present an estimate on how good our model
is in reproducing the data it is supposed to train.
At the heart of basically all ML algorithms there are so-called minimization algorithms, often we end up with various variants of **gradient** methods.
## Software and needed installations
We will make extensive use of Python as programming language and its
myriad of available libraries. You will find
Jupyter notebooks invaluable in your work. You can run **R**
codes in the Jupyter/IPython notebooks, with the immediate benefit of
visualizing your data. You can also use compiled languages like C++,
Rust, Julia, Fortran etc if you prefer. The focus in these lectures will be
on Python.
If you have Python installed (we strongly recommend Python3) and you feel
pretty familiar with installing different packages, we recommend that
you install the following Python packages via **pip** as
1. pip install numpy scipy matplotlib ipython scikit-learn mglearn sympy pandas pillow
For Python3, replace **pip** with **pip3**.
For OSX users we recommend, after having installed Xcode, to
install **brew**. Brew allows for a seamless installation of additional
software via for example
1. brew install python3
For Linux users, with its variety of distributions like for example the widely popular Ubuntu distribution,
you can use **pip** as well and simply install Python as
1. sudo apt-get install python3 (or python for pyhton2.7)
etc etc.
## Python installers
If you don't want to perform these operations separately and venture
into the hassle of exploring how to set up dependencies and paths, we
recommend two widely used distrubutions which set up all relevant
dependencies for Python, namely
* [Anaconda](https://docs.anaconda.com/),
which is an open source
distribution of the Python and R programming languages for large-scale
data processing, predictive analytics, and scientific computing, that
aims to simplify package management and deployment. Package versions
are managed by the package management system **conda**.
* [Enthought canopy](https://www.enthought.com/product/canopy/)
is a Python
distribution for scientific and analytic computing distribution and
analysis environment, available for free and under a commercial
license.
Furthermore, [Google's Colab](https://colab.research.google.com/notebooks/welcome.ipynb) is a free Jupyter notebook environment that requires
no setup and runs entirely in the cloud. Try it out!
## Useful Python libraries
Here we list several useful Python libraries we strongly recommend (if you use anaconda many of these are already there)
* [NumPy](https://www.numpy.org/) is a highly popular library for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays
* [The pandas](https://pandas.pydata.org/) library provides high-performance, easy-to-use data structures and data analysis tools
* [Xarray](http://xarray.pydata.org/en/stable/) is a Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun!
* [Scipy](https://www.scipy.org/) (pronounced “Sigh Pie”) is a Python-based ecosystem of open-source software for mathematics, science, and engineering.
* [Matplotlib](https://matplotlib.org/) is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.
* [Autograd](https://github.com/HIPS/autograd) can automatically differentiate native Python and Numpy code. It can handle a large subset of Python's features, including loops, ifs, recursion and closures, and it can even take derivatives of derivatives of derivatives
* [SymPy](https://www.sympy.org/en/index.html) is a Python library for symbolic mathematics.
* [scikit-learn](https://scikit-learn.org/stable/) has simple and efficient tools for machine learning, data mining and data analysis
* [TensorFlow](https://www.tensorflow.org/) is a Python library for fast numerical computing created and released by Google
* [Keras](https://keras.io/) is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano
* And many more such as [pytorch](https://pytorch.org/), [Theano](https://pypi.org/project/Theano/) etc
## Installing R, C++, cython or Julia
You will also find it convenient to utilize **R**. We will mainly
use Python during our lectures and in various projects and exercises.
Those of you
already familiar with **R** should feel free to continue using **R**, keeping
however an eye on the parallel Python set ups. Similarly, if you are a
Python aficionado, feel free to explore **R** as well. Jupyter/Ipython
notebook allows you to run **R** codes interactively in your
browser. The software library **R** is really tailored for statistical data analysis
and allows for an easy usage of the tools and algorithms we will discuss in these
lectures.
To install **R** with Jupyter notebook
[follow the link here](https://mpacer.org/maths/r-kernel-for-ipython-notebook)
## Installing R, C++, cython, Numba etc
For the C++ aficionados, Jupyter/IPython notebook allows you also to
install C++ and run codes written in this language interactively in
the browser. Since we will emphasize writing many of the algorithms
yourself, you can thus opt for either Python or C++ (or Fortran or other compiled languages) as programming
languages.
To add more entropy, **cython** can also be used when running your
notebooks. It means that Python with the jupyter notebook
setup allows you to integrate widely popular softwares and tools for
scientific computing. Similarly, the
[Numba Python package](https://numba.pydata.org/) delivers increased performance
capabilities with minimal rewrites of your codes. With its
versatility, including symbolic operations, Python offers a unique
computational environment. Your jupyter notebook can easily be
converted into a nicely rendered **PDF** file or a Latex file for
further processing. For example, convert to latex as
pycod jupyter nbconvert filename.ipynb --to latex
And to add more versatility, the Python package [SymPy](http://www.sympy.org/en/index.html) is a Python library for symbolic mathematics. It aims to become a full-featured computer algebra system (CAS) and is entirely written in Python.
Finally, if you wish to use the light mark-up language
[doconce](https://github.com/hplgit/doconce) you can convert a standard ascii text file into various HTML
formats, ipython notebooks, latex files, pdf files etc with minimal edits. These lectures were generated using **doconce**.
## Numpy examples and Important Matrix and vector handling packages
There are several central software libraries for linear algebra and eigenvalue problems. Several of the more
popular ones have been wrapped into ofter software packages like those from the widely used text **Numerical Recipes**. The original source codes in many of the available packages are often taken from the widely used
software package LAPACK, which follows two other popular packages
developed in the 1970s, namely EISPACK and LINPACK. We describe them shortly here.
* LINPACK: package for linear equations and least square problems.
* LAPACK:package for solving symmetric, unsymmetric and generalized eigenvalue problems. From LAPACK's website <http://www.netlib.org> it is possible to download for free all source codes from this library. Both C/C++ and Fortran versions are available.
* BLAS (I, II and III): (Basic Linear Algebra Subprograms) are routines that provide standard building blocks for performing basic vector and matrix operations. Blas I is vector operations, II vector-matrix operations and III matrix-matrix operations. Highly parallelized and efficient codes, all available for download from <http://www.netlib.org>.
## Basic Matrix Features
**Matrix properties reminder.**
$$
\mathbf{A} =
\begin{bmatrix} a_{11} & a_{12} & a_{13} & a_{14} \\
a_{21} & a_{22} & a_{23} & a_{24} \\
a_{31} & a_{32} & a_{33} & a_{34} \\
a_{41} & a_{42} & a_{43} & a_{44}
\end{bmatrix}\qquad
\mathbf{I} =
\begin{bmatrix} 1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & 1
\end{bmatrix}
$$
The inverse of a matrix is defined by
$$
\mathbf{A}^{-1} \cdot \mathbf{A} = I
$$
<table border="1">
<thead>
<tr><th align="center"> Relations </th> <th align="center"> Name </th> <th align="center"> matrix elements </th> </tr>
</thead>
<tbody>
<tr><td align="center"> $A = A^{T}$ </td> <td align="center"> symmetric </td> <td align="center"> $a_{ij} = a_{ji}$ </td> </tr>
<tr><td align="center"> $A = \left (A^{T} \right )^{-1}$ </td> <td align="center"> real orthogonal </td> <td align="center"> $\sum_k a_{ik} a_{jk} = \sum_k a_{ki} a_{kj} = \delta_{ij}$ </td> </tr>
<tr><td align="center"> $A = A^{ * }$ </td> <td align="center"> real matrix </td> <td align="center"> $a_{ij} = a_{ij}^{ * }$ </td> </tr>
<tr><td align="center"> $A = A^{\dagger}$ </td> <td align="center"> hermitian </td> <td align="center"> $a_{ij} = a_{ji}^{ * }$ </td> </tr>
<tr><td align="center"> $A = \left (A^{\dagger} \right )^{-1}$ </td> <td align="center"> unitary </td> <td align="center"> $\sum_k a_{ik} a_{jk}^{ * } = \sum_k a_{ki}^{ * } a_{kj} = \delta_{ij}$ </td> </tr>
</tbody>
</table>
### Some famous Matrices
* Diagonal if $a_{ij}=0$ for $i\ne j$
* Upper triangular if $a_{ij}=0$ for $i > j$
* Lower triangular if $a_{ij}=0$ for $i < j$
* Upper Hessenberg if $a_{ij}=0$ for $i > j+1$
* Lower Hessenberg if $a_{ij}=0$ for $i < j+1$
* Tridiagonal if $a_{ij}=0$ for $|i -j| > 1$
* Lower banded with bandwidth $p$: $a_{ij}=0$ for $i > j+p$
* Upper banded with bandwidth $p$: $a_{ij}=0$ for $i < j+p$
* Banded, block upper triangular, block lower triangular....
### More Basic Matrix Features
**Some Equivalent Statements.**
For an $N\times N$ matrix $\mathbf{A}$ the following properties are all equivalent
* If the inverse of $\mathbf{A}$ exists, $\mathbf{A}$ is nonsingular.
* The equation $\mathbf{Ax}=0$ implies $\mathbf{x}=0$.
* The rows of $\mathbf{A}$ form a basis of $R^N$.
* The columns of $\mathbf{A}$ form a basis of $R^N$.
* $\mathbf{A}$ is a product of elementary matrices.
* $0$ is not eigenvalue of $\mathbf{A}$.
## Numpy and arrays
[Numpy](http://www.numpy.org/) provides an easy way to handle arrays in Python. The standard way to import this library is as
import numpy as np
Here follows a simple example where we set up an array of ten elements, all determined by random numbers drawn according to the normal distribution,
# Draw n samples from the standard normal distribution N(0, 1).
# No seed is set, so the printed values differ from run to run.
n = 10
x = np.random.normal(size=n)
print(x)
We defined a vector $x$ with $n=10$ elements with its values given by the Normal distribution $N(0,1)$.
Another alternative is to declare a vector as follows
import numpy as np

# A vector with three integer elements: x_0 = 1, x_1 = 2, x_2 = 3.
# np.arange(1, 4) produces exactly the same integer array as
# np.array([1, 2, 3]).
x = np.arange(1, 4)
print(x)
Here we have defined a vector with three elements, with $x_0=1$, $x_1=2$ and $x_2=3$. Note that both Python and C++
start numbering array elements from $0$ and on. This means that a vector with $n$ elements has a sequence of entities $x_0, x_1, x_2, \dots, x_{n-1}$. We could also let (recommended) Numpy to compute the logarithms of a specific array as
import numpy as np

# np.log is applied element-wise; a plain Python list is converted
# to an ndarray automatically before the logarithm is taken.
values = [4, 7, 8]
x = np.log(values)
print(x)
In the last example we used Numpy's unary function $np.log$. This function is
highly tuned to compute array elements since the code is vectorized
and does not require looping. We normally recommend that you use the
Numpy intrinsic functions instead of the corresponding **log** function
from Python's **math** module. The looping is done explicitely by the
**np.log** function. The alternative, and slower way to compute the
logarithms of a vector would be to write
import numpy as np
from math import log
# x is built from integer literals, so NumPy infers an integer dtype.
x = np.array([4, 7, 8])
for i in range(0, len(x)):
    # math.log returns a float, but assigning it into an integer array
    # truncates it back to an int -- hence the surprising output [1 1 2].
    x[i] = log(x[i])
print(x)
We note that our code is much longer already and we need to import the **log** function from the **math** module.
The attentive reader will also notice that the output is $[1, 1, 2]$. Python interprets automagically our numbers as integers (like the **automatic** keyword in C++). To change this we could define our array elements to be double precision numbers as
import numpy as np
x = np.log(np.array([4, 7, 8], dtype = np.float64))
print(x)
or simply write them as double precision numbers (Python uses 64 bits as default for floating point type variables), that is
import numpy as np
x = np.log(np.array([4.0, 7.0, 8.0])
print(x)
To check the number of bytes (remember that one byte contains eight bits for double precision variables), you can use simple use the **itemsize** functionality (the array $x$ is actually an object which inherits the functionalities defined in Numpy) as
import numpy as np
# Note: the original snippet was missing the closing parenthesis,
# which made it a syntax error.
x = np.log(np.array([4.0, 7.0, 8.0]))
# itemsize reports the number of bytes per element: 8 for float64
# (one byte holds eight bits, so 64 bits = 8 bytes).
print(x.itemsize)
## Matrices in Python
Having defined vectors, we are now ready to try out matrices. We can
define a $3 \times 3 $ real matrix $\hat{A}$ as (recall that we use
lowercase letters for vectors and uppercase letters for matrices)
import numpy as np

# Element-wise natural logarithm of a 3 x 3 real matrix
# (uppercase letters denote matrices, lowercase letters vectors).
rows = [[4.0, 7.0, 8.0],
        [3.0, 10.0, 11.0],
        [4.0, 5.0, 7.0]]
A = np.log(np.array(rows))
print(A)
If we use the **shape** function we would get $(3, 3)$ as output, that is verifying that our matrix is a $3\times 3$ matrix. We can slice the matrix and print for example the first column (Python organized matrix elements in a row-major order, see below) as
import numpy as np

matrix_entries = [[4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0]]
A = np.log(np.array(matrix_entries))
# Slice out column 0: indices start at 0 and storage is row-major.
print(A[:, 0])
We can continue this way by printing out other columns or rows. The example here prints out the second column
import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
# print the first column, row-major order and elements start with 0
print(A[1,:])
Numpy contains many other functionalities that allow us to slice, subdivide etc etc arrays. We strongly recommend that you look up the [Numpy website for more details](http://www.numpy.org/). Useful functions when defining a matrix are the **np.zeros** function which declares a matrix of a given dimension and sets all elements to zero
import numpy as np

# Allocate an n x n matrix initialised to all zeros (float64 by default).
n = 10
shape = (n, n)
A = np.zeros(shape)
print(A)
or initializing all elements to
import numpy as np

# Allocate an n x n matrix with every element initialised to one.
n = 10
shape = (n, n)
A = np.ones(shape)
print(A)
or as unitarily distributed random numbers (see the material on random number generators in the statistics part)
import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to random numbers with x \in [0, 1]
# np.random.rand draws from the uniform distribution on [0, 1); no seed is
# set, so the matrix differs between runs.
A = np.random.rand(n, n)
print(A)
As we will see throughout these lectures, there are several extremely useful functionalities in Numpy.
As an example, consider the discussion of the covariance matrix. Suppose we have defined three vectors
$\hat{x}, \hat{y}, \hat{z}$ with $n$ elements each. The covariance matrix is defined as
$$
\hat{\Sigma} = \begin{bmatrix} \sigma_{xx} & \sigma_{xy} & \sigma_{xz} \\
\sigma_{yx} & \sigma_{yy} & \sigma_{yz} \\
\sigma_{zx} & \sigma_{zy} & \sigma_{zz}
\end{bmatrix},
$$
where for example
$$
\sigma_{xy} =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}).
$$
The Numpy function **np.cov** calculates the covariance elements using the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have the exact mean values.
The following simple function uses the **np.vstack** function which takes each vector of dimension $1\times n$ and produces a $3\times n$ matrix $\hat{W}$
$$
\hat{W} = \begin{bmatrix} x_0 & y_0 & z_0 \\
x_1 & y_1 & z_1 \\
x_2 & y_2 & z_2 \\
\dots & \dots & \dots \\
x_{n-2} & y_{n-2} & z_{n-2} \\
x_{n-1} & y_{n-1} & z_{n-1}
\end{bmatrix},
$$
which in turn is converted into into the $3\times 3$ covariance matrix
$\hat{\Sigma}$ via the Numpy function **np.cov()**. We note that we can also calculate
the mean value of each set of samples $\hat{x}$ etc using the Numpy
function **np.mean(x)**. We can also extract the eigenvalues of the
covariance matrix through the **np.linalg.eig()** function.
# Importing various packages
import numpy as np

# Sample size for all three variables.
n = 100
# x ~ N(0, 1); its sample mean should be close to 0 for large n.
x = np.random.normal(size=n)
print(np.mean(x))
# y is a linear function of x plus unit-variance Gaussian noise (mean ~ 4).
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
# z is a nonlinear (cubic) function of x plus noise.
z = x**3+np.random.normal(size=n)
print(np.mean(z))
# Stack the three length-n vectors into a 3 x n matrix, one variable per row.
W = np.vstack((x, y, z))
# np.cov normalises with 1/(n-1) (sample covariance), giving a 3 x 3 matrix.
Sigma = np.cov(W)
print(Sigma)
# Eigen-decomposition of the covariance matrix.
Eigvals, Eigvecs = np.linalg.eig(Sigma)
print(Eigvals)
%matplotlib inline

import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
# 4 x 4 identity matrix: ones on the diagonal, zeros elsewhere.
eye = np.eye(4)
print(eye)
# Compressed-sparse-row (CSR) storage keeps only the nonzero entries.
sparse_mtx = sparse.csr_matrix(eye)
print(sparse_mtx)
# 100 evenly spaced points on [-10, 10] and their sine values.
x = np.linspace(-10,10,100)
y = np.sin(x)
plt.plot(x,y,marker='x')
plt.show()
## Meet the Pandas
<!-- dom:FIGURE: [fig/pandas.jpg, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<img src="fig/pandas.jpg" width=600>
<!-- end figure -->
Another useful Python package is
[pandas](https://pandas.pydata.org/), which is an open source library
providing high-performance, easy-to-use data structures and data
analysis tools for Python. **pandas** stands for panel data, a term borrowed from econometrics and is an efficient library for data analysis with an emphasis on tabular data.
**pandas** has two major classes, the **DataFrame** class with two-dimensional data objects and tabular data organized in columns and the class **Series** with a focus on one-dimensional data objects. Both classes allow you to index data easily as we will see in the examples below.
**pandas** allows you also to perform mathematical operations on the data, spanning from simple reshapings of vectors and matrices to statistical operations.
The following simple example shows how we can, in an easy way make tables of our data. Here we define a data set which includes names, place of birth and date of birth, and displays the data in an easy to read way. We will see repeated use of **pandas**, in particular in connection with classification of data.
import pandas as pd
from IPython.display import display
# Tabular data as a dict mapping column name -> list of column values;
# all lists must have the same length (four rows here).
data = {'First Name': ["Frodo", "Bilbo", "Aragorn II", "Samwise"],
        'Last Name': ["Baggins", "Baggins","Elessar","Gamgee"],
        'Place of birth': ["Shire", "Shire", "Eriador", "Shire"],
        'Date of Birth T.A.': [2968, 2890, 2931, 2980]
        }
data_pandas = pd.DataFrame(data)
# display() renders the DataFrame as a formatted table in the notebook;
# the default index is the integers 0..3.
display(data_pandas)
In the above we have imported **pandas** with the shorthand **pd**, the latter has become the standard way we import **pandas**. We make then a list of various variables
and reorganize the aboves lists into a **DataFrame** and then print out a neat table with specific column labels as *Name*, *place of birth* and *date of birth*.
Displaying these results, we see that the indices are given by the default numbers from zero to three.
**pandas** is extremely flexible and we can easily change the above indices by defining a new type of indexing as
# Rebuild the DataFrame, replacing the default integer index 0..3 with names.
data_pandas = pd.DataFrame(data,index=['Frodo','Bilbo','Aragorn','Sam'])
display(data_pandas)
Thereafter we display the content of the row which begins with the index **Aragorn**
# .loc selects a row by its label-based index.
display(data_pandas.loc['Aragorn'])
We can easily append data to this, for example
# A single new row, again as a dict of column name -> list of values.
new_hobbit = {'First Name': ["Peregrin"],
              'Last Name': ["Took"],
              'Place of birth': ["Shire"],
              'Date of Birth T.A.': [2990]
              }
# DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0;
# pd.concat is the supported way to stack DataFrames row-wise.
data_pandas = pd.concat([data_pandas, pd.DataFrame(new_hobbit, index=['Pippin'])])
display(data_pandas)
Here are other examples where we use the **DataFrame** functionality to handle arrays, now with more interesting features for us, namely numbers. We set up a matrix
of dimensionality $10\times 5$ and compute the mean value and standard deviation of each column. Similarly, we can perform mathematial operations like squaring the matrix elements and many other operations.
import numpy as np
import pandas as pd
from IPython.display import display
# Fix the RNG seed so the random matrix is reproducible between runs.
np.random.seed(100)
# setting up a 10 x 5 matrix
rows = 10
cols = 5
# Entries drawn from the standard normal distribution N(0, 1).
a = np.random.randn(rows,cols)
df = pd.DataFrame(a)
display(df)
# mean() and std() reduce over rows, giving one value per column.
print(df.mean())
print(df.std())
# Arithmetic on a DataFrame is element-wise: this squares every entry.
display(df**2)
Thereafter we can select specific columns only and plot final results
# Attach descriptive column labels and an explicit integer index.
df.columns = ['First', 'Second', 'Third', 'Fourth', 'Fifth']
df.index = np.arange(10)
display(df)
# Columns are selected by label; mean() reduces over the rows.
print(df['Second'].mean() )
# info() summarises dtypes/memory; describe() gives per-column statistics.
print(df.info())
print(df.describe())

from pylab import plt, mpl
# NOTE(review): the 'seaborn' style name was removed in matplotlib >= 3.6;
# on recent versions use 'seaborn-v0_8' instead -- confirm against the
# matplotlib version in use.
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
# cumsum() accumulates each column; plot() draws one line per column.
df.cumsum().plot(lw=2.0, figsize=(10,6))
plt.show()
df.plot.bar(figsize=(10,6), rot=15)
plt.show()
We can produce a $4\times 4$ matrix
b = np.arange(16).reshape((4,4))
print(b)
df1 = pd.DataFrame(b)
print(df1)
and many other operations.
The **Series** class is another important class included in
**pandas**. You can view it as a specialization of **DataFrame** but where
we have just a single column of data. It shares many of the same features as **DataFrame**. As with **DataFrame**,
most operations are vectorized, achieving thereby a high performance when dealing with computations of arrays, in particular labeled arrays.
As we will see below, it also leads to very concise code, close to the mathematical operations we may be interested in.
For multidimensional arrays, we recommend strongly [xarray](http://xarray.pydata.org/en/stable/). **xarray** has much of the same flexibility as **pandas**, but allows for the extension to higher dimensions than two. We will see examples later of the usage of both **pandas** and **xarray**.
## Reading Data and fitting
In order to study various Machine Learning algorithms, we need to
access data. Acccessing data is an essential step in all machine
learning algorithms. In particular, setting up the so-called **design
matrix** (to be defined below) is often the first element we need in
order to perform our calculations. To set up the design matrix means
reading (and later, when the calculations are done, writing) data
in various formats. The formats span from reading files from disk,
loading data from databases and interacting with online sources
like web application programming interfaces (APIs).
In handling various input formats, as discussed above, we will mainly stay with **pandas**,
a Python package which allows us, in a seamless and painless way, to
deal with a multitude of formats, from standard **csv** (comma separated
values) files, via **excel**, **html** to **hdf5** formats. With **pandas**
and the **DataFrame** and **Series** functionalities we are able to convert text data
into the calculational formats we need for a specific algorithm. And our code is going to be
pretty close the basic mathematical expressions.
Our first data set is going to be a classic from nuclear physics, namely all
available data on binding energies. Don't be intimidated if you are not familiar with nuclear physics. It serves simply as an example here of a data set.
We will show some of the
strengths of packages like **Scikit-Learn** in fitting nuclear binding energies to
specific functions using linear regression first. Then, as a teaser, we will show you how
you can easily implement other algorithms like decision trees and random forests and neural networks.
But before we really start with nuclear physics data, let's just look at some simpler polynomial fitting cases, such as,
(don't be offended) fitting straight lines!
### Simple linear regression model using **scikit-learn**
We start with perhaps our simplest possible example, using **Scikit-Learn** to perform linear regression analysis on a data set produced by us.
What follows is a simple Python code where we have defined a function
$y$ in terms of the variable $x$. Both are defined as vectors with $100$ entries.
The numbers in the vector $\hat{x}$ are given
by random numbers generated with a uniform distribution with entries
$x_i \in [0,1]$ (more about probability distribution functions
later). These values are then used to define a function $y(x)$
(tabulated again as a vector) with a linear dependence on $x$ plus a
random noise added via the normal distribution.
The Numpy functions are imported used the **import numpy as np**
statement and the random number generator for the uniform distribution
is called using the function **np.random.rand()**, where we specificy
that we want $100$ random variables. Using Numpy we define
automatically an array with the specified number of elements, $100$ in
our case. With the Numpy function **randn()** we can compute random
numbers with the normal distribution (mean value $\mu$ equal to zero and
variance $\sigma^2$ set to one) and produce the values of $y$ assuming a linear
dependence as function of $x$
$$
y = 2x+N(0,1),
$$
where $N(0,1)$ represents random numbers generated by the normal
distribution. From **Scikit-Learn** we import then the
**LinearRegression** functionality and make a prediction $\tilde{y} =
\alpha + \beta x$ using the function **fit(x,y)**. We call the set of
data $(\hat{x},\hat{y})$ for our training data. The Python package
**scikit-learn** has also a functionality which extracts the above
fitting parameters $\alpha$ and $\beta$ (see below). Later we will
distinguish between training data and test data.
For plotting we use the Python package
[matplotlib](https://matplotlib.org/) which produces publication
quality figures. Feel free to explore the extensive
[gallery](https://matplotlib.org/gallery/index.html) of examples. In
this example we plot our original values of $x$ and $y$ as well as the
prediction **ypredict** ($\tilde{y}$), which attempts at fitting our
data with a straight line.
The Python code follows here.
# Importing various packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
x = np.random.rand(100,1)
y = 2*x+np.random.randn(100,1)
linreg = LinearRegression()
linreg.fit(x,y)
xnew = np.array([[0],[1]])
ypredict = linreg.predict(xnew)
plt.plot(xnew, ypredict, "r-")
plt.plot(x, y ,'ro')
plt.axis([0,1.0,0, 5.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Simple Linear Regression')
plt.show()
This example serves several aims. It allows us to demonstrate several
aspects of data analysis and later machine learning algorithms. The
immediate visualization shows that our linear fit is not
impressive. It goes through the data points, but there are many
outliers which are not reproduced by our linear regression. We could
now play around with this small program and change for example the
factor in front of $x$ and the normal distribution. Try to change the
function $y$ to
$$
y = 10x+0.01 \times N(0,1),
$$
where $x$ is defined as before. Does the fit look better? Indeed, by
reducing the role of the noise given by the normal distribution we see immediately that
our linear prediction seemingly reproduces better the training
set. However, this testing 'by the eye' is obviously not satisfactory in the
long run. Here we have only defined the training data and our model, and
have not discussed a more rigorous approach to the **cost** function.
We need more rigorous criteria in defining whether we have succeeded or
not in modeling our training data. You will be surprised to see that
many scientists seldomly venture beyond this 'by the eye' approach. A
standard approach for the *cost* function is the so-called $\chi^2$
function (a variant of the mean-squared error (MSE))
$$
\chi^2 = \frac{1}{n}
\sum_{i=0}^{n-1}\frac{(y_i-\tilde{y}_i)^2}{\sigma_i^2},
$$
where $\sigma_i^2$ is the variance (to be defined later) of the entry
$y_i$. We may not know the explicit value of $\sigma_i^2$, it serves
however the aim of scaling the equations and make the cost function
dimensionless.
Minimizing the cost function is a central aspect of
our discussions to come. Finding its minima as function of the model
parameters ($\alpha$ and $\beta$ in our case) will be a recurring
theme in these series of lectures. Essentially all machine learning
algorithms we will discuss center around the minimization of the
chosen cost function. This depends in turn on our specific
model for describing the data, a typical situation in supervised
learning. Automatizing the search for the minima of the cost function is a
central ingredient in all algorithms. Typical methods which are
employed are various variants of **gradient** methods. These will be
discussed in more detail later. Again, you'll be surprised to hear that
many practitioners minimize the above function 'by the eye', popularly dubbed as
'chi by the eye'. That is, change a parameter and see (visually and numerically) that
the $\chi^2$ function becomes smaller.
There are many ways to define the cost function. A simpler approach is to look at the relative difference between the training data and the predicted data, that is we define
the relative error (why would we prefer the MSE instead of the relative error?) as
$$
\epsilon_{\mathrm{relative}}= \frac{\vert \hat{y} -\hat{\tilde{y}}\vert}{\vert \hat{y}\vert}.
$$
The squared cost function results in an arithmetic mean-unbiased
estimator, and the absolute-value cost function results in a
median-unbiased estimator (in the one-dimensional case, and a
geometric median-unbiased estimator for the multi-dimensional
case). The squared cost function has the disadvantage that it has the tendency
to be dominated by outliers.
We can modify easily the above Python code and plot the relative error instead
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
x = np.random.rand(100,1)
y = 5*x+0.01*np.random.randn(100,1)
linreg = LinearRegression()
linreg.fit(x,y)
ypredict = linreg.predict(x)
plt.plot(x, np.abs(ypredict-y)/abs(y), "ro")
plt.axis([0,1.0,0.0, 0.5])
plt.xlabel(r'$x$')
plt.ylabel(r'$\epsilon_{\mathrm{relative}}$')
plt.title(r'Relative error')
plt.show()
Depending on the parameter in front of the normal distribution, we may
have a small or larger relative error. Try to play around with
different training data sets and study (graphically) the value of the
relative error.
As mentioned above, **Scikit-Learn** has an impressive functionality.
We can for example extract the values of $\alpha$ and $\beta$ and
their error estimates, or the variance and standard deviation and many
other properties from the statistical data analysis.
Here we show an
example of the functionality of **Scikit-Learn**.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score, mean_squared_log_error, mean_absolute_error
x = np.random.rand(100,1)
y = 2.0+ 5*x+0.5*np.random.randn(100,1)
linreg = LinearRegression()
linreg.fit(x,y)
ypredict = linreg.predict(x)
print('The intercept alpha: \n', linreg.intercept_)
print('Coefficient beta : \n', linreg.coef_)
# The mean squared error
print("Mean squared error: %.2f" % mean_squared_error(y, ypredict))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y, ypredict))
# Mean squared log error
print('Mean squared log error: %.2f' % mean_squared_log_error(y, ypredict) )
# Mean absolute error
print('Mean absolute error: %.2f' % mean_absolute_error(y, ypredict))
plt.plot(x, ypredict, "r-")
plt.plot(x, y ,'ro')
plt.axis([0.0,1.0,1.5, 7.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Linear Regression fit ')
plt.show()
The attribute **coef_** gives us the parameter $\beta$ of our fit while **intercept_** yields
$\alpha$. Depending on the constant in front of the normal distribution, we get values near or far from $\alpha =2$ and $\beta =5$. Try to play around with different parameters in front of the normal distribution. The function **mean_squared_error** gives us the mean squared error, a risk metric corresponding to the expected value of the squared (quadratic) error or loss defined as
$$
MSE(\hat{y},\hat{\tilde{y}}) = \frac{1}{n}
\sum_{i=0}^{n-1}(y_i-\tilde{y}_i)^2,
$$
The smaller the value, the better the fit. Ideally we would like to
have an MSE equal zero. The attentive reader has probably recognized
this function as being similar to the $\chi^2$ function defined above.
The **r2score** function computes $R^2$, the coefficient of
determination. It provides a measure of how well future samples are
likely to be predicted by the model. Best possible score is 1.0 and it
can be negative (because the model can be arbitrarily worse). A
constant model that always predicts the expected value of $\hat{y}$,
disregarding the input features, would get a $R^2$ score of $0.0$.
If $\tilde{\hat{y}}_i$ is the predicted value of the $i-th$ sample and $y_i$ is the corresponding true value, then the score $R^2$ is defined as
$$
R^2(\hat{y}, \tilde{\hat{y}}) = 1 - \frac{\sum_{i=0}^{n - 1} (y_i - \tilde{y}_i)^2}{\sum_{i=0}^{n - 1} (y_i - \bar{y})^2},
$$
where we have defined the mean value of $\hat{y}$ as
$$
\bar{y} = \frac{1}{n} \sum_{i=0}^{n - 1} y_i.
$$
Another quantity that we will meet again in our discussions of regression analysis is
the mean absolute error (MAE), a risk metric corresponding to the expected value of the absolute error loss or what we call the $l1$-norm loss. In our discussion above we presented the relative error.
The MAE is defined as follows
$$
\text{MAE}(\hat{y}, \hat{\tilde{y}}) = \frac{1}{n} \sum_{i=0}^{n-1} \left| y_i - \tilde{y}_i \right|.
$$
We present the
squared logarithmic (quadratic) error
$$
\text{MSLE}(\hat{y}, \hat{\tilde{y}}) = \frac{1}{n} \sum_{i=0}^{n - 1} (\log_e (1 + y_i) - \log_e (1 + \tilde{y}_i) )^2,
$$
where $\log_e (x)$ stands for the natural logarithm of $x$. This error
estimate is best to use when targets having exponential growth, such
as population counts, average sales of a commodity over a span of
years etc.
Finally, another cost function is the Huber cost function used in robust regression.
The rationale behind this possible cost function is its reduced
sensitivity to outliers in the data set. In our discussions on
dimensionality reduction and normalization of data we will meet other
ways of dealing with outliers.
The Huber cost function is defined as
$$
H_{\delta}(a)=\begin{cases} \frac{1}{2}a^{2} & \text{for } |a|\leq \delta, \\ \delta\left(|a|-\frac{1}{2}\delta\right) & \text{otherwise.} \end{cases}
$$
Here $a=\boldsymbol{y} - \boldsymbol{\tilde{y}}$.
We will discuss in more
detail these and other functions in the various lectures. We conclude this part with another example. Instead of
a linear $x$-dependence we study now a cubic polynomial and use the polynomial regression analysis tools of scikit-learn.
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
x=np.linspace(0.02,0.98,200)
noise = np.asarray(random.sample((range(200)),200))
y=x**3*noise
yn=x**3*100
poly3 = PolynomialFeatures(degree=3)
X = poly3.fit_transform(x[:,np.newaxis])
clf3 = LinearRegression()
clf3.fit(X,y)
Xplot=poly3.fit_transform(x[:,np.newaxis])
poly3_plot=plt.plot(x, clf3.predict(Xplot), label='Cubic Fit')
plt.plot(x,yn, color='red', label="True Cubic")
plt.scatter(x, y, label='Data', color='orange', s=15)
plt.legend()
plt.show()
def error(a):
    """Return the magnitude of the mean relative deviation of the noisy
    data ``y`` from the noise-free cubic curve ``yn``.

    The previous version recomputed the identical error vector once per
    element of ``y`` inside a loop (and would raise NameError for empty
    ``y``); a single vectorized expression is equivalent.

    NOTE(review): the parameter ``a`` is unused -- the function reads the
    module-level arrays ``y`` and ``yn`` directly.  The signature is kept
    unchanged so the call below still works; confirm whether ``a`` was
    meant to replace the global ``y``.
    """
    err = (y - yn) / yn
    # |sum(err)| / n : positive and negative deviations may cancel.
    return abs(np.sum(err)) / len(err)
print(error(y))
### To our real data: nuclear binding energies. Brief reminder on masses and binding energies
Let us now dive into nuclear physics and remind ourselves briefly about some basic features about binding
energies. A basic quantity which can be measured for the ground
states of nuclei is the atomic mass $M(N, Z)$ of the neutral atom with
atomic mass number $A$ and charge $Z$. The number of neutrons is $N$. There are indeed several sophisticated experiments worldwide which allow us to measure this quantity to high precision (parts per million even).
Atomic masses are usually tabulated in terms of the mass excess defined by
$$
\Delta M(N, Z) = M(N, Z) - uA,
$$
where $u$ is the Atomic Mass Unit
$$
u = M(^{12}\mathrm{C})/12 = 931.4940954(57) \hspace{0.1cm} \mathrm{MeV}/c^2.
$$
The nucleon masses are
$$
m_p = 1.00727646693(9)u,
$$
and
$$
m_n = 939.56536(8)\hspace{0.1cm} \mathrm{MeV}/c^2 = 1.0086649156(6)u.
$$
In the [2016 mass evaluation by W.J.Huang, G.Audi, M.Wang, F.G.Kondev, S.Naimi and X.Xu](http://nuclearmasses.org/resources_folder/Wang_2017_Chinese_Phys_C_41_030003.pdf)
there are data on masses and decays of 3437 nuclei.
The nuclear binding energy is defined as the energy required to break
up a given nucleus into its constituent parts of $N$ neutrons and $Z$
protons. In terms of the atomic masses $M(N, Z)$ the binding energy is
defined by
$$
BE(N, Z) = ZM_H c^2 + Nm_n c^2 - M(N, Z)c^2 ,
$$
where $M_H$ is the mass of the hydrogen atom and $m_n$ is the mass of the neutron.
In terms of the mass excess the binding energy is given by
$$
BE(N, Z) = Z\Delta_H c^2 + N\Delta_n c^2 -\Delta(N, Z)c^2 ,
$$
where $\Delta_H c^2 = 7.2890$ MeV and $\Delta_n c^2 = 8.0713$ MeV.
A popular and physically intuitive model which can be used to parametrize
the experimental binding energies as function of $A$, is the so-called
**liquid drop model**. The ansatz is based on the following expression
$$
BE(N,Z) = a_1A-a_2A^{2/3}-a_3\frac{Z^2}{A^{1/3}}-a_4\frac{(N-Z)^2}{A},
$$
where $A$ stands for the number of nucleons and the $a_i$s are parameters which are determined by a fit
to the experimental data.
To arrive at the above expression we have assumed that we can make the following assumptions:
* There is a volume term $a_1A$ proportional with the number of nucleons (the energy is also an extensive quantity). When an assembly of nucleons of the same size is packed together into the smallest volume, each interior nucleon has a certain number of other nucleons in contact with it. This contribution is proportional to the volume.
* There is a surface energy term $a_2A^{2/3}$. The assumption here is that a nucleon at the surface of a nucleus interacts with fewer other nucleons than one in the interior of the nucleus and hence its binding energy is less. This surface energy term takes that into account and is therefore negative and is proportional to the surface area.
* There is a Coulomb energy term $a_3\frac{Z^2}{A^{1/3}}$. The electric repulsion between each pair of protons in a nucleus yields less binding.
* There is an asymmetry term $a_4\frac{(N-Z)^2}{A}$. This term is associated with the Pauli exclusion principle and reflects the fact that the proton-neutron interaction is more attractive on the average than the neutron-neutron and proton-proton interactions.
We could also add a so-called pairing term, which is a correction term that
arises from the tendency of proton pairs and neutron pairs to
occur. An even number of particles is more stable than an odd number.
### Organizing our data
Let us start with reading and organizing our data.
We start with the compilation of masses and binding energies from 2016.
After having downloaded this file to our own computer, we are now ready to read the file and start structuring our data.
We start with preparing folders for storing our calculations and the data file over masses and binding energies. We import also various modules that we will find useful in order to present various Machine Learning methods. Here we focus mainly on the functionality of **scikit-learn**.
# Common imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.linear_model as skl
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
    """Return the path of figure file *fig_id* inside the FIGURE_ID folder."""
    return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
    """Return the path of data file *dat_id* inside the DATA_ID folder."""
    return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
    """Save the current matplotlib figure as <FIGURE_ID>/<fig_id>.png."""
    plt.savefig(image_path(fig_id) + ".png", format='png')
# Open the 2016 atomic-mass evaluation table for reading.
infile = open(data_path("MassEval2016.dat"),'r')
Before we proceed, we define also a function for making our plots. You can obviously avoid this and simply set up various **matplotlib** commands every time you need them. You may however find it convenient to collect all such commands in one function and simply call this function.
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
def MakePlot(x, y, styles, labels, axlabels):
    """Draw several (x, y) series on a single figure.

    Each entry of *x*/*y* is plotted with the matching entry of
    *styles* and *labels*; *axlabels* holds the x- and y-axis titles.
    The legend location is chosen automatically (loc=0).
    """
    plt.figure(figsize=(10, 6))
    n_series = len(x)
    for idx in range(n_series):
        plt.plot(x[idx], y[idx], styles[idx], label=labels[idx])
    plt.xlabel(axlabels[0])
    plt.ylabel(axlabels[1])
    plt.legend(loc=0)
Our next step is to read the data on experimental binding energies and
reorganize them as functions of the mass number $A$, the number of
protons $Z$ and neutrons $N$ using **pandas**. Before we do this it is
always useful (unless you have a binary file or other types of compressed
data) to actually open the file and simply take a look at it!
In particular, the program that outputs the final nuclear masses is written in Fortran with a specific format. It means that we need to figure out the format and which columns contain the data we are interested in. Pandas comes with a function that reads formatted output. After having admired the file, we are now ready to start massaging it with **pandas**. The file begins with some basic format information.
"""
This is taken from the data file of the mass 2016 evaluation.
All files are 3436 lines long with 124 character per line.
Headers are 39 lines long.
col 1 : Fortran character control: 1 = page feed 0 = line feed
format : a1,i3,i5,i5,i5,1x,a3,a4,1x,f13.5,f11.5,f11.3,f9.3,1x,a2,f11.3,f9.3,1x,i3,1x,f12.5,f11.5
These formats are reflected in the pandas widths variable below, see the statement
widths=(1,3,5,5,5,1,3,4,1,13,11,11,9,1,2,11,9,1,3,1,12,11,1),
Pandas has also a variable header, with length 39 in this case.
"""
The data we are interested in are in columns 2, 3, 4 and 11, giving us
the number of neutrons, protons, mass numbers and binding energies,
respectively. We add also for the sake of completeness the element name. The data are in fixed-width formatted lines and we will
convert them into the **pandas** DataFrame structure.
# Read the experimental data with Pandas
Masses = pd.read_fwf(infile, usecols=(2,3,4,6,11),
names=('N', 'Z', 'A', 'Element', 'Ebinding'),
widths=(1,3,5,5,5,1,3,4,1,13,11,11,9,1,2,11,9,1,3,1,12,11,1),
header=39,
index_col=False)
# Extrapolated values are indicated by '#' in place of the decimal place, so
# the Ebinding column won't be numeric. Coerce to float and drop these entries.
Masses['Ebinding'] = pd.to_numeric(Masses['Ebinding'], errors='coerce')
Masses = Masses.dropna()
# Convert from keV to MeV.
Masses['Ebinding'] /= 1000
# Group the DataFrame by nucleon number, A.
Masses = Masses.groupby('A')
# Find the rows of the grouped DataFrame with the maximum binding energy.
Masses = Masses.apply(lambda t: t[t.Ebinding==t.Ebinding.max()])
We have now read in the data, grouped them according to the variables we are interested in.
We see how easy it is to reorganize the data using **pandas**. If we
were to do these operations in C/C++ or Fortran, we would have had to
write various functions/subroutines which perform the above
reorganizations for us. Having reorganized the data, we can now start
to make some simple fits using both the functionalities in **numpy** and
**Scikit-Learn** afterwards.
Now we define five variables which contain
the number of nucleons $A$, the number of protons $Z$ and the number of neutrons $N$, the element name and finally the energies themselves.
A = Masses['A']
Z = Masses['Z']
N = Masses['N']
Element = Masses['Element']
Energies = Masses['Ebinding']
print(Masses)
The next step, and we will define this mathematically later, is to set up the so-called **design matrix**. We will throughout call this matrix $\boldsymbol{X}$.
It has dimensionality $p\times n$, where $n$ is the number of data points and $p$ are the so-called predictors. In our case here they are given by the number of polynomials in $A$ we wish to include in the fit.
# Now we set up the design matrix X
# Each column is one basis function of an A-only fit: a constant
# (intercept), the volume term ~A, the surface term ~A^{2/3}, and two
# inverse powers of A.
# NOTE(review): the liquid-drop Coulomb (Z^2/A^{1/3}) and asymmetry
# ((N-Z)^2/A) terms are represented here only through pure powers of A
# -- confirm this simplified parametrization is intended.
X = np.zeros((len(A),5))
X[:,0] = 1                # intercept column
X[:,1] = A                # volume term
X[:,2] = A**(2.0/3.0)     # surface term
X[:,3] = A**(-1.0/3.0)
X[:,4] = A**(-1.0)
With **scikitlearn** we are now ready to use linear regression and fit our data.
clf = skl.LinearRegression().fit(X, Energies)
fity = clf.predict(X)
Pretty simple!
Now we can print measures of how our fit is doing, the coefficients from the fits and plot the final fit together with our data.
# The mean squared error
print("Mean squared error: %.2f" % mean_squared_error(Energies, fity))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(Energies, fity))
# Mean absolute error
print('Mean absolute error: %.2f' % mean_absolute_error(Energies, fity))
print(clf.coef_, clf.intercept_)
Masses['Eapprox'] = fity
# Generate a plot comparing the experimental with the fitted values.
fig, ax = plt.subplots()
ax.set_xlabel(r'$A = N + Z$')
ax.set_ylabel(r'$E_\mathrm{bind}\,/\mathrm{MeV}$')
ax.plot(Masses['A'], Masses['Ebinding'], alpha=0.7, lw=2,
label='Ame2016')
ax.plot(Masses['A'], Masses['Eapprox'], alpha=0.7, lw=2, c='m',
label='Fit')
ax.legend()
save_fig("Masses2016")
plt.show()
### Seeing the wood for the trees
As a teaser, let us now see how we can do this with decision trees using **scikit-learn**. Later we will switch to so-called **random forests**!
#Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
regr_1=DecisionTreeRegressor(max_depth=5)
regr_2=DecisionTreeRegressor(max_depth=7)
regr_3=DecisionTreeRegressor(max_depth=9)
regr_1.fit(X, Energies)
regr_2.fit(X, Energies)
regr_3.fit(X, Energies)
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
y_3=regr_3.predict(X)
Masses['Eapprox'] = y_3
# Plot the results
plt.figure()
plt.plot(A, Energies, color="blue", label="Data", linewidth=2)
plt.plot(A, y_1, color="red", label="max_depth=5", linewidth=2)
plt.plot(A, y_2, color="green", label="max_depth=7", linewidth=2)
plt.plot(A, y_3, color="m", label="max_depth=9", linewidth=2)
plt.xlabel("$A$")
plt.ylabel("$E$[MeV]")
plt.title("Decision Tree Regression")
plt.legend()
save_fig("Masses2016Trees")
plt.show()
print(Masses)
print(np.mean( (Energies-y_1)**2))
### And what about using neural networks?
The **seaborn** package allows us to visualize data in an efficient way. Note that we use **scikit-learn**'s multi-layer perceptron (or feed forward neural network)
functionality.
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import accuracy_score
import seaborn as sns
X_train = X
Y_train = Energies
n_hidden_neurons = 100
epochs = 100
# store models for later use
eta_vals = np.logspace(-5, 1, 7)
lmbd_vals = np.logspace(-5, 1, 7)
# store the models for later use
DNN_scikit = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
sns.set()
for i, eta in enumerate(eta_vals):
for j, lmbd in enumerate(lmbd_vals):
dnn = MLPRegressor(hidden_layer_sizes=(n_hidden_neurons), activation='logistic',
alpha=lmbd, learning_rate_init=eta, max_iter=epochs)
dnn.fit(X_train, Y_train)
DNN_scikit[i][j] = dnn
train_accuracy[i][j] = dnn.score(X_train, Y_train)
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
## A first summary
The aim behind these introductory words was to present to you various
Python libraries and their functionalities, in particular libraries like
**numpy**, **pandas**, **xarray** and **matplotlib** and other that make our life much easier
in handling various data sets and visualizing data.
Furthermore,
**Scikit-Learn** allows us with few lines of code to implement popular
Machine Learning algorithms for supervised learning. Later we will meet **Tensorflow**, a powerful library for deep learning.
Now it is time to dive more into the details of various methods. We will start with linear regression and try to take a deeper look at what it entails. | [
"[email protected]"
] | |
2fba0ba36775b47980f0366230afbcb1da18fd79 | 749efa5eb1ea53a5372b31832ed74d85dc15f641 | /temporal_graph/structure_network/structure_graph.py | 75ee3f2cae8ea41f57d59fa4cfb7acf6a2211fdc | [] | no_license | summukhe/TemporalGraph | ac8f7be103620b5d53aeae6e6f462d2f74cd5f8c | 722fedaf737950ac32d4dabd830afbd241a03a2c | refs/heads/master | 2020-05-05T03:17:05.918359 | 2019-06-01T05:40:49 | 2019-06-01T05:40:49 | 179,666,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,220 | py | import numpy as np
from copy import deepcopy
from temporal_graph.network_analysis import GeometricGraph3d
from temporal_graph.spatial_ds import *
from temporal_graph.pdb_processor import *
from temporal_graph.force_field import *
__version__ = "1.0"
__all__ = ['contact_graph',
'potential_contact_graph',
'contact_energy_graph']
def contact_graph(pdb_structure,
                  cutoff=12,
                  potential='charmm',
                  weight_normalizer=None):
    """Build a residue contact graph for a structure.

    :param pdb_structure: a PDBStructure (full atom) or CaTrace
        (C-alpha only) object.  The 'energy' potential needs the full
        atom description; with only a CaTrace it falls back to the
        'charmm' pair potential.
    :param cutoff: contact distance threshold (Angstrom, > 0), either a
        number or a DistanceCutoff instance.
    :param potential: 'energy', 'charmm' or 'mj' -- selects the edge
        weighting scheme.
    :param weight_normalizer: FFNormalizer applied to pairwise energies.
        A fresh FFNormalizer() is created when omitted.  Previously a
        single FFNormalizer() instance was created at definition time
        and shared by every call (mutable-default pitfall).
    :return: an undirected GeometricGraph3d with one vertex per residue.
    """
    assert isinstance(pdb_structure, PDBStructure) or \
           isinstance(pdb_structure, CaTrace)
    if weight_normalizer is None:
        weight_normalizer = FFNormalizer()
    if isinstance(cutoff, DistanceCutoff):
        cutoff = cutoff.cutoff
    assert cutoff > 0
    assert potential in ['energy', 'charmm', 'mj']
    if potential == 'energy':
        if isinstance(pdb_structure, CaTrace):
            # No side-chain atoms available: fall back to the charmm
            # pair potential on the C-alpha trace.
            return potential_contact_graph(pdb_structure,
                                           cutoff=DistanceCutoff(def_cutoff=cutoff),
                                           potential='charmm')
        g = contact_energy_graph(pdb_structure,
                                 contact_radius=cutoff,
                                 energy_score=weight_normalizer)
    else:
        if isinstance(pdb_structure, PDBStructure):
            structure = pdb_to_catrace(pdb_structure)
        else:
            # Copy so the caller's trace is never mutated downstream.
            structure = deepcopy(pdb_structure)
        g = potential_contact_graph(structure,
                                    cutoff=DistanceCutoff(def_cutoff=cutoff),
                                    potential=potential)
    return g
def potential_contact_graph(ca_trace, cutoff=None, potential='mj'):
    """Build an undirected contact graph over a C-alpha trace, with each
    edge weighted by a statistical pair potential.

    :param ca_trace: CaTrace instance providing residue ids, amino acid
        types and C-alpha coordinates.
    :param cutoff: DistanceCutoff instance giving the (amino-pair
        specific) contact distance; a default DistanceCutoff() is
        created when omitted.  Previously a single instance created at
        definition time was shared by every call.
    :param potential: pair potential type, 'mj' or 'charmm'.
    :return: GeometricGraph3d with one vertex per residue and an edge
        for every residue pair within its cutoff distance.
    """
    if cutoff is None:
        cutoff = DistanceCutoff()
    assert isinstance(ca_trace, CaTrace)
    assert isinstance(cutoff, DistanceCutoff)
    assert potential in ['mj', 'charmm']
    res_ids = ca_trace.residue_ids
    c_graph = GeometricGraph3d(directed=False)
    for r in res_ids:
        c_graph.add_vertex(ca_trace.key(r),
                           attribute=Coordinate3d(*ca_trace.xyz(r)))
    for ri in res_ids:
        amino_i = ca_trace.get_amino(ri)
        x_i, y_i, z_i = ca_trace.xyz(ri)
        for rj in res_ids:
            # Visit each unordered pair exactly once.
            if ri >= rj:
                continue
            amino_j = ca_trace.get_amino(rj)
            x_j, y_j, z_j = ca_trace.xyz(rj)
            c = cutoff(amino_i, amino_j)
            d = np.sqrt((x_i - x_j)**2 + (y_i - y_j)**2 + (z_i - z_j)**2)
            if d <= c:
                p = get_pair_potential(amino_i, amino_j, d, pot_type=potential)
                # Use ca_trace.key() for the edge endpoints so they are
                # guaranteed to match the vertex ids added above (the
                # old code rebuilt the key by hand as '%s%d', which
                # silently diverges if the key format ever changes).
                c_graph.add_edge(ca_trace.key(ri),
                                 ca_trace.key(rj),
                                 weight=p)
    return c_graph
def contact_energy_graph(pdb_struct,
                         contact_radius=12,
                         epsilon=1.,
                         elec_only=False,
                         summed=True,
                         energy_score=None):
    """Build an undirected residue contact graph where each edge weight
    is the normalized all-atom pairwise force-field energy.

    :param pdb_struct: full-atom PDBStructure.
    :param contact_radius: C-alpha distance (Angstrom) within which two
        residues are considered neighbors.
    :param epsilon: dielectric constant passed to FFManager.energy.
    :param elec_only: if True, use only the electrostatic contribution.
    :param summed: forwarded to FFManager.energy (see force_field).
    :param energy_score: FFNormalizer mapping raw energies to edge
        weights; a fresh FFNormalizer() is created when omitted.
        Previously a single instance created at definition time was
        shared by every call (mutable-default pitfall).
    :return: GeometricGraph3d keyed by pdb_struct.key(residue).
    """
    if energy_score is None:
        energy_score = FFNormalizer()
    assert isinstance(pdb_struct, PDBStructure)
    assert isinstance(energy_score, FFNormalizer)
    # Work on the C-alpha trace for the spatial neighbor search.
    ca_trace = pdb_to_catrace(pdb_struct)
    residues = ca_trace.residue_ids
    x_lst, y_lst, z_lst = [], [], []
    for r in residues:
        x, y, z = ca_trace.xyz(r)
        x_lst.append(x)
        y_lst.append(y)
        z_lst.append(z)
    # Bounding-box grid (2 A spacing) for fast neighbor lookup.
    grid = Grid3D(max_coord=Coordinate3d(np.max(x_lst), np.max(y_lst), np.max(z_lst)),
                  min_coord=Coordinate3d(np.min(x_lst), np.min(y_lst), np.min(z_lst)),
                  spacing=2)
    for r in residues:
        grid.register_obj(r, Coordinate3d(*ca_trace.xyz(r)))
    # neighbors[r1][r2] accumulates the total atom-atom energy between
    # residues r1 and r2.  NOTE(review): if grid.neighbors() is
    # symmetric, each pair is evaluated from both sides; the duplicate
    # add_edge below is presumably idempotent for an undirected graph
    # -- confirm against GeometricGraph3d.
    neighbors = dict()
    for r1 in residues:
        neighbors[r1] = {r2: 0 for r2 in grid.neighbors(r1, contact_radius)}
    ff = FFManager()
    for r1 in neighbors:
        residue_name1 = pdb_struct.residue_name(r1)
        atom_names1 = pdb_struct.atom_names(r1)
        for r2 in neighbors[r1]:
            residue_name2 = pdb_struct.residue_name(r2)
            atom_names2 = pdb_struct.atom_names(r2)
            # Sum the force-field energy over every atom pair.
            for atom1 in atom_names1:
                for atom2 in atom_names2:
                    d = distance(Coordinate3d(*pdb_struct.xyz(r1, atom1)),
                                 Coordinate3d(*pdb_struct.xyz(r2, atom2)))
                    neighbors[r1][r2] += ff.energy(residue_name1,
                                                   atom1,
                                                   residue_name2,
                                                   atom2,
                                                   distance=d,
                                                   epsilon=epsilon,
                                                   elec_only=elec_only,
                                                   summed=summed)
    c_graph = GeometricGraph3d(directed=False)
    for r in residues:
        c_graph.add_vertex(pdb_struct.key(r),
                           attribute=Coordinate3d(*pdb_struct.xyz(r, 'CA')))
    for r1 in neighbors:
        for r2 in neighbors[r1]:
            c_graph.add_edge(pdb_struct.key(r1),
                             pdb_struct.key(r2),
                             weight=energy_score(neighbors[r1][r2]))
    return c_graph
| [
"[email protected]"
] | |
58e4e79d5aff5d675c44d475f8eb78c71f373b12 | 20d9130fdc21756c4f8fe255583922352f5c5762 | /src/DIRAC/DataManagementSystem/Service/LcgFileCatalogProxyHandler.py | ba7ea68222b2cc3e09dd20556e2c46165ff46f1b | [] | no_license | bopopescu/bes3-jinr | 095314e43f41f08bd48b248fe3ca627a5c009f58 | fdfd852c92a56192b8ee9970b66f0136e6e0afff | refs/heads/master | 2022-11-26T06:01:36.718508 | 2014-03-17T06:03:50 | 2014-03-17T06:03:50 | 282,113,617 | 0 | 0 | null | 2020-07-24T03:30:10 | 2020-07-24T03:30:09 | null | UTF-8 | Python | false | false | 3,628 | py | ########################################################################
# $HeadURL $
# File: LcgFileCatalogProxyHandler.py
########################################################################
""" :mod: LcgFileCatalogProxyHandler
================================
.. module: LcgFileCatalogProxyHandler
:synopsis: This is a service which represents a DISET proxy to the LCG File Catalog
"""
## imports
import os
from types import StringType, DictType, TupleType
## from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
__RCSID__ = "6bbf469 (2013-01-31 23:24:59 +0100) Andrei Tsaregorodtsev <[email protected]>"
def initializeLcgFileCatalogProxyHandler( _serviceInfo ):
  """ Service initialisation -- no setup is required for this handler.

  :param _serviceInfo: DISET service information structure (unused)
  :return: S_OK always
  """
  return S_OK()
class LcgFileCatalogProxyHandler( RequestHandler ):
  """
  .. class:: LcgFileCatalogProxyHandler

  DISET request handler that acts as a proxy to the LCG File Catalog:
  each catalog call is executed using the *calling client's* own VOMS
  proxy rather than the server credentials.
  """
  # DISET type check for export_callProxyMethod: ( name, args, kargs )
  types_callProxyMethod = [ StringType, TupleType, DictType ]

  def export_callProxyMethod( self, name, args, kargs ):
    """ A generic method to call methods of the Storage Element.

    :param str name: catalog method name to invoke
    :param tuple args: positional arguments for the catalog method
    :param dict kargs: keyword arguments for the catalog method
    :return: S_OK( result ) of the catalog call, or S_ERROR
    """
    # Run the wrapper in a separate process with a 120 second timeout so
    # a hung catalog call cannot block the service.
    res = pythonCall( 120, self.__proxyWrapper, name, args, kargs )
    if res['OK']:
      return res['Value']
    else:
      return res

  def __proxyWrapper( self, name, args, kwargs ):
    """ The wrapper will obtain the client proxy and set it up in the environment.
        The required functionality is then executed and returned to the client.

    :param self: self reference
    :param str name: fcn name
    :param tuple args: fcn args
    :param dict kwargs: fcn keyword args
    """
    # Download the client proxy and point X509_USER_PROXY at it.
    res = self.__prepareSecurityDetails()
    if not res['OK']:
      return res
    try:
      fileCatalog = FileCatalog( ['LcgFileCatalogCombined'] )
      method = getattr( fileCatalog, name )
    except AttributeError, error:
      errStr = "LcgFileCatalogProxyHandler.__proxyWrapper: No method named %s" % name
      gLogger.exception( errStr, name, error )
      return S_ERROR( errStr )
    try:
      result = method( *args, **kwargs )
      return result
    except Exception, error:
      errStr = "LcgFileCatalogProxyHandler.__proxyWrapper: Exception while performing %s" % name
      gLogger.exception( errStr, name, error )
      return S_ERROR( errStr )

  def __prepareSecurityDetails( self ):
    """ Obtains the connection details for the client """
    try:
      credDict = self.getRemoteCredentials()
      clientDN = credDict[ 'DN' ]
      clientUsername = credDict['username']
      clientGroup = credDict['group']
      gLogger.debug( "Getting proxy for %s@%s (%s)" % ( clientUsername, clientGroup, clientDN ) )
      # Fetch a VOMS proxy for the calling user/group from the proxy manager.
      res = gProxyManager.downloadVOMSProxy( clientDN, clientGroup )
      if not res['OK']:
        return res
      chain = res['Value']
      proxyBase = "/tmp/proxies"
      if not os.path.exists( proxyBase ):
        os.makedirs( proxyBase )
      # One proxy file per user/group pair; concurrent requests for the
      # same pair share (and may overwrite) the same file.
      proxyLocation = "%s/%s-%s" % ( proxyBase, clientUsername, clientGroup )
      gLogger.debug( "Obtained proxy chain, dumping to %s." % proxyLocation )
      res = gProxyManager.dumpProxyToFile( chain, proxyLocation )
      if not res['OK']:
        return res
      gLogger.debug( "Updating environment." )
      # NOTE(review): mutates the process-wide environment; assumes only
      # one request at a time uses the catalog with these credentials.
      os.environ['X509_USER_PROXY'] = res['Value']
      return res
    except Exception, error:
      exStr = "__getConnectionDetails: Failed to get client connection details."
      gLogger.exception( exStr, '', error )
      return S_ERROR( exStr )
| [
"[email protected]"
] | |
ff091989b08d7360d35edcde8d929dbfa9826630 | 185960fa68ac9ff6377eff50e3afc3900827c264 | /one_model_n_data.py | 29e8ab68e888ca6f56013c0a4b34873b52af055a | [] | no_license | enjoysport2022/Incident_Detection | 0a9158e163ff5c74c7d2bb1ebae6fad8adb53de4 | 87b6848ae3f038e7eab9c0c40c0ea19efa27208c | refs/heads/master | 2023-02-22T11:29:00.385996 | 2015-12-07T03:21:07 | 2015-12-07T03:21:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,730 | py | # -*- coding: UTF-8 -*-
# Running this program requires numpy, scipy and sklearn to be installed.
# Required library imports:
import time
import requests
import conf
start=time.clock()
import csv
from sklearn.svm import SVC
import numpy as np
from sklearn import preprocessing,neighbors
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
####################################################################################
# Feature reading for detector 7606
# featureList
allElectronicsData=open(r'F:\TrafficFlow\pems\7606\7606bu.csv','rb')# ****bu.csv feature-value file
reader=csv.reader(allElectronicsData)
headers=reader.next()
temp1=[]
# temp2=[]
for row in reader:
    c1=[]
    c2=[]
    for i in range(1,9):# read columns 2 through 9
        b=0
        # each feature is the difference between a column and the one 8 to its right
        b=float(row[i])-float(row[i+8])
        c1.append(b)
    temp1.append(c1)
    # for i in range(9,17):# read columns 10 through 16
    #     b=0
    #     b=float(row[i])
    #     c2.append(b)
    # temp2.append(c2)
# print("temp1:")
# print(temp1)
# print("temp2:")
# print(temp2)
n=len(temp1)
# print(n)
# Build sliding windows of 5 consecutive rows (40 features per sample).
featureList1=[]
for i in range(0,n-4):
    k1=[]
    k1=temp1[i]
    k2=temp1[i+1]
    k3=temp1[i+2]
    k4=temp1[i+3]
    k5=temp1[i+4]
    # NOTE(review): k1 aliases temp1[i], so extend() mutates temp1 in
    # place; this only works because rows i+1..i+4 are still unmodified
    # when iteration i runs.
    k1.extend(k2)
    k1.extend(k3)
    k1.extend(k4)
    k1.extend(k5)
    featureList1.append(k1)
featureList1=np.array(featureList1)
# print("featureList1:")
# temp1=np.array(temp1)
# print(featureList1.shape)
# print(len(featureList))
# print("temp2:")
# temp2=np.array(temp2)
# print(temp2)
# featureList=temp1-temp2
# print(featureList.shape)
# f=open('featurelist.txt','w')
# f.write(str(featureList))
# f.close()
####################################################################################
# Feature reading for detector 6080 (same processing as detector 7606)
# featureList
allElectronicsData=open(r'F:\TrafficFlow\pems\6080\6080bu.csv','rb')# ****bu.csv feature-value file
reader=csv.reader(allElectronicsData)
headers=reader.next()
temp1=[]
# temp2=[]
for row in reader:
    c1=[]
    c2=[]
    for i in range(1,9):# read columns 2 through 9
        b=0
        b=float(row[i])-float(row[i+8])
        c1.append(b)
    temp1.append(c1)
    # for i in range(9,17):# read columns 10 through 16
    #     b=0
    #     b=float(row[i])
    #     c2.append(b)
    # temp2.append(c2)
# print("temp1:")
# print(temp1)
# print("temp2:")
# print(temp2)
n=len(temp1)
# print(n)
# Build sliding windows of 5 consecutive rows (40 features per sample).
featureList2=[]
for i in range(0,n-4):
    k1=[]
    k1=temp1[i]
    k2=temp1[i+1]
    k3=temp1[i+2]
    k4=temp1[i+3]
    k5=temp1[i+4]
    # NOTE(review): extend() mutates temp1[i] in place -- see the note
    # on the detector-7606 block above; safe only in this forward order.
    k1.extend(k2)
    k1.extend(k3)
    k1.extend(k4)
    k1.extend(k5)
    featureList2.append(k1)
featureList2=np.array(featureList2)
# print("featureList2:")
# temp1=np.array(temp1)
# print(featureList2.shape)
# print(len(featureList))
# print("temp2:")
# temp2=np.array(temp2)
# print(temp2)
# featureList=temp1-temp2
# print(featureList.shape)
# f=open('featurelist.txt','w')
# f.write(str(featureList))
# f.close()
# Stack the samples of both detectors into one training matrix.
featureList=np.vstack((featureList1,featureList2))
# print(featureList.shape)
##########################################################################################
# Label reading for detector 7606
# labelList
incidentData=open(r'F:\TrafficFlow\pems\7606\7606label.csv','rb')# ****label.csv label file
label=csv.reader(incidentData)
headers=label.next()
# print(headers)
labelList1=[]
for row in label:
    # the label is the last column of each row
    labelList1.append(row[len(row)-1])
# print(labelList)
lb=preprocessing.LabelBinarizer()
dummyY1=lb.fit_transform(labelList1)
# dummyY=np.array(dummyY)
# print(dummyY)
# print(len(dummyY))
# print("dummyY:"+str(dummyY))
##########################################################################################
# Label reading for detector 6080
# labelList
incidentData=open(r'F:\TrafficFlow\pems\6080\6080label.csv','rb')# ****label.csv label file
label=csv.reader(incidentData)
headers=label.next()
# print(headers)
labelList2=[]
for row in label:
    labelList2.append(row[len(row)-1])
# print(labelList)
lb=preprocessing.LabelBinarizer()
dummyY2=lb.fit_transform(labelList2)
# dummyY=np.array(dummyY)
# print(dummyY)
# print(len(dummyY))
# print("dummyY:"+str(dummyY))
# NOTE(review): features use sliding windows (n-4 samples per detector)
# while labels are read row-for-row; assumes the label files already
# account for the windowing -- verify that X and y lengths match.
dummyY=np.vstack((dummyY1,dummyY2))
# print(dummyY.shape)
# Split the data into training and test samples:
X_train, X_test, y_train, y_test = train_test_split(featureList, dummyY, test_size=0.1)
print("Fitting the classifier to the training set---->")
# Support vector machine model:
# clf=SVC(kernel='rbf',C=1e3,gamma=0.001)
# kernel, C and gamma are tunable
# k-nearest-neighbour algorithm:
n_neighbors = 15
# n_neighbors is tunable
weights='uniform'
# weights='distance'
clf = neighbors.KNeighborsClassifier(n_neighbors,weights=weights)
# Random forest classifier:
# clf = RandomForestClassifier(n_estimators=10)
# Decision tree algorithm:
# clf = DecisionTreeClassifier()
##################################################
# Train the model:
clf.fit(X_train,y_train)
########################################################
####################################################
# Model testing process
print("Predicting test set--->")
predictions=[]
for x in range(len(X_test)):
    result=clf.predict(X_test[x])
    predictions.append(result)
    print('>predicted= '+repr(result)+',actual='+repr(y_test[x][-1]))
# Confusion matrix of the model
y_pred = clf.predict(X_test)
conm=confusion_matrix(y_test, y_pred, labels=range(2))
print(conm)
# Detection rate and false alarm rate:
a=float(conm[0][0])
b=float(conm[0][1])
c=float(conm[1][0])
d=float(conm[1][1])
# NOTE(review): with labels=range(2), row 0 of conm is class "0"; the
# rates below therefore treat class 0 as the incident class -- verify.
DR=(a/(a+c))
DR=DR*100
FAR=(b/(a+b))
FAR=FAR*100
print('Detection rate: '+repr(DR)+'%')
print('False alarm rate: '+repr(FAR)+'%')
# print(accuracy)
############################################################
######################################################
# Read new sample data (detector 3245) and run predictions:
# p_featureList
allElectronicsData=open(r'F:\TrafficFlow\pems\3245\3245bu.csv','rb')# ****bu.csv feature-value file
reader=csv.reader(allElectronicsData)
headers=reader.next()
temp1=[]
# temp2=[]
for row in reader:
    c1=[]
    c2=[]
    for i in range(1,9):# read columns 2 through 9
        b=0
        b=float(row[i])-float(row[i+8])
        c1.append(b)
    temp1.append(c1)
n=len(temp1)
# print(n)
p_featureList=[]
for i in range(0,n-4):
    k1=[]
    k1=temp1[i]
    k2=temp1[i+1]
    k3=temp1[i+2]
    k4=temp1[i+3]
    k5=temp1[i+4]
    k1.extend(k2)
    k1.extend(k3)
    k1.extend(k4)
    k1.extend(k5)
    p_featureList.append(k1)
    # NOTE(review): indentation reconstructed from a whitespace-stripped
    # dump -- each window appears to be classified as it is built, with
    # an HTTP alert posted when the predicted class is 0; confirm
    # against the original layout.
    print('predict------------->')
    kk=clf.predict(k1)
    if kk==0:
        # NOTE(review): the dict literal repeats the key "key" three
        # times, so only one pair is actually sent -- probably a bug.
        r = requests.post(conf.dz, data = {"key":"value","key":"value","key":"value"})
    print(kk)
p=clf.predict(p_featureList)
###########################################################
print("all together:")
print(p)
end=time.clock()
print "time: %f s" % (end - start)
"[email protected]"
] | |
1352905030391c025ff13e169795a5a478951b82 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/LJ4250-MIB.py | 31ae0e9723d805ae04c3db23ae971db782eaad75 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 416,747 | py | #
# PySNMP MIB module LJ4250-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LJ4250-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:07:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, IpAddress, Unsigned32, ObjectIdentity, Integer32, Counter64, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, ModuleIdentity, NotificationType, Gauge32, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "IpAddress", "Unsigned32", "ObjectIdentity", "Integer32", "Counter64", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "ModuleIdentity", "NotificationType", "Gauge32", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity for the HP enterprise subtree (1.3.6.1.4.1.11).
# Revision/description texts are only attached when the MIB builder is
# new enough and has text loading enabled.
hp = ModuleIdentity((1, 3, 6, 1, 4, 1, 11))
hp.setRevisions(('1904-02-18 13:45', '1904-02-18 13:55',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: hp.setRevisionsDescriptions(('Added the hp module identity construct', 'Removed the following incorrect object definitions: channelTable channelEntry channel-table channel-entry',))
if mibBuilder.loadTexts: hp.setLastUpdated('0402181325Z')
if mibBuilder.loadTexts: hp.setOrganization('Hewlett-Packard Company')
if mibBuilder.loadTexts: hp.setContactInfo('Customer Care Center Tel: +1 800-474-6836 WEB: http://www.hp.com')
if mibBuilder.loadTexts: hp.setDescription('Objects used in the HP LaserJet 4250 printer.')
# Base OID for the HP PML management objects: 1.3.6.1.4.1.11.2.3.9.4.2
netPMLmgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2))
class DisplayString(OctetString):
    # Local DisplayString alias that behaves exactly like OCTET STRING;
    # it deliberately shadows the SNMPv2-TC DisplayString imported above.
    pass
device = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1))
device_system = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1)).setLabel("device-system")
status_system = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2)).setLabel("status-system")
test = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 5))
background_message = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 37)).setLabel("background-message")
background_message1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 37, 1)).setLabel("background-message1")
background_message2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 37, 2)).setLabel("background-message2")
control_panel_display = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 65)).setLabel("control-panel-display")
destination_subsystem = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4)).setLabel("destination-subsystem")
print_engine = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1)).setLabel("print-engine")
settings_system = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 1)).setLabel("settings-system")
interface = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4))
simm = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1))
simm1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1))
simm1_bank = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 6)).setLabel("simm1-bank")
simm1_bank1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 6, 1)).setLabel("simm1-bank1")
simm1_bank2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 6, 2)).setLabel("simm1-bank2")
simm2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2))
simm2_bank = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 6)).setLabel("simm2-bank")
simm2_bank1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 6, 1)).setLabel("simm2-bank1")
simm2_bank2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 6, 2)).setLabel("simm2-bank2")
simm3 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 3))
simm3_bank = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 3, 6)).setLabel("simm3-bank")
simm3_bank1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 3, 6, 1)).setLabel("simm3-bank1")
simm3_bank2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 3, 6, 2)).setLabel("simm3-bank2")
simm4 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 4))
simm4_bank = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 4, 6)).setLabel("simm4-bank")
simm4_bank1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 4, 6, 1)).setLabel("simm4-bank1")
simm4_bank2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 4, 6, 2)).setLabel("simm4-bank2")
simm5 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 5))
simm5_bank = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 5, 6)).setLabel("simm5-bank")
simm5_bank1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 5, 6, 1)).setLabel("simm5-bank1")
simm5_bank2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 5, 6, 2)).setLabel("simm5-bank2")
job = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6))
settings_job = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 1)).setLabel("settings-job")
operating_system = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 19)).setLabel("operating-system")
processing_subsystem = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3)).setLabel("processing-subsystem")
pdl = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3))
settings_pdl = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1)).setLabel("settings-pdl")
status_pdl = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 2)).setLabel("status-pdl")
errorlog = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11))
error1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 1))
error2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 2))
error3 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 3))
error4 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 4))
error5 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 5))
error6 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 6))
error7 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 7))
error8 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 8))
error9 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 9))
error10 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 10))
error11 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 11))
error12 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 12))
error13 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 13))
error14 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 14))
error15 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 15))
error16 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 16))
error17 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 17))
error18 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 18))
error19 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 19))
error20 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 20))
error21 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 21))
error22 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 22))
error23 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 23))
error24 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 24))
error25 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 25))
error26 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 26))
error27 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 27))
error28 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 28))
error29 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 29))
error30 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 30))
error31 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 31))
error32 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 32))
error33 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 33))
error34 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 34))
error35 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 35))
error36 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 36))
error37 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 37))
error38 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 38))
error39 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 39))
error40 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 40))
error41 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 41))
error42 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 42))
error43 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 43))
error44 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 44))
error45 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 45))
error46 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 46))
error47 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 47))
error48 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 48))
error49 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 49))
error50 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 50))
channel = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6))
display = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 20))
display_status = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 20, 1)).setLabel("display-status")
id = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3))
intray = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3))
settings_intray = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1)).setLabel("settings-intray")
pdl_pcl = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 3)).setLabel("pdl-pcl")
pdl_postscript = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 4)).setLabel("pdl-postscript")
socket_ping = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 7)).setLabel("socket-ping")
active_print_jobs = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 2)).setLabel("active-print-jobs")
job_being_parsed = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 2, 1)).setLabel("job-being-parsed")
job_info = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5)).setLabel("job-info")
job_info_attribute = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23)).setLabel("job-info-attribute")
job_info_accounting = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28)).setLabel("job-info-accounting")
held_job = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7)).setLabel("held-job")
held_job_info = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 1)).setLabel("held-job-info")
held_job_control = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 2)).setLabel("held-job-control")
source_subsystem = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2)).setLabel("source-subsystem")
spooler = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 4))
settings_spooler = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 4, 1)).setLabel("settings-spooler")
pjl = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 5))
mio = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3))
mio1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 1))
mio2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 2))
mio3 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 3))
io = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1))
settings_io = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1, 1)).setLabel("settings-io")
ports = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1, 3))
port1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1, 3, 1))
tables = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7))
remote_procedure_call = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 13)).setLabel("remote-procedure-call")
settings_rpc = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 13, 1)).setLabel("settings-rpc")
status_rpc = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 13, 2)).setLabel("status-rpc")
file_system = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10)).setLabel("file-system")
settings_file_system = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1)).setLabel("settings-file-system")
file_systems = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 3)).setLabel("file-systems")
file_system2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 3, 2)).setLabel("file-system2")
file_system3 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 3, 3)).setLabel("file-system3")
file_system4 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 3, 4)).setLabel("file-system4")
resource_manager = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 12)).setLabel("resource-manager")
mass_storage_resources = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 12, 3)).setLabel("mass-storage-resources")
mass_storage_block_driver = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 15)).setLabel("mass-storage-block-driver")
settings_mass_storage_bd = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 15, 1)).setLabel("settings-mass-storage-bd")
status_mass_storage_bd = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 15, 2)).setLabel("status-mass-storage-bd")
device_configure = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 1, 32)).setLabel("device-configure")
phd = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 5))
phd2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 5, 2))
settings_prt_eng = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 1)).setLabel("settings-prt-eng")
marking_agent_density = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 1, 9)).setLabel("marking-agent-density")
status_prt_eng = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 2)).setLabel("status-prt-eng")
intrays = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3))
intray1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 1))
intray2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 2))
intray3 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 3))
intray4 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 4))
intray5 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 5))
intray6 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 6))
outbin = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 4))
settings_outbin = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 4, 1)).setLabel("settings-outbin")
outbins = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 4, 3))
outbin1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 4, 3, 1))
marking_agent = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 5)).setLabel("marking-agent")
settings_marking_agent = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 5, 1)).setLabel("settings-marking-agent")
ph = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 7))
ph_devices = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 7, 3)).setLabel("ph-devices")
ph2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 7, 3, 2))
print_media = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8)).setLabel("print-media")
settings_print_media = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 1)).setLabel("settings-print-media")
media_info = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3)).setLabel("media-info")
media1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 1))
media2 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 2))
media3 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 3))
media4 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 4))
media5 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 5))
media6 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 6))
media7 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 7))
media8 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 8))
media9 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 9))
media10 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 10))
media11 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 11))
media12 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 12))
media13 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 13))
media14 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 14))
media15 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 15))
media16 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 16))
media17 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 17))
media18 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 18))
media19 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 19))
media_modes = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 4)).setLabel("media-modes")
media_types = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 8)).setLabel("media-types")
media_counts = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 7)).setLabel("media-counts")
media_size = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 5)).setLabel("media-size")
service_channel = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 77)).setLabel("service-channel")
accounting = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16))
printer_accounting = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1)).setLabel("printer-accounting")
printed_media_usage = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1)).setLabel("printed-media-usage")
printed_modes_accounting = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 4)).setLabel("printed-modes-accounting")
printed_modes_usage = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 4, 1)).setLabel("printed-modes-usage")
source_tray_accounting = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 5)).setLabel("source-tray-accounting")
source_tray_usage = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 5, 1)).setLabel("source-tray-usage")
destination_bin_accounting = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 6)).setLabel("destination-bin-accounting")
destination_bin_usage = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 6, 1)).setLabel("destination-bin-usage")
consumables = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10))
consumables_1 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1)).setLabel("consumables-1")
consumable_status = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1)).setLabel("consumable-status")
consumable_string = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 8)).setLabel("consumable-string")
consumables_status = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 5)).setLabel("consumables-status")
consumables_life = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 5, 1)).setLabel("consumables-life")
print_meter = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 11)).setLabel("print-meter")
printer_average = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 11, 1)).setLabel("printer-average")
webserver_proc_sub = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 9)).setLabel("webserver-proc-sub")
settings_webserver = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 9, 1)).setLabel("settings-webserver")
firmware_download = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18)).setLabel("firmware-download")
upgradable_devices = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20)).setLabel("upgradable-devices")
# --- Device status / self-test scalars --------------------------------------
# perm-store-init-occurred: read-only OctetString bitmask reporting which
# permanent-storage devices were initialized (see description text).
perm_store_init_occurred = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 10), OctetString()).setLabel("perm-store-init-occurred").setMaxAccess("readonly")
# Descriptive texts are only attached when the builder was asked to load
# them (saves memory in the common agent/manager case).
if mibBuilder.loadTexts: perm_store_init_occurred.setStatus('optional')
if mibBuilder.loadTexts: perm_store_init_occurred.setDescription('This object will set the cAllPermDevices bit when a full perm storage initialization occurs (as would be the case for a brand new system or as a result of a powerup key sequence or <<hidden>> object request). If only one device was initialized (as would be the case if a disk were added to an existing system or a formatter were swapped out), then only the appropriate collection bits will be returned. If there are no collection bits set then this indicates that no initialization took place.')
# self-test: read-write enum; writing eNonDestructiveSelfTest(4) starts a
# non-destructive self test, reading reports the test in progress (if any).
self_test = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 4))).clone(namedValues=NamedValues(("eNotInASelfTest", 1), ("eNonDestructiveSelfTest", 4)))).setLabel("self-test").setMaxAccess("readwrite")
if mibBuilder.loadTexts: self_test.setStatus('optional')
if mibBuilder.loadTexts: self_test.setDescription('Writing this object allows a device self test to be started. Reading this object provides an indication what self-test is currently executing, if any. Actual self-test operation is device specific. A self test may not be allowed at any arbitrary time. If the device supports the requested self test, but can not start the execution of the self test when requested, the device will respond with <genErr>. If a non-destructive self test is being started, the device will generate a response before the self test is completed. RECOMMENDATION: If the device is ready (i.e. the NOT-READY-PRINTER object does not contain any items, except maybe being off-line) and is idle (i.e. the NOT-IDLE object does not contain any items), this request should always succeed. This provides a mechanism for driver writers to always determine if the action will succeed or not. Additional information: The eNonDestructiveSelfTest performs limited testing on the printer and its attached paper handling devices. After the self-test is complete a configuration page is printed. The recommended way to cause a configuration page to be printed is to use the PRINT-INTERNAL-PAGE object. Setting this object to eNonDestructiveSelfTest results in a status of <noError> and a value of eNonDestructiveSelfTest being returned. If the printer is not idle (whether printing an external job or printing an internal page), this action will be delayed until the next job boundary. See the PRINT-INTERNAL-PAGE object for more details.')
# print-internal-page: read-write enum; writing a page-type value triggers
# printing of an internally generated page (configuration page, demo page,
# error log, menu map, font lists, ...); reading reports which internal page,
# if any, is currently being formatted.
print_internal_page = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 7, 8, 9, 100, 101, 254, 255, 350, 450))).clone(namedValues=NamedValues(("eNotPrintingAnInternalPage", 1), ("ePrintingAnUnknownInternalPage", 2), ("eDeviceDemoPage1ConfigurationPage", 3), ("eDeviceDemoPage2", 4), ("eDeviceDemoPage5ErrorLog", 7), ("eDeviceDemoPage6FileSystemDirectoryListing", 8), ("eDeviceDemoPage7MenuMap", 9), ("ePrintUsagePage", 100), ("eSuppliesPage", 101), ("eDevicePaperPathTest", 254), ("eDevicePageRegistrationPage", 255), ("ePCLFontList1", 350), ("ePSFontList", 450)))).setLabel("print-internal-page").setMaxAccess("readwrite")
if mibBuilder.loadTexts: print_internal_page.setStatus('optional')
# BUG FIX: the original setDescription() argument was a single-line
# double-quoted string containing a raw, unescaped newline (the literal
# spilled onto the next source line), which is a SyntaxError in Python.
# The description text is unchanged; the line break is preserved as an
# explicit \n escape so the statement is one valid string literal.
if mibBuilder.loadTexts: print_internal_page.setDescription("Writing this object to a value other than eNotPrintingAnInternalPage causes the device to attempt to print an internally generated page. Reading this object provides an indication what internally generated page is currently being printed, if any. The actual page is device specific. Some devices may support continuously printing an internally generated page. Setting this object to eNotPrintingAnInternalPage terminates continuously printing internally generated pages. An internally generated page may not be allowed to be printed at any arbitrary time; under what conditions it will succeed or fail is device specific. If the device supports the requested page, but can not start printing the page when requested, the device will respond with <genErr>. RECOMMENDATION: If the device is ready (i.e. the NOT-READY-PRINTER object does not contain any items, except maybe being off-line) and is idle (i.e. the NOT-IDLE object does not contain any items), this request should always succeed. This provides a mechanism for driver writers to always determine if the action will succeed or not. Additional information: Previous products used ePCLDemoPage1ConfigurationPage(300) for the Configuration or Self Test page. This product uses eDeviceDemoPage1ConfigurationPage(3). LaserJet 8150 does not support continuously printing a demo page. When this object is set to a valid value, the status returned is <noError> and the value returned is ePrintingAnUnknownInternalPage. If the printer is idle, the page will be printed immediately. If the printer is currently printing another job, the internal page requested will not print until that job is finished. Setting this object to a valid value causes the desired page to be formatted and put in the printer's print queue. While the page is being formatted a get on this object will return the value ePrintingAnUnknownInternalPage. \nOnce the page (or pages) is finished being formatted, this object returns a value of eNotPrintingAnInternalPage, even though the page may not have finished being printed. Setting this object multiple times will queue up the requests, but only a limited number will be queued. Once the queue is full, additional requests will be ignored.")
# --- Localization and control-panel scalars ---------------------------------
# Comma-separated ISO 639 language codes supported by the device (read-only).
localization_languages_supported = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 52), DisplayString()).setLabel("localization-languages-supported").setMaxAccess("readonly")
if mibBuilder.loadTexts: localization_languages_supported.setStatus('optional')
if mibBuilder.loadTexts: localization_languages_supported.setDescription('The list of languages supported by the device. The languages are primarily, but not limited to, two character codes from ISO 639, each separated by a comma character. Additional information: This string will always be in the Roman-8 character set. See prtLocalizationLanguage for details about each language value.')
# Comma-separated ISO 3166 country codes supported by the device (read-only).
localization_countries_supported = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 53), DisplayString()).setLabel("localization-countries-supported").setMaxAccess("readonly")
if mibBuilder.loadTexts: localization_countries_supported.setStatus('optional')
if mibBuilder.loadTexts: localization_countries_supported.setDescription('The list of countries supported by the device. The countries are primarily, but not limited to, two character codes from ISO 3166, each separated by a comma character. Additional information: This string will always be in the Roman-8 character set. See prtLocalizationCountry for details about each country value.')
# Lines 1 and 2 (max 16 chars each) of a host-supplied replacement for the
# built-in background status message on the front-panel display.
background_status_msg_line1_part1 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 37, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setLabel("background-status-msg-line1-part1").setMaxAccess("readwrite")
if mibBuilder.loadTexts: background_status_msg_line1_part1.setStatus('optional')
if mibBuilder.loadTexts: background_status_msg_line1_part1.setDescription("The string displayed on the device's front panel in place of the printer's built-in background status string. An example built-in background status string is '00 READY'. Additional information: The display size for the LaserJet 8150 printers is 2 X 16. The value of this object and the current value of BACKGROUND-STATUS-MSG-LINE2-PART1 are displayed together on the 2-line display, but they must be set independently. If line 2 has been set, and the next message to be displayed only requires line 1, BACKGROUND-STATUS-MSG-LINE2-PART1 must be set to the null string to clear it. This object allows a message to be displayed when it is the highest priority message. Setting this object does not guarantee the message will be displayed; and reading it returns the value last written, not the currently displayed message (use prtConsoleDisplayBufferText to read the display). The priority assigned for displaying this message is one lower than the READY message. In other words, the only message that can be replaced by these objects is the READY message. To clear the message, write a null string to both this object and BACKGROUND-STATUS-MSG-LINE2-PART1.")
background_status_msg_line2_part1 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 37, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setLabel("background-status-msg-line2-part1").setMaxAccess("readwrite")
if mibBuilder.loadTexts: background_status_msg_line2_part1.setStatus('optional')
if mibBuilder.loadTexts: background_status_msg_line2_part1.setDescription("The string displayed on the device's front panel in place of the printer's built-in background status string. An example built-in background status string is '00 READY'. Additional information: See BACKGROUND-STATUS-MSG-LINE1-PART1")
# Writing simulates a front-panel button press; reading returns the last
# button pressed (physically or via PML).
control_panel_button_press = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 60), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29))).clone(namedValues=NamedValues(("eGoButton", 1), ("eMenuPlusButton", 2), ("eMenuMinusButton", 3), ("eItemPlusButton", 4), ("eItemMinusButton", 5), ("eValuePlusButton", 6), ("eValueMinusButton", 7), ("eSelectButton", 8), ("eCancelJobButton", 9), ("ePauseResumeButton", 10), ("eUpArrowButton", 11), ("eDownArrowButton", 12), ("eBackButton", 13), ("eQuestionMarkButton", 14), ("eClearButton", 15), ("eNumericButton0", 16), ("eNumericButton1", 17), ("eNumericButton2", 18), ("eNumericButton3", 19), ("eNumericButton4", 20), ("eNumericButton5", 21), ("eNumericButton6", 22), ("eNumericButton7", 23), ("eNumericButton8", 24), ("eNumericButton9", 25), ("eRotateButton", 26), ("eInfoButton", 27), ("eMenuButton", 28), ("eStopButton", 29)))).setLabel("control-panel-button-press").setMaxAccess("readwrite")
if mibBuilder.loadTexts: control_panel_button_press.setStatus('optional')
if mibBuilder.loadTexts: control_panel_button_press.setDescription('Writing this object simulates pressing a button on the control panel. Reading it will return the last key pressed either on the control panel or via PML. The device POS will specify which keys are supported.')
# Display-change counter, display-contents CRC, and GIF snapshot of the
# front-panel display (all read-only).
control_panel_display_contents_change_counter = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 63), Integer32()).setLabel("control-panel-display-contents-change-counter").setMaxAccess("readonly")
if mibBuilder.loadTexts: control_panel_display_contents_change_counter.setStatus('optional')
if mibBuilder.loadTexts: control_panel_display_contents_change_counter.setDescription('A counter which increments whenever the contents of the front panel display changes. This object is implemented as a 32-bit signed integer which rolls over to zero when it reaches a maximum value.')
control_panel_display_contents_crc = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 64), Integer32()).setLabel("control-panel-display-contents-crc").setMaxAccess("readonly")
if mibBuilder.loadTexts: control_panel_display_contents_crc.setStatus('optional')
if mibBuilder.loadTexts: control_panel_display_contents_crc.setDescription('Reading this object returns a 32-bit Cyclical Redundancy Check (CRC) which represents the current contents of the display. Additional information: This object has been implimented as an 8-bit CRC for this product.')
control_panel_display_graphical_contents = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 65, 1), OctetString()).setLabel("control-panel-display-graphical-contents").setMaxAccess("readonly")
if mibBuilder.loadTexts: control_panel_display_graphical_contents.setStatus('optional')
if mibBuilder.loadTexts: control_panel_display_graphical_contents.setDescription('Reading this object returns a graphical file format image representing the current pixel content of the display. The device POS will specify the expected screen resolution, color depth of the display and graphics file format for a given product (eg. 160x64x1 GIF format OR 640x240x4 JPEG format). If the image is large enough that it needs to be returned in multiple objects then each array object will contain a portion of the image. The image will then need to be reconstructed by a host application. An application that needs to determine if their is an additional object to be retreived will need to perform a GETNEXT operation until there are no more objects in the sub-tree. Additional information: This object returns a GIF image that represents the current contents of the 160x64x1 control panel display. Most display images require more than one instance of this object in order to retreive the complete GIF image.')
# --- Power management and job/error control scalars -------------------------
# energy-star: seconds of engine idle before sleep mode (0 = disabled);
# unsupported values snap to the nearest supported value per the description.
energy_star = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 1, 1), Integer32()).setLabel("energy-star").setMaxAccess("readwrite")
if mibBuilder.loadTexts: energy_star.setStatus('optional')
if mibBuilder.loadTexts: energy_star.setDescription('Returns or changes the Energy Star sleep value. If the value is greater than zero, then the device will go into energy saving sleep mode after the print engine has been idle for the number of seconds specified by this object. A value of zero means Energy Star is disabled and the device will not go to sleep based on print engine idle time. The value must be non-negative. Additional information: Returns or changes the Energy Star sleep value. The device will go into energy saving sleep mode after the print engine has been idle for the number of seconds specified by this object. LaserJet 5500 supports values of 0, 60, 900, 1800, 3600, 5400, 7200, 14400 seconds. A value of 0 means never enter sleep mode based on the print engine idle time. Setting to an unsupported value causes the printer to substitute a value (listed below) and to return <noError> status. Setting this value when the printer is in sleep mode will not cause it to wakeup unless it is set to 0. The values are as follow: <=0 snap to 0 >=1 and <= 1349 snap to 900 (15 minutes) >=1350 and <= 2699 snap to 1800 (30 minutes) >=2700 and <= 4499 snap to 3600 (1 hour) >=4500 and <= 6299 snap to 5400 (90 minutes) >=6300 and <= 10799 snap to 7200 (2 hours) >=10800 snap to 14400 (4 hours).')
# sleep-mode: eTrue while the device is asleep; writing eFalse wakes it.
sleep_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eFalse", 1), ("eTrue", 2)))).setLabel("sleep-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: sleep_mode.setStatus('optional')
if mibBuilder.loadTexts: sleep_mode.setDescription("Returns eTrue if the device is in energy saving sleep mode, otherwise returns eFalse. Setting SLEEP-MODE to eFalse causes the device to wake up, if it is in sleep mode. Setting SLEEP-MODE to eTrue causes the device to go into sleep mode. Additional information: This object returns eTrue if the device is in energy saving sleep mode, otherwise it returns eFalse. Setting this object to eTrue while the printer is awake will not change the printer's current state and will return <genErr> status. NOTE: This object should behave this way when the printer does not have an instant-on fuser. Setting this object to eTrue while printer is already in Sleep Mode will not change the printer's current state and will return <noError> status. Setting this object to eFalse while printer is already awake will not change the printer's current state and will return <noError> status. Setting this object to eFalse while the printer is asleep causes the device to wake up. ")
# on-off-line: takes the PDL processing sub-system on/off line (PML keeps
# working while offline).
on_off_line = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("eOnline", 1), ("eOffline", 2), ("eOfflineAtEndOfJob", 3)))).setLabel("on-off-line").setMaxAccess("readwrite")
if mibBuilder.loadTexts: on_off_line.setStatus('optional')
if mibBuilder.loadTexts: on_off_line.setDescription("To bring the PDL processing sub-system on or off line. If the device is a printer, then the printer does not process print job data when the printer is off line. PML communication persists. Additional information: When the printer is in the Offline state, printing will stop as soon as possible (no more sheets of media are pulled from input trays). The I/O is taken offline also. Setting to eOnline has the following affect: Printer will immediately go to or remain in the Online state; <noError> status is returned. If the printer's current state is Offline with a pending error condition that prevents the printer from going to the Online state, the printer will remain in the Offline state; <genErr> status is returned (see the Control Panel ERS for a list of error conditions). Setting to eOffline has the following affect: Printer will immediately go to or remain in the Offline state; <noError> status is returned. If pages are being printed, those pages will complete with the printer in the Offline state. Setting to eOfflineAtEndOfJob has the following affect: If not in a job or already in the Offline state, the printer will immediately go to or remain in the Offline state; <noError> status is returned. If in a job and the current state is Online, the printer will remain in the Online state, with the value of this object as eOfflineAtEndOfJob, until the end of the job; <noError> status is returned. At the end of the job, the printer goes to the Offline state and the value of this object becomes eOffline. Setting this object to eOffline or eOnline before the end of the job causes the action for that value to be taken immediately.")
# 'continue' is a Python keyword, so pysmi prefixes the generated identifier
# with 'pysmi_'; the MIB label remains "continue".  Write-only trigger that
# acknowledges a continuable error.
pysmi_continue = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("eInitiateAction", 1)))).setLabel("continue").setMaxAccess("writeonly")
if mibBuilder.loadTexts: pysmi_continue.setStatus('optional')
if mibBuilder.loadTexts: pysmi_continue.setDescription("A device can support a class of errors called continuable errors. When a continuable error is encountered, the device requires a continue event to occur before the device will continue operation. One continue event is setting the CONTINUE object to eInitiateAction. Devices can support other continue events, like auto-continue. A continue event causes the continuable error to be acknowledged, thus allowing the device to continue. Each device needs to list the continuable errors. If the device doesn't currently have an unacknowledged continuable error, the response will contain <genErr>. Additional information: See the CLEARABLE-WARNING and AUTO-CONTINUE objects for the errors that this object will clear.")
# auto-continue: eOn means the device auto-acknowledges continuable errors
# after a short offline period; eOff requires an explicit continue event.
auto_continue = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eOff", 1), ("eOn", 2)))).setLabel("auto-continue").setMaxAccess("readwrite")
if mibBuilder.loadTexts: auto_continue.setStatus('optional')
if mibBuilder.loadTexts: auto_continue.setDescription('Indicates if the device will automatically continue after encountering a continuable error. If AUTO-CONTINUE is set to eOn, the device will automatically generate continue event to acknowledge continuable errors. If AUTO-CONTINUE is set to eOff, then some other continue event will have to acknowledge the continuable error. Additional information: If this is set to eOn the device displays an error message and goes offline for ten seconds. After ten seconds the printer automatically returns to the online state. If this is set to eOff then the device displays an error message and goes offline. It remains offline until the operator presses the GO key or until the CONTINUE object is set. If the printer is not idle, the new value may not take effect until a job boundary is reached. If a get is done on this object before the job boundary is reached, the value last set will be returned.')
simm1_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 9))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eUnSupported", 3), ("eReadOnlyMemory", 4), ("eVolatileRandomAccessMemory", 5), ("eFlashMemory", 7), ("eRamRom", 9)))).setLabel("simm1-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm1_type.setStatus('optional')
if mibBuilder.loadTexts: simm1_type.setDescription("Returns an indication of the type of option installed in SIMM slot 1. eEmpty means the device did not detect any option installed in the interface slot. eUnknown means the device doesn't recognize the installed option. eUnSupported means the device recognizes the installed option, but does not support the option. eReadOnlyMemory means the installed option contains ROM Ics. eVolatileRandomAccessMemory means the installed option contains RAM ICs that loose data when the power is turned off. eNonVolatileRandomAccessMemory means that the installed option contains RAM ICs that do not loose data when the power is turned off. eFlashMemory means that the installed option contains a type of non-volatile RAM that needs to be erased before it can be written. eDiskDrive means the installed option contains a disk drive. eRamRom means the installed option contains both volatile random access memory and read only memory. eInputPHD means the installed option is an input paper handling device. eOutputPHD means the installed option is an output paper handling device. eIOCard means the installed option is an I/O card. Additional information: This object is used for describing DIMMs instead of SIMMs on LaserJet 8150. eRamRom is used to denote LaserJet 8150's combo simm.")
simm1_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 5), Integer32()).setLabel("simm1-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm1_capacity.setStatus('optional')
if mibBuilder.loadTexts: simm1_capacity.setDescription('Returns an indication of the capacity of the SIMM installed in SIMM slot 1. The capacity is dependent on the type of option, as indicated by the SIMM1-TYPE object. This object has no meaning if the installed option type is eUnknown or eReadOnlyMemory. This object contains the size, in bytes, if the installed option type is eVolatileRandomAccessMemory, eNonVolatileRandomAccessMemory, eFlashMemory, or eDiskDrive. If the type of the installed option is eRamRom, this object contains the size, in bytes, of the random access memory. If the type of the installed option is eInputPHD, the capacity indicates the number of input trays supported by the installed option. If the type of the installed option is eOutputPHD, the capacity indicates the number of output bins supported by the installed option. If the type of the installed option is eIOCard, the capacity indicates the number of logical I/O ports supported by the I/O card. Additional information: Returns an indication of the capacity of the installed option in bytes. This object is not supported unless the SIMM1-TYPE type is eVolatileRandomAccessMemory, eRamRom, or eFlashMemory. For eRamRom only the size of the Ram portion of the SIMM is returned.')
simm1_bank1_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 9))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eUnSupported", 3), ("eReadOnlyMemory", 4), ("eVolatileRandomAccessMemory", 5), ("eFlashMemory", 7), ("eRamRom", 9)))).setLabel("simm1-bank1-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm1_bank1_type.setStatus('optional')
if mibBuilder.loadTexts: simm1_bank1_type.setDescription('Returns an indication of the type of option installed in Bank 1 of SIMM slot 1. See SIMM1-TYPE for a description. Additional information: This object is used for describing the type of DIMM banks. Each physical DIMM slot has up to 2 banks.')
simm1_bank1_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 6, 1, 2), Integer32()).setLabel("simm1-bank1-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm1_bank1_capacity.setStatus('optional')
if mibBuilder.loadTexts: simm1_bank1_capacity.setDescription('Returns an indication of the capacity of Bank 1 of the SIMM installed in SIMM slot 1. See SIMM1-CAPACITY for a description. Additional information: Returns an indication of the capacity of the installed bank option in bytes. This object is not supported unless the SIMM1-BANK1-TYPE type is eReadOnlyMemory, eFlashMemory, eEDORandomAccessMemory, eSDRandomAccessMemory, eSRandomAccessMemory, or eFPMRandomAccessMemory.')
simm1_bank2_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 6, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 9))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eUnSupported", 3), ("eReadOnlyMemory", 4), ("eVolatileRandomAccessMemory", 5), ("eFlashMemory", 7), ("eRamRom", 9)))).setLabel("simm1-bank2-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm1_bank2_type.setStatus('optional')
if mibBuilder.loadTexts: simm1_bank2_type.setDescription('Returns an indication of the type of option installed in Bank 2 of SIMM slot 1. See SIMM1-TYPE for a description. Additional information: This object is used for describing the type of DIMM banks. Each physical DIMM slot has up to 2 banks.')
simm1_bank2_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 1, 6, 2, 2), Integer32()).setLabel("simm1-bank2-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm1_bank2_capacity.setStatus('optional')
if mibBuilder.loadTexts: simm1_bank2_capacity.setDescription('Returns an indication of the capacity of Bank 2 of the SIMM installed in SIMM slot 1. See SIMM1-CAPACITY for a description. Additional information: Returns an indication of the capacity of the installed bank option in bytes. This object is not supported unless the SIMM1-BANK2-TYPE type is eReadOnlyMemory, eFlashMemory, eEDORandomAccessMemory, eSDRandomAccessMemory, eSRandomAccessMemory, or eFPMRandomAccessMemory.')
simm2_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 9))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eUnSupported", 3), ("eReadOnlyMemory", 4), ("eVolatileRandomAccessMemory", 5), ("eFlashMemory", 7), ("eRamRom", 9)))).setLabel("simm2-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm2_type.setStatus('optional')
if mibBuilder.loadTexts: simm2_type.setDescription("Returns an indication of the type of option installed in SIMM slot 2. See SIMM1-TYPE for a description. Additional information: This object is used for describing DIMMs instead of SIMMs on LaserJet 8150. eRamRom is used to denote LaserJet 8150's combo simm.")
simm2_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 5), Integer32()).setLabel("simm2-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm2_capacity.setStatus('optional')
if mibBuilder.loadTexts: simm2_capacity.setDescription('Returns an indication of the capacity of the SIMM installed in SIMM slot 2. See SIMM1-CAPACITY for a description. Additional information: This object is used for describing DIMMs instead of SIMMs on LaserJet 8150. Returns an indication of the capacity of the installed option in bytes. This object is not supported unless the SIMM2-TYPE type is eVolatileRandomAccessMemory, eRamRom, or eFlashMemory. For eRamRom only the size of the Ram portion of the SIMM is returned.')
simm2_bank1_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 9))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eUnSupported", 3), ("eReadOnlyMemory", 4), ("eVolatileRandomAccessMemory", 5), ("eFlashMemory", 7), ("eRamRom", 9)))).setLabel("simm2-bank1-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm2_bank1_type.setStatus('optional')
if mibBuilder.loadTexts: simm2_bank1_type.setDescription('Returns an indication of the type of option installed in Bank 1 of SIMM slot 2. See SIMM1-TYPE for a description. Additional information: This object is used for describing the type of DIMM banks. Each physical DIMM slot has up to 2 banks.')
simm2_bank1_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 6, 1, 2), Integer32()).setLabel("simm2-bank1-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm2_bank1_capacity.setStatus('optional')
if mibBuilder.loadTexts: simm2_bank1_capacity.setDescription('Returns an indication of the capacity of Bank 1 of the SIMM installed in SIMM slot 2. See SIMM1-CAPACITY for a description. Additional information: Returns an indication of the capacity of the installed bank option in bytes. This object is not supported unless the SIMM2-BANK1-TYPE type is eReadOnlyMemory, eFlashMemory, eEDORandomAccessMemory, eSDRandomAccessMemory, eSRandomAccessMemory, or eFPMRandomAccessMemory.')
simm2_bank2_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 6, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 9))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eUnSupported", 3), ("eReadOnlyMemory", 4), ("eVolatileRandomAccessMemory", 5), ("eFlashMemory", 7), ("eRamRom", 9)))).setLabel("simm2-bank2-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm2_bank2_type.setStatus('optional')
if mibBuilder.loadTexts: simm2_bank2_type.setDescription('Returns an indication of the type of option installed in Bank 2 of SIMM slot 2. See SIMM1-TYPE for a description. Additional information: This object is used for describing the type of DIMM banks. Each physical DIMM slot has up to 2 banks.')
simm2_bank2_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1, 2, 6, 2, 2), Integer32()).setLabel("simm2-bank2-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: simm2_bank2_capacity.setStatus('optional')
if mibBuilder.loadTexts: simm2_bank2_capacity.setDescription('Returns an indication of the capacity of Bank 2 of the SIMM installed in SIMM slot 2. See SIMM1-CAPACITY for a description. Additional information: Returns an indication of the capacity of the installed bank option in bytes. This object is not supported unless the SIMM2-BANK2-TYPE type is eReadOnlyMemory, eFlashMemory, eEDORandomAccessMemory, eSDRandomAccessMemory, eSRandomAccessMemory, or eFPMRandomAccessMemory.')
# --- SIMM/DIMM slots 3-5 -------------------------------------------------
# Each slot exposes a type scalar, a capacity scalar, and type/capacity
# scalars for each of its two memory banks.  The six objects per slot
# differ only in the slot (and bank) number, so they are generated in a
# loop and published into the module namespace via globals() under the
# same names the hand-expanded originals used (simm<N>_type,
# simm<N>_capacity, simm<N>_bank<B>_type, simm<N>_bank<B>_capacity).
_SIMM_OID_BASE = (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 1)


def _simm_type_syntax():
    # Enumerated memory-type syntax shared by every *_type scalar below; a
    # fresh instance is built per scalar, mirroring the expanded originals.
    return Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 9))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eUnSupported", 3), ("eReadOnlyMemory", 4), ("eVolatileRandomAccessMemory", 5), ("eFlashMemory", 7), ("eRamRom", 9)))


def _simm_scalar(name, oid_suffix, syntax, description):
    # Build one read-only scalar, attach its MIB texts when the builder was
    # configured to load them, and bind it as a module-level attribute.
    scalar = MibScalar(_SIMM_OID_BASE + oid_suffix, syntax).setLabel(name.replace("_", "-")).setMaxAccess("readonly")
    if mibBuilder.loadTexts:
        scalar.setStatus('optional')
        scalar.setDescription(description)
    globals()[name] = scalar


for _slot in (3, 4, 5):
    _simm_scalar(
        "simm%d_type" % _slot,
        (_slot, 4),
        _simm_type_syntax(),
        'Returns an indication of the type of option installed in SIMM slot %d. See SIMM1-TYPE for a description.' % _slot,
    )
    _simm_scalar(
        "simm%d_capacity" % _slot,
        (_slot, 5),
        Integer32(),
        'Returns an indication of the capacity of the SIMM installed in SIMM slot %d. See SIMM1-CAPACITY for a description. Additional information: This object is used for describing DIMMs instead of SIMMs on LaserJet 8150. Returns an indication of the capacity of the installed option in bytes. This object is not supported unless the SIMM%d-TYPE type is eVolatileRandomAccessMemory, eRamRom, or eFlashMemory. For eRamRom only the size of the Ram portion of the simm is returned.' % (_slot, _slot),
    )
    for _bank in (1, 2):
        _simm_scalar(
            "simm%d_bank%d_type" % (_slot, _bank),
            (_slot, 6, _bank, 1),
            _simm_type_syntax(),
            'Returns an indication of the type of option installed in Bank %d of SIMM slot %d. See SIMM1-TYPE for a description. Additional information: This object is used for describing the type of DIMM banks. Each physical DIMM slot has up to 2 banks.' % (_bank, _slot),
        )
        _simm_scalar(
            "simm%d_bank%d_capacity" % (_slot, _bank),
            (_slot, 6, _bank, 2),
            Integer32(),
            'Returns an indication of the capacity of Bank %d of the SIMM installed in SIMM slot %d. See SIMM1-CAPACITY for a description. Additional information: Returns an indication of the capacity of the installed bank option in bytes. This object is not supported unless the SIMM%d-BANK%d-TYPE type is eReadOnlyMemory, eFlashMemory, eEDORandomAccessMemory, eSDRandomAccessMemory, eSRandomAccessMemory, or eFPMRandomAccessMemory.' % (_bank, _slot, _slot, _bank),
        )
# Write-only control: the host writes a job ID (-1..32767) here to request
# cancellation of that print job.
cancel_job = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 1, 2),
    Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 32767)),
).setLabel("cancel-job").setMaxAccess("writeonly")
if mibBuilder.loadTexts:
    # MIB texts are attached only when the builder was configured to load them.
    cancel_job.setStatus('optional')
    cancel_job.setDescription("Cancels the print job whose ID matches the value written to the CANCEL-JOB object. The host first learns the job ID using the CURRENT-JOB-PARSING-ID command. If the printer has completely processed the job, the printer responds with <badValue>. If the value of the CURRENT-JOB-PARSING-ID is smaller than the value written to the CANCEL-JOB object, then the printer responds with <badValue>. When read, returns the value of the last job ID what was canceled, or -1 to indicate no job has been canceled. Additional information: If the value written matches the ID of a job that is currently being canceled (for any reason), the printer responds with <noError>. It uses job ID's in the range of -1..32767. Because the ID number can wrap to zero, CURRENT-JOB-PARSING-ID may be smaller than the value written to this object; when this occurs, no error will result as long as the ID is for a currently processing job. This object is write only, so the comment in the general description stating the job ID will be returned on a read does not apply. A -1 represents the current job")
# Write-only hook: accepts a path string and asks the device OS to execute
# the named file (see description for the exact token format).
os_execute_file = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 19, 1),
    DisplayString(),
).setLabel("os-execute-file").setMaxAccess("writeonly")
if mibBuilder.loadTexts:
    os_execute_file.setStatus('optional')
    os_execute_file.setDescription("This object's input is a null-terminated string representing a fully-qualified path name for an executable file. This object causes the file to be executed by the OS. Additional information: This object's input is a null-terminated string of two or more whitespace-separated tokens. The first token is a path to a directory to make the current working directory. The second token is a path to an executable file to be executed. Any remaining whitespace-separated tokens are optional and will be passed as parameters to the executable. The paths to the directory and executable can be either PJL style (e.g., 1:\\app\\example) or UNIX-style (e.g., /hpmnt/dsk_ide1a/app/example). The executable is run in a separate process.")
# Write-only trigger (single enumerated action value) that tells the PDL
# sub-system to finish/eject the current page.
form_feed = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 5),
    Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("eInitiateAction", 1))),
).setLabel("form-feed").setMaxAccess("writeonly")
if mibBuilder.loadTexts:
    form_feed.setStatus('optional')
    form_feed.setDescription('Instructs the PDL processing sub-system to finishing processing the current page of the current job. Form feed is also known as close page or eject page. If the PDL processing sub-system is in a FORM-FEED-NEEDED state, this causes the device to flush or finish processing the current page of the current job. If the device is not in the FORM-FEED-NEEDED state, an <genErr> will occur.')
# Read-only boolean-style flag (eFalse/eTrue) reporting whether a form feed
# is pending; companion to the write-only form_feed trigger above -- note
# both exist independently in the original generated file.
form_feed_needed = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 2, 2),
    Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eFalse", 1), ("eTrue", 2))),
).setLabel("form-feed-needed").setMaxAccess("readonly")
if mibBuilder.loadTexts:
    form_feed_needed.setStatus('optional')
    form_feed_needed.setDescription("Indicates if the PDL processing sub-system has made marks on the current page and the source subsystem has been idle for a device specific amount of time. Additional information: LaserJet 8150 will set this object to eTrue when it has made marks on the current page, the IO-TIMEOUT has expired while PCL was running in `backward-compatibility mode' (which is caused by jobs consisting purely of PCL data with no prepended PJL commands), and no data is pending on another I/O. Once it is set to eTrue, more data on the same I/O will cause this object to be set to eFalse, until the above conditions are met again.")
# Write-only trigger (single enumerated action value) that clears every
# entry in the error-log sub-tree.
error_log_clear = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 38),
    Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("eClearErrorLog", 1))),
).setLabel("error-log-clear").setMaxAccess("writeonly")
if mibBuilder.loadTexts:
    error_log_clear.setStatus('optional')
    error_log_clear.setDescription("Setting this object clears all the entries in the error log sub-tree. Additional information: Setting this object removes all errors that have been stored in the printer's non-volatile memory.")
# Read-write policy knob for how clearable warnings are tracked; the
# permitted values here are eOn(2) and eJob(3) only.
clearable_warning = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 1, 1),
    Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("eOn", 2), ("eJob", 3))),
).setLabel("clearable-warning").setMaxAccess("readwrite")
if mibBuilder.loadTexts:
    clearable_warning.setStatus('optional')
    clearable_warning.setDescription('Returns or controls how the device will track clearable warnings. A clearable warning indicates a transient event in the device. The device will continue after the transient event occurs. If CLEARABLE-WARNING is eOff, the device does not track clearable warnings. If CLEARABLE-WARNING is eOn, all clearable warnings will be tracked until cleared (acknowledged). If CLEARABLE-WARNING is eJob, a clearable warning generated due to an event that occurs because of the print job being processed will be automatically cleared when the device has finished processing the job. Example clearable warning events include the device altering resolution or page protection due to memory constraints. The POS will document what transient events are treated as clearable warnings. Additional information: If set to eOn, the warning is displayed until the GO key is pressed or the CONTINUE object is set. If set to eJob, the warning is displayed until the end of the job in which it was generated.')
# Error-log entry 1: a time-stamp scalar (engine page count at the time of
# the error, per its description) and a packed device-specific error code.
# Entry 1 carries the full documentation that later entries refer back to.
error1_time_stamp = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 1, 1),
    Integer32(),
).setLabel("error1-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts:
    error1_time_stamp.setStatus('optional')
    error1_time_stamp.setDescription("Contains some sort of time stamp indicating when error 1 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: This item contains the engine page count when the error occurred. If there is currently no error entry for this object, a '0' will be returned. Note that '0' may also be returned when there is a valid error, but a current page count was unavailable. If ERROR1-CODE object also returns '0', then an error has not yet been logged for this object. See ERROR1-CODE for an explanation of the order used for storing errors.")
error1_code = MibScalar(
    (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 1, 2),
    Integer32(),
).setLabel("error1-code").setMaxAccess("readonly")
if mibBuilder.loadTexts:
    error1_code.setStatus('optional')
    error1_code.setDescription("Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: Returns a device specific error code. If the error code returned is '0', then the printer has not yet logged an error for this object. When the maximum number of errors is reached (30 on MOBY printers), and a new error occurs, the error in ERROR1-CODE will be replaced by the one in ERROR2-CODE, and so on until the last error object will be given the value of the new error. The error number is returned in the upper 16 bits. If it is 68, 69, 79, or 80, then the sub code is returned in the lower 16 bits (eg. 68 001C, where the subcode is already a hexadecimal value). If the error number is any other number, then the 1st sub code (XX) will be in bits 15-8 and the 2nd sub code (YY) will be in bits 7-0 (eg. 55.04.02) where XX=04 and YY=02). See the Control Panel ERS for specific information about the meaning of each code. Example: If the error is 68 001A, then the value returned will be 4456474. To break it down: 4456474 = 0x0044001A The upper 16 bits: 0x0044 = 68 The lower 16 bits: 0x001A = 001A Which is the error: 68 001A Example: If the error is 55.04.241, then the error code will be 3605745. To break it down: 3605745 = 0x003704F1 The upper 16 bits: 0x0037 = 55 The upper byte of the lower 16 bits: 0x04 = 04 The lower byte of the lower 16 bits: 0xF1 = 241 Which is the error: 55.04.241")
# --- Error log entries 2-15 ----------------------------------------------
# Entries 2..15 are structurally identical to entry 1 (error1_time_stamp /
# error1_code): a time-stamp scalar at ...11.<n>.1 and an error-code scalar
# at ...11.<n>.2, both read-only Integer32, with descriptions that merely
# refer back to ERROR1-*.  They are generated in a loop and published under
# the original names (error<n>_time_stamp, error<n>_code) via globals().
# NOTE(review): only entry 15's time stamp is produced here; its code
# scalar appears to be declared after this generated run -- confirm.
_ERROR_LOG_OID_BASE = (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11)


def _error_log_scalar(name, oid_suffix, description):
    # Build one read-only error-log scalar, attach its MIB texts when the
    # builder was configured to load them, and bind it at module level.
    scalar = MibScalar(_ERROR_LOG_OID_BASE + oid_suffix, Integer32()).setLabel(name.replace("_", "-")).setMaxAccess("readonly")
    if mibBuilder.loadTexts:
        scalar.setStatus('optional')
        scalar.setDescription(description)
    globals()[name] = scalar


for _entry in range(2, 16):
    _error_log_scalar(
        "error%d_time_stamp" % _entry,
        (_entry, 1),
        'Contains some sort of time stamp indicating when error %d occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.' % _entry,
    )
    if _entry <= 14:
        _error_log_scalar(
            "error%d_code" % _entry,
            (_entry, 2),
            'Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.',
        )
error15_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 15, 2), Integer32()).setLabel("error15-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error15_code.setStatus('optional')
if mibBuilder.loadTexts: error15_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error16_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 16, 1), Integer32()).setLabel("error16-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error16_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error16_time_stamp.setDescription('Contains some sort of time stamp indicating when error 16 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error16_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 16, 2), Integer32()).setLabel("error16-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error16_code.setStatus('optional')
if mibBuilder.loadTexts: error16_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error17_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 17, 1), Integer32()).setLabel("error17-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error17_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error17_time_stamp.setDescription('Contains some sort of time stamp indicating when error 17 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error17_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 17, 2), Integer32()).setLabel("error17-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error17_code.setStatus('optional')
if mibBuilder.loadTexts: error17_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error18_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 18, 1), Integer32()).setLabel("error18-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error18_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error18_time_stamp.setDescription('Contains some sort of time stamp indicating when error 18 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error18_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 18, 2), Integer32()).setLabel("error18-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error18_code.setStatus('optional')
if mibBuilder.loadTexts: error18_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error19_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 19, 1), Integer32()).setLabel("error19-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error19_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error19_time_stamp.setDescription('Contains some sort of time stamp indicating when error 19 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error19_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 19, 2), Integer32()).setLabel("error19-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error19_code.setStatus('optional')
if mibBuilder.loadTexts: error19_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error20_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 20, 1), Integer32()).setLabel("error20-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error20_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error20_time_stamp.setDescription('Contains some sort of time stamp indicating when error 20 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error20_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 20, 2), Integer32()).setLabel("error20-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error20_code.setStatus('optional')
if mibBuilder.loadTexts: error20_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error21_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 21, 1), Integer32()).setLabel("error21-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error21_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error21_time_stamp.setDescription('Contains some sort of time stamp indicating when error 21 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error21_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 21, 2), Integer32()).setLabel("error21-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error21_code.setStatus('optional')
if mibBuilder.loadTexts: error21_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error22_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 22, 1), Integer32()).setLabel("error22-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error22_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error22_time_stamp.setDescription('Contains some sort of time stamp indicating when error 22 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error22_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 22, 2), Integer32()).setLabel("error22-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error22_code.setStatus('optional')
if mibBuilder.loadTexts: error22_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error23_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 23, 1), Integer32()).setLabel("error23-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error23_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error23_time_stamp.setDescription('Contains some sort of time stamp indicating when error 23 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error23_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 23, 2), Integer32()).setLabel("error23-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error23_code.setStatus('optional')
if mibBuilder.loadTexts: error23_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error24_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 24, 1), Integer32()).setLabel("error24-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error24_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error24_time_stamp.setDescription('Contains some sort of time stamp indicating when error 24 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error24_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 24, 2), Integer32()).setLabel("error24-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error24_code.setStatus('optional')
if mibBuilder.loadTexts: error24_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error25_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 25, 1), Integer32()).setLabel("error25-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error25_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error25_time_stamp.setDescription('Contains some sort of time stamp indicating when error 25 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error25_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 25, 2), Integer32()).setLabel("error25-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error25_code.setStatus('optional')
if mibBuilder.loadTexts: error25_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error26_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 26, 1), Integer32()).setLabel("error26-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error26_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error26_time_stamp.setDescription('Contains some sort of time stamp indicating when error 26 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error26_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 26, 2), Integer32()).setLabel("error26-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error26_code.setStatus('optional')
if mibBuilder.loadTexts: error26_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error27_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 27, 1), Integer32()).setLabel("error27-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error27_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error27_time_stamp.setDescription('Contains some sort of time stamp indicating when error 27 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error27_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 27, 2), Integer32()).setLabel("error27-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error27_code.setStatus('optional')
if mibBuilder.loadTexts: error27_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error28_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 28, 1), Integer32()).setLabel("error28-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error28_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error28_time_stamp.setDescription('Contains some sort of time stamp indicating when error 28 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error28_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 28, 2), Integer32()).setLabel("error28-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error28_code.setStatus('optional')
if mibBuilder.loadTexts: error28_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error29_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 29, 1), Integer32()).setLabel("error29-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error29_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error29_time_stamp.setDescription('Contains some sort of time stamp indicating when error 29 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error29_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 29, 2), Integer32()).setLabel("error29-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error29_code.setStatus('optional')
if mibBuilder.loadTexts: error29_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error30_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 30, 1), Integer32()).setLabel("error30-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error30_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error30_time_stamp.setDescription('Contains some sort of time stamp indicating when error 30 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error30_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 30, 2), Integer32()).setLabel("error30-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error30_code.setStatus('optional')
if mibBuilder.loadTexts: error30_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error31_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 31, 1), Integer32()).setLabel("error31-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error31_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error31_time_stamp.setDescription('Contains some sort of time stamp indicating when error 31 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error31_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 31, 2), Integer32()).setLabel("error31-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error31_code.setStatus('optional')
if mibBuilder.loadTexts: error31_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error32_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 32, 1), Integer32()).setLabel("error32-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error32_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error32_time_stamp.setDescription('Contains some sort of time stamp indicating when error 32 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error32_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 32, 2), Integer32()).setLabel("error32-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error32_code.setStatus('optional')
if mibBuilder.loadTexts: error32_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error33_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 33, 1), Integer32()).setLabel("error33-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error33_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error33_time_stamp.setDescription('Contains some sort of time stamp indicating when error 33 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error33_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 33, 2), Integer32()).setLabel("error33-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error33_code.setStatus('optional')
if mibBuilder.loadTexts: error33_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error34_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 34, 1), Integer32()).setLabel("error34-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error34_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error34_time_stamp.setDescription('Contains some sort of time stamp indicating when error 34 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error34_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 34, 2), Integer32()).setLabel("error34-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error34_code.setStatus('optional')
if mibBuilder.loadTexts: error34_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error35_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 35, 1), Integer32()).setLabel("error35-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error35_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error35_time_stamp.setDescription('Contains some sort of time stamp indicating when error 35 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error35_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 35, 2), Integer32()).setLabel("error35-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error35_code.setStatus('optional')
if mibBuilder.loadTexts: error35_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error36_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 36, 1), Integer32()).setLabel("error36-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error36_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error36_time_stamp.setDescription('Contains some sort of time stamp indicating when error 36 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error36_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 36, 2), Integer32()).setLabel("error36-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error36_code.setStatus('optional')
if mibBuilder.loadTexts: error36_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error37_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 37, 1), Integer32()).setLabel("error37-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error37_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error37_time_stamp.setDescription('Contains some sort of time stamp indicating when error 37 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error37_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 37, 2), Integer32()).setLabel("error37-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error37_code.setStatus('optional')
if mibBuilder.loadTexts: error37_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error38_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 38, 1), Integer32()).setLabel("error38-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error38_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error38_time_stamp.setDescription('Contains some sort of time stamp indicating when error 38 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error38_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 38, 2), Integer32()).setLabel("error38-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error38_code.setStatus('optional')
if mibBuilder.loadTexts: error38_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error39_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 39, 1), Integer32()).setLabel("error39-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error39_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error39_time_stamp.setDescription('Contains some sort of time stamp indicating when error 39 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error39_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 39, 2), Integer32()).setLabel("error39-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error39_code.setStatus('optional')
if mibBuilder.loadTexts: error39_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error40_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 40, 1), Integer32()).setLabel("error40-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error40_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error40_time_stamp.setDescription('Contains some sort of time stamp indicating when error 40 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error40_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 40, 2), Integer32()).setLabel("error40-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error40_code.setStatus('optional')
if mibBuilder.loadTexts: error40_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error41_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 41, 1), Integer32()).setLabel("error41-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error41_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error41_time_stamp.setDescription('Contains some sort of time stamp indicating when error 41 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error41_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 41, 2), Integer32()).setLabel("error41-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error41_code.setStatus('optional')
if mibBuilder.loadTexts: error41_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
error42_time_stamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 42, 1), Integer32()).setLabel("error42-time-stamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: error42_time_stamp.setStatus('optional')
if mibBuilder.loadTexts: error42_time_stamp.setDescription('Contains some sort of time stamp indicating when error 42 occurred. Example time stamps include the actual time the error occurred (in seconds since Jan. 1, 1970), and the total engine page count. The device POS documents the meaning of the time stamp. Additional information: See ERROR1-TIME-STAMP.')
error42_code = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11, 42, 2), Integer32()).setLabel("error42-code").setMaxAccess("readonly")
if mibBuilder.loadTexts: error42_code.setStatus('optional')
if mibBuilder.loadTexts: error42_code.setDescription('Contains a device specific error code. Each device POS should list what errors are logged to the error log and the meaning of each supported error code value. Additional information: See ERROR1-CODE.')
# --- Error log entries 43..50 -------------------------------------------
# Every log slot N follows an identical template: a read-only time-stamp
# scalar at ...1.1.11.N.1 and a read-only device-specific error-code scalar
# at ...1.1.11.N.2, whose description texts differ only in the slot number.
# Rather than repeating the same 6-line pattern eight more times, generate
# the scalars in a loop.  The module-level names (error43_time_stamp,
# error43_code, ...) are still created -- via globals() -- because the rest
# of this generated module (e.g. the final mibBuilder.exportSymbols call)
# refers to them by name.  The strings produced are byte-identical to the
# original generated code.
_ERROR_LOG_OID_PREFIX = (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 11)
_ERROR_TIME_STAMP_DESCRIPTION = (
    'Contains some sort of time stamp indicating when error {0} occurred. '
    'Example time stamps include the actual time the error occurred '
    '(in seconds since Jan. 1, 1970), and the total engine page count. '
    'The device POS documents the meaning of the time stamp. '
    'Additional information: See ERROR1-TIME-STAMP.'
)
_ERROR_CODE_DESCRIPTION = (
    'Contains a device specific error code. Each device POS should list '
    'what errors are logged to the error log and the meaning of each '
    'supported error code value. Additional information: See ERROR1-CODE.'
)
for _slot in range(43, 51):
    # Time stamp scalar for error slot _slot (OID suffix: _slot, 1).
    _scalar = MibScalar(_ERROR_LOG_OID_PREFIX + (_slot, 1), Integer32()).setLabel(
        'error%d-time-stamp' % _slot).setMaxAccess("readonly")
    if mibBuilder.loadTexts: _scalar.setStatus('optional')
    if mibBuilder.loadTexts: _scalar.setDescription(_ERROR_TIME_STAMP_DESCRIPTION.format(_slot))
    globals()['error%d_time_stamp' % _slot] = _scalar
    # Device-specific error code scalar for error slot _slot (OID suffix: _slot, 2).
    _scalar = MibScalar(_ERROR_LOG_OID_PREFIX + (_slot, 2), Integer32()).setLabel(
        'error%d-code' % _slot).setMaxAccess("readonly")
    if mibBuilder.loadTexts: _scalar.setStatus('optional')
    if mibBuilder.loadTexts: _scalar.setDescription(_ERROR_CODE_DESCRIPTION)
    globals()['error%d_code' % _slot] = _scalar
del _slot, _scalar
# channelPrinterAlert: read-only octet string carrying a 24-byte big-endian
# record (six 32-bit fields mirroring the prtAlertTable row of the latest
# critical alert); per the description it is used to pass alert information
# from the peripheral to the I/O card for the printerAlert trap.
channelprinteralert = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelprinteralert.setStatus('optional')
if mibBuilder.loadTexts: channelprinteralert.setDescription("Identifies the values of prtAlertIndex, prtAlertSeverityLevel, prtAlertGroup, prtAlertGroupIndex, prtAlertLocation, and prtAlertCode for the latest critical evnet in the prtAlertTable. The binary string is defined as following: <table> ----------------------------------------------------- Field | Filed | Description Offset | Length | ----------------------------------------------------- 0 | 4 | the value of prtAlertIndex 4 | 4 | the value of prtAlertSeverityLevel 8 | 4 | the value of prtAlertGroup 12 | 4 | the value of prtAlertGroupIndex 16 | 4 | the value of prtAlertLocation 20 | 4 | the value of prtAlertCode ------------------------------------------------------ </table> Each field is in Big Endian style. Additional information: This object is used to pass alert information from the peripheral to the IIO card. The standard printer MIB contains the following description: printerAlert TRAP-TYPE ENTERPRISE printerV1Alert VARIABLES { prtAlertIndex, prtAlertSeverityLevel, prtAlertGroup, prtAlertGroupIndex, prtAlertLocation, prtAlertCode } DESCRIPTION 'This trap is sent whenever a critical event is added to the prtAlertTable.' In order to provide this information in the trap packet, the IIO card enables traps on channelPrinterAlert. When a critical alert is generated the peripheral fills the appropriate value into this object and sends it to the card. This object is a structure which contains 24 bytes of data. The structure is: struct structAlertInfo { sint32 prtAlertIndex; sint32 prtAlertSeverityLevel; sint32 prtAlertGroup; sint32 prtAlertGroupIndex; sint32 prtAlertLocation; sint32 prtAlertCode; } thisAlertData;")
# --- Installation date / clock / service-date objects --------------------
# install-date: fixed 13-char 'YYYYMMDDHHmmZ' string; per the description,
# writable only after SERVICE-PASSWORD has been set.
install_date = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(13, 13)).setFixedLength(13)).setLabel("install-date").setMaxAccess("readwrite")
if mibBuilder.loadTexts: install_date.setStatus('optional')
if mibBuilder.loadTexts: install_date.setDescription("Identifies the date that the device was installed. The format of the string is 'YYYYMMDDHHmmZ'. Where: YYYY is the year. MM is the month (1-12). DD is the day (1-31). HH is the hour of the day (0-23). mm are the minutes (0-59). 'Z' designates Greenwich Mean Time; if 'Z' not specified, value is local time. Device POS must specify the conditions for setting this object. Additional information: Setting the SERVICE-PASSWORD object will enable setting this object.")
# timestamp: write-only, sets the printer's current clock as a fixed
# 15-char 'YYYYMMDDHHMMSS' UTC string.
timestamp = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(15, 15)).setFixedLength(15)).setMaxAccess("writeonly")
if mibBuilder.loadTexts: timestamp.setStatus('optional')
if mibBuilder.loadTexts: timestamp.setDescription("Sets the printer's current time in (UTC). The format of the string is 'YYYYMMDDHHMMSS'. Where YYYY is the year. MM is the month (1-12) DD is the day of the month (1-31) HH is the hour of the day (0-23) MM is the minutes (0-59) SS is the seconds (0-59) Device POS must specify the conditions for setting this object.")
# service-id: 5-char 'YYDDD' service date code; per the description, a
# write succeeds only after the service password has been supplied.
service_id = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 19), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setLabel("service-id").setMaxAccess("readwrite")
if mibBuilder.loadTexts: service_id.setStatus('optional')
if mibBuilder.loadTexts: service_id.setDescription("A read of this object will return the current SERVICE ID value in the printer. The format is 'YYDDD' where: YY = calendar year - 1990 DDD = (calendar month - 1) * 30 + (calendar day of the month or 30, if > 30) A write of this object will only succeed if the MANUFACTURING-CONTROL PML object has been set with the correct <<hidden>>. If the write operation is not allowed, this object will return an <genErr> status. Additional information: Setting the SERVICE-PASSWORD object will enable setting this object.")
# show-address: eOff(1)/eAuto(3) toggle for displaying the device IP
# address next to the READY message on the control panel.
show_address = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 20, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3))).clone(namedValues=NamedValues(("eOff", 1), ("eAuto", 3)))).setLabel("show-address").setMaxAccess("readwrite")
if mibBuilder.loadTexts: show_address.setStatus('optional')
if mibBuilder.loadTexts: show_address.setDescription('If this object is set to eAuto, and the device has an IP address, the IP address of the device will be shown with the READY message. If this object is set to eOff, the IP address will not be shown.')
# --- Identity strings and firmware revision info -------------------------
# serial-number: up to 10 chars; per the description it is reset to the
# default 'XXXXXXXXXX' by an NVRAM init.
serial_number = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setLabel("serial-number").setMaxAccess("readonly")
if mibBuilder.loadTexts: serial_number.setStatus('optional')
if mibBuilder.loadTexts: serial_number.setDescription('Identifies the serial number for the device. If the SERIAL-NUMBER object is set by the user, then setting the object does not need to be protected. If the SERIAL-NUMBER object is set at the factory, then the SERVICE-PASSWORD object must be set correctly before the SERIAL-NUMBER object is writable. If this is a writable object, the POS should indicate the maximum supported string length. If possible, encode the serial number in a symbol set (like Roman-8) that matches the ASCII character set and limit the characters used to ASCII characters. Additional information: This value IS AFFECTED BY NVRAM resets, it is set to the default value of XXXXXXXXXX, when a NVRAM init is done.')
# fw-rom-datecode: base system firmware date code, 'yyyymmdd' format.
fw_rom_datecode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 5), DisplayString()).setLabel("fw-rom-datecode").setMaxAccess("readonly")
if mibBuilder.loadTexts: fw_rom_datecode.setStatus('optional')
if mibBuilder.loadTexts: fw_rom_datecode.setDescription('Identifies the base system firmware date code. The date code will be encoded in the yyyymmdd format. There may be several versions of the base system firmware. The date code associated with the version of the base system firmware that is being used is reported. There may be other date code objects for other specific modules such as fonts, localization modules, etc.; these other datecode objects are device specific.')
# fw-rom-revision: system firmware ROM revision, 'major.minor' format.
fw_rom_revision = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 6), DisplayString()).setLabel("fw-rom-revision").setMaxAccess("readonly")
if mibBuilder.loadTexts: fw_rom_revision.setStatus('optional')
if mibBuilder.loadTexts: fw_rom_revision.setDescription('This identifies the system code firmware ROM revision code. The format for a revision is major_revision.minor_revision. There may be other ROM revision code objects for other specific ROMs such as font ROMs, localization ROMs, etc; these other ROM revision code objects are device specific.')
# device-name: user-defined, truncated to 32 chars per the description.
device_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setLabel("device-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: device_name.setStatus('optional')
if mibBuilder.loadTexts: device_name.setDescription('User defined device name. The POS should indicate the maximum supported string length. If the user entered string is too long, the device will store as much as possible and will return the <OKNearestLegal ValueSubstituted>.Additional information: The maximum supported string length is 32 characters. If the user entered string is too long, the device will store the first 32 characters and will return the <noError> status.')
# device-location: user-defined, truncated to 16 chars per the description.
device_location = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 11), DisplayString()).setLabel("device-location").setMaxAccess("readwrite")
if mibBuilder.loadTexts: device_location.setStatus('optional')
if mibBuilder.loadTexts: device_location.setDescription('User defined device location. The POS should indicate the maximum supported string length. If the user entered string is too long, the device will store as much as possible and will return the <OKNearestLegal ValueSubstituted>.Additional information: The maximum supported string length is 16 characters. If the user entered string is too long, the device will store the first 16 characters and will return the <noError> status.')
# asset-number: user-defined, truncated to 8 chars per the description.
asset_number = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 12), DisplayString()).setLabel("asset-number").setMaxAccess("readwrite")
if mibBuilder.loadTexts: asset_number.setStatus('optional')
if mibBuilder.loadTexts: asset_number.setDescription('User defined asset number. The POS should indicate the maximum supported string length. If the user entered string is too long, the device will store as much as possible and will return the <OKNearestLegal ValueSubstituted>.Additional information: The maximum supported string length is 8 characters. If the user entered string is too long, the device will store the first 8 characters and will return the <noError> status.')
# --- Default job / page-layout / media settings --------------------------
# default-copies: 1..32000; out-of-range writes snap per the description.
default_copies = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 4), Integer32()).setLabel("default-copies").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_copies.setStatus('optional')
if mibBuilder.loadTexts: default_copies.setDescription('Returns or changes default copies. Default copies is the default values used by the PDL to control the number of copies of each page in the print job that are printed. The print job can override this value. The list of supported values should be documented in the device POS. Additional information: The supported values are 1 through 32000. Setting to an unsupported value causes the printer to substitute in a snap value and to return <noError> status. The snaps are as follows: <1 snaps to 1 >999 snaps to 32000')
# default-lines-per-page: 5..128, with snap-to-range behavior.
default_lines_per_page = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 11), Integer32()).setLabel("default-lines-per-page").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_lines_per_page.setStatus('optional')
if mibBuilder.loadTexts: default_lines_per_page.setDescription('Returns or changes the default number of lines per page. The POS indicates the supported values. An unsupported value causes the printer to use the closest supported value, causing the printer to return the <noError>. Additional information: The supported values in LaserJet 8150 are 5 to 128. Setting to an unsupported value causes the the printer to substitute in a snap value and to return <noError> status. The snap values are as follow: <5 snaps to 5 >128 snaps to 128')
# default-vmi: vertical motion index in centipoints per line.
default_vmi = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 12), Integer32()).setLabel("default-vmi").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_vmi.setStatus('optional')
if mibBuilder.loadTexts: default_vmi.setDescription('Returns or changes the default vertical motion index. The unit of measure for VMI is centipoints per line. The POS indicates the supported values. An unsupported value causes the printer to use the closest supported value, causing the printer to return <noError>.')
# default-media-size: enumerated media sizes supported by this device.
default_media_size = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 10, 15, 17, 18, 25, 26, 45, 72, 80, 81, 90, 91, 100, 101))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eFoolscap", 10), ("eStatement", 15), ("eROC16K", 17), ("eJISExecutive", 18), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eJISB5", 45), ("eJapanesePostcardDouble", 72), ("eMonarch", 80), ("eCommercial10", 81), ("eInternationalDL", 90), ("eInternationalC5", 91), ("eInternationalB5", 100), ("eCustom", 101)))).setLabel("default-media-size").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_media_size.setStatus('optional')
if mibBuilder.loadTexts: default_media_size.setDescription("This indicates the default media size. A write of an unsupported value causes an <ErrorInvalidOrUnsupported Value>. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document. (for a full list of media size enums see the end of this file) ")
# cold-reset-media-size: media size restored by a cold reset (Letter/A4).
cold_reset_media_size = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 26))).clone(namedValues=NamedValues(("eUSLetter", 2), ("eISOandJISA4", 26)))).setLabel("cold-reset-media-size").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cold_reset_media_size.setStatus('optional')
if mibBuilder.loadTexts: cold_reset_media_size.setDescription('Returns or sets the media size that is used as the DEFAULT-MEDIA-SIZE when a cold reset occurs.')
# reprint: jam-recovery setting (eOff/eOn/eAuto).
reprint = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("eOff", 1), ("eOn", 2), ("eAuto", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: reprint.setStatus('optional')
if mibBuilder.loadTexts: reprint.setDescription('Returns or changes the reprint (jam recovery) setting. If eOn, then the device will reprint pages jammed pages. If eOff, the device will not attempt to reprint jammed pages. If eAuto, a device dependent algorithm (which should be documented in the POS) will be used to determine if the page gets reprinted. This object controls all PDLs, unless a specific PDL supports its own reprint control mechanism. To date, only PostScript has a PDL reprint control mechanism.')
# tray-prompt: whether closing a refilled tray prompts for size/type.
tray_prompt = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eDoNotDisplay", 1), ("eDisplay", 2)))).setLabel("tray-prompt").setMaxAccess("readwrite")
if mibBuilder.loadTexts: tray_prompt.setStatus('optional')
if mibBuilder.loadTexts: tray_prompt.setDescription("If this object is set to eDisplay, whenever a paper tray is opened, refilled, and closed in a printer, the end user is prompted with a message 'To change size or type press check' on the control panel. If this object is set to eDoNotDisplay, then the end user will not be prompted with this message whenever a paper tray is opened, refilled, and closed in a printer. Additional information: Whenever a paper tray is opened and closed, the user is provided with two options Display and Do Not Display, through the menu item Size/Type Prompt.On selecting Display he chooses to get prompted with the message to change the size or type of paper. Whereas he is not prompted with this message in case he chooses Do Not Display.")
# --- PCL personality objects ---------------------------------------------
# pcl-total-page-count: NVRAM-backed counter; per the description it is
# flushed at least every 10 pages and rolls over at 2^24-1.
pcl_total_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 3, 5), Integer32()).setLabel("pcl-total-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: pcl_total_page_count.setStatus('optional')
if mibBuilder.loadTexts: pcl_total_page_count.setDescription('Total number of PCL pages printed by the device. Additional information: In LaserJet 8150 the PCL page count is kept in NVRAM, and the NVRAM value is updated at least every 10 pages. NOTE: The value returned by this object will be incremented every page but if power is lost between NVRAM updates, up to 9 pages of the page count may be lost. The page count counter will be reset to zero after 16,777,215 (2^24-1) pages. The page count is incremented when a sheet of media is pulled from an input tray. A duplex printed sheet will cause this counter to be incremented by two.')
# pcl-default-font-height: proportional-font height in centipoints
# (1/7200 inch); snaps to the 400..99975 range in 25-unit steps.
pcl_default_font_height = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 3, 13), Integer32()).setLabel("pcl-default-font-height").setMaxAccess("readwrite")
if mibBuilder.loadTexts: pcl_default_font_height.setStatus('optional')
if mibBuilder.loadTexts: pcl_default_font_height.setDescription("Returns or changes the default PCL height. Height is an approximate measure of the body of the type in centipoints. A centipoint is 1/7200 inch. Height applies only to proportional fonts. Point size, in points, can be converted to font height, in centipoints, by multiplying the point size by 100. The POS indicates the supported values. An unsupported value causes the printer to use the closest supported value, causing the printer to return <noError>. ''Closest'' means the smallest absolute difference. Additional information: Supported values range from 400 to 99975, in increments of 25 units. Setting to an unsupported value causes the printer to substitute in a snap value and to return <noError> status. The snap values are as follow: <=400 snaps to 400 >=99975 snaps to 99975 Unsupported values in the range 400 to 99975 snap DOWN to the previous supported value (i.e 25293 snaps to 25275 or 75038 snaps to 75025).")
# pcl-default-font-source: NVRAM default font source (internal/soft/ROM SIMM).
pcl_default_font_source = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 3, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 11, 12, 13, 14))).clone(namedValues=NamedValues(("eInternal", 1), ("ePermanentSoft", 2), ("eRomSimm2", 11), ("eRomSimm3", 12), ("eRomSimm4", 13), ("eRomSimm5", 14)))).setLabel("pcl-default-font-source").setMaxAccess("readwrite")
if mibBuilder.loadTexts: pcl_default_font_source.setStatus('optional')
if mibBuilder.loadTexts: pcl_default_font_source.setDescription('Returns or changes the value of the default font source variable in NVRAM.')
# pcl-default-font-number: 0..255, restricted to fonts in the PCL font list.
pcl_default_font_number = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 3, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setLabel("pcl-default-font-number").setMaxAccess("readwrite")
if mibBuilder.loadTexts: pcl_default_font_number.setStatus('optional')
if mibBuilder.loadTexts: pcl_default_font_number.setDescription('Returns or changes the value of the default font number variable in NVRAM. Writing an unsupported value causes the printer to generate an <badValue>. Additional information: Valid numbers or 0 - 255, but only font numbers that are included in the PCL FONT LIST are selectable.')
# pcl-default-font-width: fixed-space font width in centipoints
# (7200 / pitch); snaps to the 44..9999 range.
pcl_default_font_width = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 3, 16), Integer32()).setLabel("pcl-default-font-width").setMaxAccess("readwrite")
if mibBuilder.loadTexts: pcl_default_font_width.setStatus('optional')
if mibBuilder.loadTexts: pcl_default_font_width.setDescription("Returns or changes the default PCL font width. Width is expressed as the width of a character in centipoints. A centipoint is 1/7200 inch. Width applies only to fixed space fonts. Pitch, in character per inch, can be converted to font width, in centipoints, by dividing 7200 by the pitch. The POS indicates the supported values. An unsupported value causes the printer to use the closest supported value, causing the printer to return <noError>. ''Closest'' means the smallest absolute difference. Additional information: Setting to an unsupported value causes the printer to substitue in a snap value (listed below) and return <noError> status. The snap values for pitch are as follow: <=44 snaps to 44 >=9999 snaps to 9999")
# --- PostScript personality objects (present only when the PostScript
# option is installed, per the descriptions) ------------------------------
postscript_total_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 4, 5), Integer32()).setLabel("postscript-total-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: postscript_total_page_count.setStatus('optional')
if mibBuilder.loadTexts: postscript_total_page_count.setDescription('Total number of PostScript pages printed by the device. Additional information: This object is only supported if the PostScript option is installed. In LaserJet 8150 the count is kept in NVRAM, and the NVRAM value is updated at least every 10 sheets. NOTE: The value returned by this object will be incremented every sheet but if power is lost between NVRAM updates up to 9 sheets of the count may be lost. The counter will be reset to zero after 16,777,215 (2^24-1) pages. ')
# postscript-print-errors: eOn prints a PostScript error/stack page.
postscript_print_errors = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 4, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eOff", 1), ("eOn", 2)))).setLabel("postscript-print-errors").setMaxAccess("readwrite")
if mibBuilder.loadTexts: postscript_print_errors.setStatus('optional')
if mibBuilder.loadTexts: postscript_print_errors.setDescription('Returns or changes the value of the print PostScript errors setting. If eOn, PostScript prints an error page showing the error encountered and the stack at the time of the error. Additional information: This object is only supported if the PostScript option is installed.')
# --- Mopy capability, job tracking and held-job objects ------------------
# collated-originals-support: bitmask octet string describing mopy
# (multiple collated originals) features; per the description it only
# exists when a disk (IDE or RAM disk) is present.
collated_originals_support = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 42), OctetString()).setLabel("collated-originals-support").setMaxAccess("readonly")
if mibBuilder.loadTexts: collated_originals_support.setStatus('optional')
if mibBuilder.loadTexts: collated_originals_support.setDescription("Indicates that the printer can create multiple, collated 'originals' of a job and shows the level of support for this capability. cCollatedOriginals - The printer can create multiple collated originals, or 'mopies'. This is the base functionality required by the other collection items. cProofAndHold - The printer saves a copy of the job while printing the first mopy. Later, this held job can be released and the rest of the mopies will print. cSecurityPrint - The printer spools the job and holds the job until the user releases the job by entering a password at the printer's control panel. cAutoHighlight - The printer prints the job a number of times with a mail distribution list prepended to each job. A different mailing address is highlighted on each mopy. cCollatedAtSpeed - The printer can create multiple collated copies where all copies after the original are printed at engine speed. Additional information: Indicates that the printer can create multiple, collated originals of a job and shows the level of support for this capability. cCollatedOriginals - The printer can create multiple collated originals, or mopies. This is the base functionality required by the other collection items. Without disk, this object will not exist. This will be set at boot-up initialization. If the disk fails, in any way, then the disk error functionality will handle the situation. A color printer (LaserJet 4550) that only has a RAMDISK installed is treated like there is no disk and therefore this object will not exist. The value of this object will be: !cCollatedOriginals - Only FLASH installed cCollatedOriginals - IDE is installed combined with/without FLASH cCollatedOriginals - RAMDISK is On combined with/without FLASH")
# host-application-available-memory: driver work space (DWS) memory, bytes.
host_application_available_memory = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 59), Integer32()).setLabel("host-application-available-memory").setMaxAccess("readonly")
if mibBuilder.loadTexts: host_application_available_memory.setStatus('optional')
if mibBuilder.loadTexts: host_application_available_memory.setDescription('Returns the amount of memory, in bytes, that the device has available for job-related processing or resources. Also known as driver work space (DWS) memory.')
# socket-ping-job-events-version: 'MAJOR.MINOR.SUB' capability version.
socket_ping_job_events_version = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 7, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setLabel("socket-ping-job-events-version").setMaxAccess("readonly")
if mibBuilder.loadTexts: socket_ping_job_events_version.setStatus('optional')
if mibBuilder.loadTexts: socket_ping_job_events_version.setDescription("This object reports the current version of the Socket Ping Job Events capability. The version number is returned as a string in the format MAJOR.MINOR.SUB version. (eg. 1.0.1) If socket ping is not supported by this printer then the object is either not implemented or it returns a '0'.")
# job-info-change-id: fixed 16-byte big-endian record (field id, optional
# field id, job id, new value) reporting a change in the job-info subtree.
job_info_change_id = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16)).setLabel("job-info-change-id").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_change_id.setStatus('optional')
if mibBuilder.loadTexts: job_info_change_id.setDescription('Returns the current value of an object in the job-info sub-tree whose value has changed. Define the object identifier for the object whose value has changed as job-info.required-field.optional-field.job-id. job-info represents the OID prefix of all objects in the job-info sub-tree. Required-field represents the OID field value that follows the job-info prefix. Since some objects in the job-info sub-tree have two OID fields between the job-info prefix and the job-id, the optional-field represents the OID field between the required-field and the job-id field, if present. Using this definition, the format for the JOB-INFO-CHANGE-ID binary value can be described as follows: Bytes 0-3 : required-field Bytes 4-7 : optional-field, or all zeros if there is no optional field Bytes 8-11: Job ID. Bytes 12-n : The value of the object job-info. required-field.optional-field.job-id. All multi-byte values stored in Motorola (big-endian) format, where the most significant byte occurs first. Additional information: LaserJet 8100 will use this object to report changes to the job-info-pages-printed and job-info-state objects. The required-field (bytes 0 through 3) will designate whether the change to be reported involves the pages printed (13) or job state (15). The optional-field (bytes 4 through 7) will always be zeroes. The value-field (bytes 12 through 15) will contain the new value for pages printed or job state cast to a 32-bit integer. Note: It is possible that traps generated by this object have the same value. This is caused by the architecture of LaserJet 8100. In the LaserJet 8100 System, it is highly probable that the object value will change so rapidly that when the trap is processed, PML will read the same value twice. This is timing related and will generally be seen at the end of a job.')
# hold-job-timeout: minutes before an un-released held job is auto-deleted;
# 0 disables, other values snap to 60/240/1440/10080 per the description.
hold_job_timeout = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 1, 10), Integer32()).setLabel("hold-job-timeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: hold_job_timeout.setStatus('optional')
if mibBuilder.loadTexts: hold_job_timeout.setDescription('The time, in minutes, that the printer will wait before automatically deleting a held job. This allows the printer to automatically clean up jobs that have been forgotten (held but never released). Additional information: This only applies to temporary held jobs, i.e., HOLD=ON or PROOF. This is a global timer that only affects the jobs that are sent after it is set. A value of 0 means never delete the jobs. Setting it to an unsupported value causes the printer to substitute a value (listed below) and to return <noError> status. LaserJet 4600 supports values of 0, 60, 240, 1440, and 10080 The values are as follows: <=0 snap to 0 (disabled) >=1 and <=150 snap to 60 (60 minutes) >=151 and <=840 snap to 240 (4 hours) >=841 and <=2880 snap to 1440 (1 day) >=2881 snap to 10080 (1 week)')
# current-job-parsing-id: job id currently being parsed; -1 when idle.
current_job_parsing_id = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setLabel("current-job-parsing-id").setMaxAccess("readonly")
if mibBuilder.loadTexts: current_job_parsing_id.setStatus('optional')
if mibBuilder.loadTexts: current_job_parsing_id.setDescription("Contains the printer assigned job identification for the job currently being processed by the processing PDL sub-system. The job ID is a monotonically increasing number. The job ID may be reset to zero at power-up and may roll over to zero after reaching some maximum value. Additional information: A value of -1 is returned when the printer is not parsing a job. When data for a new job is detected, this object is updated (the job may or may not turn out to be a Null Job); however, the trap does not occur until the printer determines that it is not a Null Job. (A job is considered to be a 'null job' if it has no name or job attribute, causes no pages to be printed, and consumes MIN_JOB_SIZE (9) or fewer bytes.) A trapped value will differ from the previous value by one or more. See the Job Boundary ERS for details on what constitutes a job boundary. Some job-info- objects are created when the first data bytes are received. If the printer determines that the job is a Null Job, the job-info- objects related to the Null Job are deleted. LaserJet 8100 retains the job-info- objects for the MAX_JOBS_IN_LIST (32) most recent jobs that are not Null Jobs. The first job received after power-up will have job ID 1, and the job ID will increment to 2,147,483,647 before rolling to zero. To distinguish whether a power cycle or a rollover causes a reduction in the job ID value, the object prtGeneralConfigChanges can be watched and if it increments at the same time as the drop is observed in the value of CURRENT-JOB-PARSING-ID, then a power cycle is the most likely cause.")
# Per-job dynamic objects under ...6.5.x; the final OID field (not shown in
# the base tuple) is the job ID at query time.

# First 40 characters of the job name (PJL NAME= parameter).
job_info_name1 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 1), DisplayString()).setLabel("job-info-name1").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_name1.setStatus('optional')
if mibBuilder.loadTexts: job_info_name1.setDescription("Contains the first part of this print job's name. The last OID field for this dynamic object contains the job ID. Additional information: The job name is the string specified by the NAME= parameter of the @PJL JOB command which allows a maximum of 80 characters. The first 40 characters are in this object and the second 40 are in job-info-name2. The symbol set is Roman-8. When jobs are nested, the value assigned to this object is the name provided by the most recent @PJL JOB NAME= command. If no name is provided, a null string is returned.")
# Second 40 characters of the job name (continuation of job-info-name1).
job_info_name2 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 2), DisplayString()).setLabel("job-info-name2").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_name2.setStatus('optional')
if mibBuilder.loadTexts: job_info_name2.setDescription("Contains the second part of this print job's name. The last OID field for this dynamic object contains the job ID. Additional information: See job-info-name1.")
# Bitmask (OctetString collection) of printer sub-systems currently
# processing this job (source / processing / destination).
job_info_stage = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 10), OctetString()).setLabel("job-info-stage").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_stage.setStatus('optional')
if mibBuilder.loadTexts: job_info_stage.setDescription("Indicates what printer sub-systems are currently processing this print job. The last OID field for this dynamic object contains the job ID. cSourceSubsystem - some of the job is in the printer I/O subsystem. cProcessingSubsystem - some of the job is in the printer imaging processing subsystem. cDestinationSubsystem - some of the job is being printed. Additional information: For LaserJet 8100, cSourceSubsystem and cProcessingSubsystem will always be set and cleared together. They are set when the beginning of the job is detected, and they are cleared when the end of the job is parsed. A non-printing job will never set cDestinationSubsystem. When a page is ready to be printed (the intermediate has been built and the page is `closed' by the personality), cDestinationSubsystem will be set even if a page of a previous job is currently being printed. cDestinationSubsystem remains set until the last page of the job has finished printing. If a page requires extensive processing and allows all the previous pages of a job to complete printing, cDestinationSubsystem will remain set until the last page of the job has finished printing.")
# Which I/O port the job arrived on (maps to the IO/PORTS sub-tree numbering).
job_info_io_source = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 11), Integer32()).setLabel("job-info-io-source").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_io_source.setStatus('optional')
if mibBuilder.loadTexts: job_info_io_source.setDescription('Indicates which I/O source, the print job was received over. The value maps to port numbering scheme supported in the DEVICE/SOURCE-SUBSYSTEM/IO/PORTS sub-tree.')
# Count of pages processed (closed by the personality), independent of how
# many physical copies are printed.
job_info_pages_processed = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 12), Integer32()).setLabel("job-info-pages-processed").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_pages_processed.setStatus('optional')
if mibBuilder.loadTexts: job_info_pages_processed.setDescription("Indicates the number of pages processed by the processing subsystem. If neither the cSourceSubsystem or the cProcessingSubsystem items are in the associated JOB-INFO-STAGE object, then this object contains the total number of pages processed for this job. The last OID field for this dynamic object contains the job ID. Additional information: This object is incremented by one when a page is processed (`closed' by the personality), regardless of the number of pages that are printed as a result of the single page processed. In other words, it does not matter how many identical copies of a page are printed or no pages may be printed if operating in silent run mode, this count is incremented exactly once. A page is considered processed when all of the input data has been processed for a page (that is, when the intermediate has been produced, and the personality has `closed' the page. At this time, the image of the page is not necessarily completely formed.)")
# Count of physical pages printed (each copy counted individually).
job_info_pages_printed = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 13), Integer32()).setLabel("job-info-pages-printed").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_pages_printed.setStatus('optional')
if mibBuilder.loadTexts: job_info_pages_printed.setDescription('Indicates the number of pages printed by the destination subsystem. If none of the cSourceSubsystem, cProcessingSubsystem or cDestinationSubsystem items are in the associated JOB-INFO-STAGE object, then this object contains the total number of pages printed for this job. This value may increase by two each time for duplex jobs. The last OID field for this dynamic object contains the job ID. Additional information: If multiple copies of a page are printed, each copy is counted individually.')
# Bytes of job data processed so far (approximate while job is in progress).
job_info_size = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 14), Integer32()).setLabel("job-info-size").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_size.setStatus('optional')
if mibBuilder.loadTexts: job_info_size.setDescription('Indicates the number of bytes of data processed by the processing subsystem. If neither of the cSourceSubsystem or cProcessingSubsystem items are in the associated JOB-INFO-STAGE object, then this object contains the size of this job, in bytes. The last OID field for this dynamic object contains the job ID. Additional information: This count may be incremented by values other than one as blocks of data are processed; blocks of data (which may be as large as 2K bytes) will be processed in a varying amount of time. During the processing of a job and even when a job completes, an exact count of the number of I/O bytes processed by the job is not necessarily to be expected.')
# Enumerated job state.  NOTE(review): the description text also mentions
# eRetained/eInterrupted/ePaused, which are not in this generated enum —
# presumably pruned for this device model; confirm against the source MIB.
job_info_state = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(3, 4, 5, 7, 10, 11, 12, 13))).clone(namedValues=NamedValues(("eAborted", 3), ("eWaitingForResources", 4), ("ePrinted", 5), ("eTerminating", 7), ("eCancelled", 10), ("eProcessing", 11), ("eScanning", 12), ("eSending", 13)))).setLabel("job-info-state").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_state.setStatus('optional')
if mibBuilder.loadTexts: job_info_state.setDescription('Indicates the state of the job. The last OID field for this dynamic object contains the job ID. eAborted - the print job was aborted. eWaitingForResources - the print job requires resources that are not currently available. Example resources that can cause the job to wait include the print engine or PDL processor being unavailable. The print engine could be unavailable due to paper out, paper jam, staple out, stapler jam, marking agent low, output bin full, etc. The PDL processor could be unavailable due to an off-line condition. Each printer specific object specification should state which conditions cause a job to be waiting for resources and also state which objects can be retrieved by an application to determine the exact cause of a resource being unavailable. ePrinted - the job has printed. The related JOB-INFO- OUTCOME object indicates if any problems were encountered while the job was processed. eRetained - the job can be reprinted. eTerminating - the job was aborted or cancelled and is currently is terminating. eInterrupted - the job has been interrupted. The job can be continued. ePaused - the job has been paused. The job can be continuted. eCancelled - the job has been cancelled. eProcessing - the job is currently being printed normally. ')
# Job outcome; only eOk is defined (warnings/errors not recorded — object
# retained for software compatibility, per description).
job_info_outcome = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(3))).clone(namedValues=NamedValues(("eOk", 3)))).setLabel("job-info-outcome").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_outcome.setStatus('optional')
if mibBuilder.loadTexts: job_info_outcome.setDescription('Indicates if any warning or error conditions were encountered while processing the assoicated job. The last OID field for this dynamic object contains the job ID. Additional information: In LaserJet 8100, warnings and errors are not recorded in this object. Although no meaningful information can be obtained from this object, it is kept around for the compatibility needs of existing software.')
# Bitmask of logical output bins this job delivered pages to.
job_info_outbins_used = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 20), OctetString()).setLabel("job-info-outbins-used").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_outbins_used.setStatus('optional')
if mibBuilder.loadTexts: job_info_outbins_used.setDescription("Indicates which output bins this job has delivered printed pages to. The last OID field for this dynamic object contains the job ID. Additional information: The bins designated by this collection include the printer's own output bins as well as the `logical output bins' associated with any attached external paper handling devices. (The mapping of the physical output bins of an external paper handling device to its `logical output bins' depends on the usage mode of the device. For instance, in `mailbox mode', there is one-to-one correspondence of `logical output bins' to physical bins, whereas in `stacker mode', one `logical output bin' may map to several physical bins.) Although LaserJet 4100 and LaserJet 4550 both come with two output bins -- a face-down bin on the top of the device and a face-up bin at the back of the device -- firmware will not provide a means of selecting between the two, nor will it report which of the two gets used. For this reason, bit 0 (cOutbin1) of this collection is being used to designate both of these bins together.")
# Bitmask of physical output bins this job delivered pages to.
job_info_physical_outbins_used = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 22), OctetString()).setLabel("job-info-physical-outbins-used").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_physical_outbins_used.setStatus('optional')
if mibBuilder.loadTexts: job_info_physical_outbins_used.setDescription("Indicates which physical output bins this job has delivered printed pages to. The last OID field for this dynamic object contains the job ID. Additional information: The output bins designated by the bits of this collection include the standard output bin(s) plus the physical bins of attached external paper handling devices. The configuration (if any) of external paper handling devices will determine the mapping of individual bits in this collection to the physical bins of the devices. For LaserJet 4100 and LaserJet 4550, the cOutbin1 is used for Face Down bin, cOutbin2 is used for Face Up bin, and 3-15 are used for the 13 optional output bins (12 plus an extension to the Face Up bin) for backwards compatibility with LaserJet 8000/LaserJet 8100 and forwards compatibility with LaserJet 8150. However, since they cannot distinguish output going to the Face Up or Face Down bin in the firmware because the diverter can only be moved manually, bit 0 (cOutbin1) is being used to designate both of these bits together. If an external paper handling output device is connected, then bit 2 (cOutbin3) will represent the device's first output bin (and so on).")
# Sixteen identical attribute slots (...6.5.23.1 through ...6.5.23.16) that
# echo back values set via "@PJL SET JOBATTR=".  Each is a read-only
# OctetString of 1-80 bytes; slots are filled sequentially per job, and
# reading an unset slot returns <noSuchName> (see JOB-INFO-ATTR-1 text).
job_info_attr_1 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-1").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_1.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_1.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. Attribute objects are saved sequentially, starting with 1, after the start of a job. If more attributes are set than there are objects to store them, the excess JOBATTR values are ignored. If the corresponding SET JOBATTR= command has not been received when a get is done for this object, a status of <noSuchName> will be returned.')
job_info_attr_2 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-2").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_2.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_2.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_3 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-3").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_3.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_3.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_4 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-4").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_4.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_4.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_5 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-5").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_5.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_5.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_6 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-6").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_6.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_6.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_7 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-7").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_7.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_7.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_8 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-8").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_8.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_8.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_9 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-9").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_9.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_9.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_10 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-10").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_10.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_10.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_11 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-11").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_11.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_11.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_12 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-12").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_12.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_12.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_13 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-13").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_13.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_13.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_14 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-14").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_14.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_14.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_15 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 15), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-15").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_15.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_15.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
job_info_attr_16 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 23, 16), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setLabel("job-info-attr-16").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_attr_16.setStatus('optional')
if mibBuilder.loadTexts: job_info_attr_16.setDescription('Returns the value that was set in PJL via the SET JOBATTR= command. See JOB-INFO-ATTR-1 for details.')
# Collated multi-copy ("mopy") progress counters.

# Number of collated copies requested (PJL QTY / PostScript NumCopies).
job_info_requested_originals = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 24), Integer32()).setLabel("job-info-requested-originals").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_requested_originals.setStatus('optional')
if mibBuilder.loadTexts: job_info_requested_originals.setDescription('The number of requested collated copies. The value was supplied with the job via PJL QTY variable, the PostScript Collate and NumCopies entries in the page device dictionary, or via some other mechansim.')
# Page number printed within the current copy (vs. job-info-pages-printed,
# which counts across all copies).
job_info_page_count_current_original = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 25), Integer32()).setLabel("job-info-page-count-current-original").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_page_count_current_original.setStatus('optional')
if mibBuilder.loadTexts: job_info_page_count_current_original.setDescription('The page number being printed within the current copy of a collated multi-copy job. This value changes when the job-info-pages-printed changes. The job-info-pages-printed indicates the total number of pages printed in the job, while this object indicates the number of pages printed for this copy of a collated multi-copy job. Additional information: The number of pages in the current copy of a collated multi-copy job which have been completely printed and safely delivered to the output bin.')
# Pages in a single copy of the job.
job_info_pages_in_original = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 26), Integer32()).setLabel("job-info-pages-in-original").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_pages_in_original.setStatus('optional')
if mibBuilder.loadTexts: job_info_pages_in_original.setDescription('Number of pages in a single copy of a collated multi-copy job.')
# Copies completely printed and delivered so far.
job_info_printed_originals = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 27), Integer32()).setLabel("job-info-printed-originals").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_printed_originals.setStatus('optional')
if mibBuilder.loadTexts: job_info_printed_originals.setDescription('Number of collated copies completely printed and delivered to the output bin at time of query.')
# Per-job accounting objects under ...6.5.28.x.  The media size/type/finishing
# objects reflect the first page of the job and are only valid once the whole
# job has printed (per the description strings).

# Enumerated media size of the printed job.
job_info_accounting_media_size = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 11, 17, 18, 19, 25, 26, 27, 45, 46, 65, 72, 80, 81, 90, 91, 100, 101, 258, 282, 32767))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eLedger", 11), ("eROC16K", 17), ("eJISExecutive", 18), ("eROC8K", 19), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eISOandJISA3", 27), ("eJISB5", 45), ("eJISB4", 46), ("eISOB5", 65), ("eJapansePostcardDouble", 72), ("eMonarch", 80), ("eCommercial10", 81), ("eInternationalDL", 90), ("eInternationalC5", 91), ("eInternationalB5", 100), ("eCustom", 101), ("eUSLetterR", 258), ("eISOandJISA4R", 282), ("eUnknownMediaSize", 32767)))).setLabel("job-info-accounting-media-size").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_media_size.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_media_size.setDescription('Contains the media size of the printed job. The media size of the first page will decide the media size of the entire job. The return value of this object will only be valid when the printer finishes printing the entire job.')
# Enumerated media type of the printed job.
job_info_accounting_media_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20))).clone(namedValues=NamedValues(("eUnknownMedia", 1), ("eStandardType", 2), ("ePreprinted", 3), ("eBond", 4), ("eLetterhead", 5), ("eTransparency", 7), ("eLabels", 8), ("eRecycled", 9), ("eColored", 10), ("eCardStock", 11), ("eRough", 12), ("ePrepunched", 13), ("eHeavy", 14), ("eUserType1", 16), ("eUserType2", 17), ("eUserType3", 18), ("eUserType4", 19), ("eUserType5", 20)))).setLabel("job-info-accounting-media-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_media_type.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_media_type.setDescription('Contains the media type of the printed job. The media type of the first page will decide the media type of the entire job. The return value of this object will only be valid when the printer finishes printing the entire job.')
# Finishing option (stapling, punching, etc.) applied to the job.
job_info_accounting_finishing_options = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("eNoFinish", 1), ("eOffset", 2), ("ePunch", 3), ("eStapler", 4), ("eFinisher", 5)))).setLabel("job-info-accounting-finishing-options").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_finishing_options.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_finishing_options.setDescription('Contains the finishing option used on the printed job. The finishing option specified for the first page will decide the finishing option of the entire job. The return value of this object will only be valid when the printer finishes printing the entire job.')
# Simplex / duplex page totals for the job.
job_info_accounting_media_simplex_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 4), Integer32()).setLabel("job-info-accounting-media-simplex-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_media_simplex_count.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_media_simplex_count.setDescription('Contains the total number of simplex pages printed in a particular job.')
job_info_accounting_media_duplex_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 5), Integer32()).setLabel("job-info-accounting-media-duplex-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_media_duplex_count.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_media_duplex_count.setDescription('Contains the total number of duplex pages printed in a particular job.')
# Monochrome / color impression totals for the job.
job_info_accounting_grayscale_impression_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 6), Integer32()).setLabel("job-info-accounting-grayscale-impression-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_grayscale_impression_count.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_grayscale_impression_count.setDescription('Contains the total number of monochrome pages printed in a particular job.')
job_info_accounting_color_impression_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 7), Integer32()).setLabel("job-info-accounting-color-impression-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_color_impression_count.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_color_impression_count.setDescription('Contains the total number of color pages printed in a particular job.')
# Per-colorant dot counters, reported in kilodots (1000 dots) per the
# description strings.
job_info_accounting_black_dots = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 8), Integer32()).setLabel("job-info-accounting-black-dots").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_black_dots.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_black_dots.setDescription('Contains the total number of black pixels used in a particular job. Additional information: This object reports kilodots. (A kilodot is 1000 dots.)')
job_info_accounting_yellow_dots = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 9), Integer32()).setLabel("job-info-accounting-yellow-dots").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_yellow_dots.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_yellow_dots.setDescription('Contains the total number of yellow pixels used in a particular job. Additional information: This object reports kilodots. (A kilodot is 1000 dots.)')
job_info_accounting_cyan_dots = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 10), Integer32()).setLabel("job-info-accounting-cyan-dots").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_cyan_dots.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_cyan_dots.setDescription('Contains the total number of cyan pixels used in a particular job. Additional information: This object reports kilodots. (A kilodot is 1000 dots.)')
job_info_accounting_magenta_dots = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 11), Integer32()).setLabel("job-info-accounting-magenta-dots").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_magenta_dots.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_magenta_dots.setDescription('Contains the total number of magenta pixels used in a particular job. Additional information: This object reports kilodots. (A kilodot is 1000 dots.)')
# Enumerated classification of the job (print, copy, fax, internal page...).
job_info_accounting_job_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 5, 28, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1000))).clone(namedValues=NamedValues(("ePrintJob", 1), ("eIPPJob", 2), ("eCopyJob", 3), ("eCopyInterruptJob", 4), ("eJetSendJob", 5), ("eInternalPage", 6), ("eCleaningPage", 7), ("eAutoCleaningPage", 8), ("eDigitalSendJob", 9), ("eWebPrintJob", 10), ("eFAXPrintJob", 11), ("eRetrievedJob", 12), ("ePhotoCardPrintJob", 13), ("eUnknownJob", 1000)))).setLabel("job-info-accounting-job-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: job_info_accounting_job_type.setStatus('optional')
if mibBuilder.loadTexts: job_info_accounting_job_type.setDescription('Keeps track of what type of job is processed. ')
held_job_user_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setLabel("held-job-user-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: held_job_user_name.setStatus('optional')
if mibBuilder.loadTexts: held_job_user_name.setDescription('User name that is obtained by the driver through some internal method or from user input. Additional information: The following is a general description for all the JOB-MANAGEMENT objects described below. These object describe the attributes of the dynamic list containing all the stored jobs on the disk available for printing or deleting via the job-management feature. The jobs on this list are not deleted from the disk unless explicitly specified by the user. A print job may be specified for retention by PJL commands in the data stream. Following is a list of the PJL commands (i.e. these comments describe the PJL implementation and in some cases do not reflect the PML implementation.): @PJL SET HOLD=OFF|ON|PROOF|STORE|PRINT GENERAL DESCRIPTION: This variable specifies the retention classification of the job. The values indicate whether the job is printed immediately and/or stored. OFF: The job is printed but not retained on disk after printing. This is the default value. ON: This setting may be thought of as free proof-and-hold. The requested number of copies will be printed and the job will be temporarily stored on disk. The job will then be available for printing additional copies through the control panel and through PML. There will be a limit to the number of these temporary jobs that may be stored, and when the limit is exceeded the oldest job will be removed from the disk. PROOF: One copy of the job is printed and remaining copies are stored on disk. The job is then available to select for printing via the control panel menus or PML. The job will be deleted from the disk when the disk space is needed for another proof and hold job but only after the additional copies have been printed. It will also be deleted when the user sends down another proof and hold job with the same job name, or the user explicitly deletes the job. STORE: The job is not printed immediately but is retained on disk. 
The job is available to select for printing via the control panel DEFAULT VALUE: OFF The value will be stored in RAM only, not in NVRAM. The legal PJL commands are SET and INQUIRE. DEFAULT is not allowed. The variable will appear in the PJL INFO VARIABLES list. @PJL SET USERNAME=<80 bytes> GENERAL DESCRIPTION: Eighty-character user name that is obtained by the driver through some internal method or from user input. If the job stream does not contain a USERNAME the default value will be NO USER NAME. The driver is responsible for determining the size of the printers control panel and sending a string of appropriate length. DEFAULT VALUE: NO USER NAME The value will be stored in RAM only, not in NVRAM. The legal PJL commands are SET and, INQUIRE. DEFAULT is not allowed. The variable will appear in the PJL INFO VARIABLES list. @PJL SET JOBNAME=<80 bytes> GENERAL DESCRIPTION: Eighty-character job name that may be generated by the driver or obtained from user input. This value may be used in conjunction with the USERNAME to select a job from the front panel. If the job stream does not contain a JOBNAME, the printer will assume no job name; each subsequent job that is sent down by the same user would replace the users last job on disk. The driver is responsible for determining the size of the printers control panel and sending a string of appropriate length. NOTE: The limit of 80 bytes is a PJL limit. The limit for PML will be 40 bytes. DEFAULT VALUE: NULL STRING The value will be stored in RAM only, not in NVRAM. The legal PJL commands are SET and INQUIRE. DEFAULT is not allowed. The variable will appear in the PJL INFO VARIABLES list. @PJL SET HOLDTYPE=PUBLIC|PRIVATE GENERAL DESCRIPTION: This variable specifies the privacy level of the job. PUBLIC: The job does not require a PIN in order to be released for printing. This is the default value. PRIVATE: The job requires a PIN in order to be released for printing. The PIN is specified by the HOLDKEY variable. 
If the HOLDTYPE is set to PRIVATE, a HOLDKEY value must be specified. If no HOLDKEY is specified, the job will be considered PUBLIC. DEFAULT VALUE: PUBLIC The value will be stored in RAM only, not in NVRAM. The legal PJL commands are SET and INQUIRE. DEFAULT is not allowed. The variable will appear in the PJL INFO VARIABLES list. @PJL SET HOLDKEY=4 digits, 0000...9999 GENERAL DESCRIPTION: A 4-digit string, each digit 0-9, that is specified in the job stream and then required to be entered in order to release the job for printing. If a HOLDTYPE is PRIVATE, a HOLDKEY value must be specified. If no HOLDKEY value is specified, a job will be considered PUBLIC. DEFAULT VALUE: NULL STRING The value will be stored in RAM only, not in NVRAM. The legal PJL commands are SET and INQUIRE. DEFAULT is not allowed. The variable will appear in the PJL INFO VARIABLES list. The format for the OID is as follows: 3.4.1.6.x.y <-----> | | | | \\ / | job-id in the system / \\ PML_JOB_MGNT_ROOT_OID \\ job attribute 1..6 x values are 1..6: 1) user-name: user name that is obtained by the driver through some internal method or from user input. 2) job-name: job name may be generated by the driver or obtained from user input. 3) hold: indicates the retention classification of the job. The values indicate whether the job is printed immediately and/or stored. There are 4 options: OFF: The job is printed but not retained on the disk. ON: The job is printed and stored temporarily on the disk. STORE: The job is not printed but stored on the disk. PROOF: One copy of the job is printed and the remaining copies are stored on the disk. 4) holdtype: The variable specifies the security level of the job. PUBLIC: The job does not require a PIN in order to release the job for printing. PRIVATE: The job requires a PIN in order to be released for printing. 5) quantity: number of copies to print. Valid values are 0..999. 
6) pin: A 4 digit string, each digit is 0-9, that is specified in the job stream and then required to be entered in order to release the job for printing. y : an unsigned 32-bit number which uniquely identifies the job. The job id for the job remains the same for the job until it is deleted from the held jobs list. For example, the OID below is asking for the user name of the job whose id is 3. The application would issue: OID get 3.4.1.6.1.3 For example, the <getnext> OID values sequence is as follows: OID returned OID getnext 3.4.1.6 3.4.1.6.1.1 getnext 3.4.1.6.1.1 3.4.1.6.1.2 getnext 3.4.1.6.2.1 3.4.1.6.2.2 *** getnext 3.4.1.6.1.6 3.4.1.6.1.7 getnext 3.4.1.6.2.1 3.4.1.6.2.2 getnext 3.4.1.6.5.2828 3.4.1.6.6.1 NOTE: for example above, job id 2828 is the last job in the list of held jobs. *** Also supported is <getnext> on the job attributes: OID returned OID getnext 3.4.1.6.1 3.4.1.6.1.1 getnext 3.4.1.6.6 3.4.1.6.6.1')
# held-job-job-name (OID ...1.6.7.1.2): read-only DisplayString scalar,
# length-constrained to 0..40 characters, naming a held print job.
held_job_job_name = (
    MibScalar(
        (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 1, 2),
        DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40)),
    )
    .setLabel("held-job-job-name")
    .setMaxAccess("readonly")
)
if mibBuilder.loadTexts:
    held_job_job_name.setStatus('optional')
if mibBuilder.loadTexts:
    held_job_job_name.setDescription('The job name may be generated by the driver or obtained from user input.')
# held-job-retention (OID ...1.6.7.1.3): read-only enumerated Integer32
# (eHoldOff/eHoldOn/eHoldStore/eHoldProof) — the job's retention class.
held_job_retention = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("eHoldOff", 1), ("eHoldOn", 2), ("eHoldStore", 3), ("eHoldProof", 4)))).setLabel("held-job-retention").setMaxAccess("readonly")
if mibBuilder.loadTexts: held_job_retention.setStatus('optional')
if mibBuilder.loadTexts: held_job_retention.setDescription('Indicates the retention classification of the job. The values indicate whether the job is printed immediately or stored. There are 4 options: eHoldOff: The job is printed but not retained on the disk. eHoldOn: The job is printed and stored temporarily on the disk. eHoldStore: The job is not printed but stored on the disk. eHoldProof: One copy of the job is printed and the remaining copies are stored on the disk. ')
# held-job-security (OID ...1.6.7.1.4): read-only enumerated Integer32
# (eHoldTypePublic/eHoldTypePrivate) — whether a PIN is required to release.
held_job_security = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eHoldTypePublic", 1), ("eHoldTypePrivate", 2)))).setLabel("held-job-security").setMaxAccess("readonly")
if mibBuilder.loadTexts: held_job_security.setStatus('optional')
if mibBuilder.loadTexts: held_job_security.setDescription('The variable specifies the security level of the job. eHoldTypePublic: The job does not require a PIN in order to release the job for printing. eHoldTypePrivate: The job requires a PIN in order to be released for printing. ')
# held-job-quantity (OID ...1.6.7.1.5): read-only Integer32 constrained to
# 1..999 — the number of copies to print.
held_job_quantity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 999))).setLabel("held-job-quantity").setMaxAccess("readonly")
if mibBuilder.loadTexts: held_job_quantity.setStatus('optional')
if mibBuilder.loadTexts: held_job_quantity.setDescription('Number of copies to print.')
# held-job-pin (OID ...1.6.7.1.6): read-only DisplayString of length 0..4 —
# the job's release PIN (per the description, gets no longer return the value).
held_job_pin = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 4))).setLabel("held-job-pin").setMaxAccess("readonly")
if mibBuilder.loadTexts: held_job_pin.setStatus('optional')
if mibBuilder.loadTexts: held_job_pin.setDescription('A string that is specified in the job stream and then required to be entered in order to release the job for printing. PIN stands for Personal Identification Number. Additional information: Must be a 4 digit string, each digit must be 0..9 or a null string if there is no pin. For security purposes, you can no longer get the value of the PIN.')
# held-job-print (OID ...1.6.7.2.1): write-only fixed-length 12-byte
# OctetString command object — schedules a held job for printing
# (bytes 0-3 job id, 4-7 copy count, 8-11 optional PIN per the description).
held_job_print = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 2, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(12, 12)).setFixedLength(12)).setLabel("held-job-print").setMaxAccess("writeonly")
if mibBuilder.loadTexts: held_job_print.setStatus('optional')
if mibBuilder.loadTexts: held_job_print.setDescription('Instructs the printer to schedule the specified held job for printing with the specified number of copies. The job-id is used to identify which job to print. A held job can also be printed from the control panel. Additional information: Bytes 0-3 is the job id of the job to print. Bytes 4-7 is the number of copies to print. Bytes 8-11 (optional) contain the PIN for a Private job. ')
# held-job-delete (OID ...1.6.7.2.2): write-only Integer32 command object —
# deletes the held job identified by the written job id.
held_job_delete = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 2, 2), Integer32()).setLabel("held-job-delete").setMaxAccess("writeonly")
if mibBuilder.loadTexts: held_job_delete.setStatus('optional')
if mibBuilder.loadTexts: held_job_delete.setDescription('Instructs the printer to delete the specified held job from the list. The job-id is used to identify which job to delete. A held job can also be deleted from the control panel. Additional information: Setting this to a value that is not a Held Job on the system or is a Private Held Job returns <ErrUnsupValue>. To delete a private Held Job, you must use the PML object HELD-JOB-PRINT with a quantity of 0 and supply the correct HELD-JOB-PIN with the request. (See HELD-JOB-PRINT)')
# held-job-set-queue-size (OID ...1.6.7.2.3): read-write Integer32 (0..100) —
# maximum number of jobs kept in the held-job list.
held_job_set_queue_size = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setLabel("held-job-set-queue-size").setMaxAccess("readwrite")
if mibBuilder.loadTexts: held_job_set_queue_size.setStatus('optional')
if mibBuilder.loadTexts: held_job_set_queue_size.setDescription('Sets the maximum number of jobs which can be stored in the held job list. Additional information: Sets the size of the temporary job lists queue.')
# held-job-enable (OID ...1.6.7.2.4): read-write enumerated Integer32
# (eDisabled/eEnabled) — master switch for the Job Retention feature.
held_job_enable = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 6, 7, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eDisabled", 1), ("eEnabled", 2)))).setLabel("held-job-enable").setMaxAccess("readwrite")
if mibBuilder.loadTexts: held_job_enable.setStatus('optional')
if mibBuilder.loadTexts: held_job_enable.setDescription('Enables or disables Job Retention (Job Hold). The valid values are eDisabled and eEnabled. (Specifying an invalid mode causes an <badValue> error to be returned.) When eDisabled is specified all Job Retention (Hold, Proof, Store, PIN Printing) is disabled. When eEnabled is specified, the Job Retention characteristics of a given job are defined by the PJL variable SET HOLD. Additional information: When disabled, held jobs are not removed, but must be explicitly removed through the Control Panel or the PML object HELD-JOB-DELETE.')
# mopy-mode (OID ...1.2.4.1.1): read-write enumerated Integer32
# (eOff=1, eStandard=4, eEnhanced=5) — controls how multiple original
# prints ("mopies") are generated.  NOTE(review): the description also
# mentions an eAuto value that is not in the enumeration — presumably
# dropped for this model; confirm against the product's POS if relevant.
mopy_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 4, 5))).clone(namedValues=NamedValues(("eOff", 1), ("eStandard", 4), ("eEnhanced", 5)))).setLabel("mopy-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: mopy_mode.setStatus('optional')
if mibBuilder.loadTexts: mopy_mode.setDescription('Controls or reports how mopies are generated. eOff turns off the mopy feature. eAuto allows the device to determine the best method for generating mopies based on the device configuration. eStandard spools the I/O data and replays the I/O data multiple times to generate the mopies. eEnhanced spools the rasterized page images and replays the rasterized data to generate the mopies. Typically, the rasterized data will be larger and will allow all but the first mopy to print at speed. If the job being mopied is not extremely complex, then the first mopy will print at speed also.')
# default-vertical-black-resolution (OID ...3.3.1.8): read-write Integer32 —
# default vertical black resolution in dots per inch; per the description the
# device snaps unsupported values and keeps it equal to the horizontal value.
default_vertical_black_resolution = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 8), Integer32()).setLabel("default-vertical-black-resolution").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_vertical_black_resolution.setStatus('optional')
if mibBuilder.loadTexts: default_vertical_black_resolution.setDescription('Returns or changes the value of the default vertical black resolution. The units are dots per inch. Additional information: In LaserJet 8100 changing this OID also causes DEFAULT-HORIZONTAL-BLACK RESOLUTION to change. DEFAULT-HORIZONTAL-BLACK-RESOLUTION and DEFAULT-VERTICAL-BLACK-RESOLUTION must always be the same. The supported values are: LaserJet 4100: 300, 600, 1200 Color Products: 600 LaserJet 9000: 300, 600 Setting to an unsupported value causes the printer to substitute in a snap value and to return <noError> status. The snap values are as follow: LaserJet 4100: 450 >= n < 900 snaps to 600 n >= 900 snaps to 1200 Color Products: n snaps to 600 LaserJet 9000: n < 450 snaps to 300 n >=450 snaps to 600 ')
# default-horizontal-black-resolution (OID ...3.3.1.9): read-write Integer32 —
# horizontal counterpart of the object above, same snapping behavior.
default_horizontal_black_resolution = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 9), Integer32()).setLabel("default-horizontal-black-resolution").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_horizontal_black_resolution.setStatus('optional')
if mibBuilder.loadTexts: default_horizontal_black_resolution.setDescription('Returns or changes the value of the default horizontal black resolution. The units are dots per inch. Additional information: In LaserJet 8100 changing this object also causes DEFAULT-VERTICAL-BLACK RESOLUTION to change. DEFAULT-HORIZONTAL-BLACK-RESOLUTION and DEFAULT-VERTICAL-BLACK-RESOLUTION must always be the same. The supported values are: LaserJet 4100: 300, 600, 1200 Color Products: 600 LaserJet 9000: 300, 600 Setting to an unsupported value causes the printer to substitute in a snap value and to return <noError> status. The snap values are as follow: LaserJet 4100: n < 450 snaps to 300 450 >= n < 900 snaps to 600 n >= 900 snaps to 1200 Color Products: n snaps to 600 LaserJet 9000: n < 450 snaps to 300 n >=450 snaps to 600 ')
# default-page-protect (OID ...3.3.1.10): read-write enumerated Integer32 —
# only eOn=2 is in the value constraint for this model, even though the
# description also discusses eOff/eAuto.
default_page_protect = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("eOn", 2)))).setLabel("default-page-protect").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_page_protect.setStatus('optional')
if mibBuilder.loadTexts: default_page_protect.setDescription('Returns or changes the default page protection behavior. If eOff, the device does not reserve memory for holding the entire raster form of a processed page. If eOn, then memory is reserved. If eAuto, the device determines the amount of memory to reserve. Additional information: Setting to eOn causes MET to be more conservative, but it will not allocate memory to hold an entire rasterized page.')
# default-bits-per-pixel (OID ...3.3.1.39): read-write Integer32 scalar
# controlling the number of levels used per pixel for grayscale/color output.
default_bits_per_pixel = (
    MibScalar(
        (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 39),
        Integer32(),
    )
    .setLabel("default-bits-per-pixel")
    .setMaxAccess("readwrite")
)
if mibBuilder.loadTexts:
    default_bits_per_pixel.setStatus('optional')
if mibBuilder.loadTexts:
    default_bits_per_pixel.setDescription('Controls the number of levels used (per pixel) when printing grayscale or color images.')
# date-display (OID ...1.1.22): read-write enumerated Integer32 selecting the
# front-panel date format (MMM-DD-YYYY / DD-MMM-YYYY / YYYY-MMM-DD).
date_display = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6))).clone(namedValues=NamedValues(("eDateDisplayMMM-DD-YYYY", 4), ("eDateDisplayDD-MMM-YYYY", 5), ("eDateDisplayYYYY-MMM-DD", 6)))).setLabel("date-display").setMaxAccess("readwrite")
if mibBuilder.loadTexts: date_display.setStatus('optional')
if mibBuilder.loadTexts: date_display.setDescription('Controls front-panel date display format. Additional information: Controls front-panel date display format.')
# date-and-time (OID ...1.2.17): read-write OctetString holding a packed
# 7-byte date_t structure (yr, mon, day, wday, hr, min, sec) per the
# description below.
date_and_time = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 17), OctetString()).setLabel("date-and-time").setMaxAccess("readwrite")
if mibBuilder.loadTexts: date_and_time.setStatus('optional')
if mibBuilder.loadTexts: date_and_time.setDescription('A C structure containing the following fields: typedef struct { ubyte yr; /* year: 0 to 99 */ ubyte mon; /* month: 1 to 12 */ ubyte day; /* day: 1 to 31 */ ubyte wday; /* Day of week: 1 to 07 */ ubyte hr; /* hour: 0 to 23 */ ubyte min; /* minute: 0 to 59 */ ubyte sec; /* second: 0 to 59 */ } date_t; where ubyte is an unsigned byte (0-255). Additional information: A C structure containing the following fields: typedef struct { ubyte yr; /* year: 3 to 99 */ ubyte mon; /* month: 1 to 12 */ ubyte day; /* day: 1 to 31 */ ubyte wday; /* Day of week: 1 to 07 */ ubyte hr; /* hour: 0 to 23 */ ubyte min; /* minute: 0 to 59 */ ubyte sec; /* second: 0 to 59 */ } date_t; where ubyte is an unsigned byte (0-255).')
# time-display (OID ...1.2.28): read-write enumerated Integer32 choosing
# 12-hour (AM/PM) or 24-hour front-panel time display.
time_display = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eTimeDisplayTwelveHour", 1), ("eTimeDisplayTwentyFourHour", 2)))).setLabel("time-display").setMaxAccess("readwrite")
if mibBuilder.loadTexts: time_display.setStatus('optional')
if mibBuilder.loadTexts: time_display.setDescription('Controls front-panel time display format. Set to eTimeDisplayTwelveHour for AM/PM display. Set to eTimeDisplayTwentyFourHour for military-type display. Additional information: Controls front-panel time display format. Set to eTimeDisplayTwelveHour for AM/PM display. Set to eTimeDisplayTwentyFourHour for military-type display.')
# MIO/EIO slot inventory objects.  For each slot N (1..3, sub-OID ...1.4.3.N)
# there are three read-only scalars: model-name (.2), manufacturing-info (.3)
# and type (.4, enumerated eEmpty/eUnknown/eDiskDrive/eIOCard).
#
# --- slot 1 ---
mio1_model_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 1, 2), DisplayString()).setLabel("mio1-model-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio1_model_name.setStatus('optional')
if mibBuilder.loadTexts: mio1_model_name.setDescription('Returns product information identifying the I/O card. Example: XXXX.')
mio1_manufacturing_info = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 1, 3), DisplayString()).setLabel("mio1-manufacturing-info").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio1_manufacturing_info.setStatus('optional')
if mibBuilder.loadTexts: mio1_manufacturing_info.setDescription('Returns information describing the manufacture of the I/O card installed in MIO/EIO slot 1. May include serial number and firmware revision. Additional information: The format of the string returned is determined by the manufacturer of the EIO device. There is no standard for content of the string.')
mio1_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 8, 12))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eDiskDrive", 8), ("eIOCard", 12)))).setLabel("mio1-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio1_type.setStatus('optional')
if mibBuilder.loadTexts: mio1_type.setDescription('Returns an indication of the type of option installed in MIO/EIO slot 1. See SIMM1-TYPE for an explanation of the enumerations.')
# --- slot 2 ---
mio2_model_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 2, 2), DisplayString()).setLabel("mio2-model-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio2_model_name.setStatus('optional')
if mibBuilder.loadTexts: mio2_model_name.setDescription('Returns product information identifying the I/O card. Example: XXXX.')
mio2_manufacturing_info = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 2, 3), DisplayString()).setLabel("mio2-manufacturing-info").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio2_manufacturing_info.setStatus('optional')
if mibBuilder.loadTexts: mio2_manufacturing_info.setDescription('Returns information describing the manufacture of the I/O card installed in MIO/EIO slot 2. May include serial number and firmware revision. Additional information: The format of the string returned is determined by the manufacturer of the EIO device. There is no standard for content of the string.')
mio2_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 8, 12))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eDiskDrive", 8), ("eIOCard", 12)))).setLabel("mio2-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio2_type.setStatus('optional')
if mibBuilder.loadTexts: mio2_type.setDescription('Returns an indication of the type of option installed in MIO/EIO slot 2. See SIMM1-TYPE for an explanation of the enumerations.')
# --- slot 3 ---
mio3_model_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 3, 2), DisplayString()).setLabel("mio3-model-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio3_model_name.setStatus('optional')
if mibBuilder.loadTexts: mio3_model_name.setDescription('Returns product information identifying the I/O card. Example: XXXX.')
mio3_manufacturing_info = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 3, 3), DisplayString()).setLabel("mio3-manufacturing-info").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio3_manufacturing_info.setStatus('optional')
if mibBuilder.loadTexts: mio3_manufacturing_info.setDescription('Returns information describing the manufacture of the I/O card installed in MIO/EIO interface 3. May include serial number and firmware revision. Additional information: The format of the string returned is determined by the manufacturer of the EIO device. There is no standard for content of the string.')
mio3_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 3, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 8, 12))).clone(namedValues=NamedValues(("eEmpty", 1), ("eUnknown", 2), ("eDiskDrive", 8), ("eIOCard", 12)))).setLabel("mio3-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: mio3_type.setStatus('optional')
if mibBuilder.loadTexts: mio3_type.setDescription('Returns an indication of the type of option installed in MIO/EIO interface 3. See SIMM1-TYPE for an explanation of the enumerations.')
# io-timeout (OID ...2.1.1.1): read-write Integer32 (5..300 seconds) —
# idle time before the current I/O channel's job is considered ended.
io_timeout = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 300))).setLabel("io-timeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: io_timeout.setStatus('optional')
if mibBuilder.loadTexts: io_timeout.setDescription('The amount of time, in seconds, to wait for more print job data to be received before an I/O timeout occurs. The I/O channel being timed is the I/O channel that received the data associated with the current print job. If an I/O timeout occurs, the PDL processing sub-system assumes all the data associated with the current print job has been received, and processes the end of job in a PDL specific manner. The POS specifies the supported values. Additional information: If an I/O timeout occurs,the PDL processing sub-system will consider it an end of job condition only if there is data from another I/O subsystem waiting to be processed. The supported values are 5 to 300 seconds. Setting to a value outside the supported range returns <noError> status and the value will be snapped to the nearest supported value.')
# io-switch (OID ...2.1.1.2): read-only enumerated Integer32; only eYes=1
# is allowed — this device always switches I/O channels at job boundaries.
io_switch = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("eYes", 1)))).setLabel("io-switch").setMaxAccess("readonly")
if mibBuilder.loadTexts: io_switch.setStatus('optional')
if mibBuilder.loadTexts: io_switch.setDescription('Indicates if the device will switch between I/O channels when a job boundary is encountered and print job data is available on another I/O channel.')
# port1-parallel-speed (OID ...2.1.3.1.4): read-write enumerated Integer32
# (eSlow/eFast) — parallel-port busy-pulse timing for port 1.
port1_parallel_speed = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eSlow", 1), ("eFast", 2)))).setLabel("port1-parallel-speed").setMaxAccess("readwrite")
if mibBuilder.loadTexts: port1_parallel_speed.setStatus('optional')
if mibBuilder.loadTexts: port1_parallel_speed.setDescription('Returns or changes the maximum parallel I/O port speed, for port 1. This object is only supported if this port is a parallel port. An eSlow setting causes a 10 us busy pulse per received byte of data. An eFast setting causes a 1.5 us busy pulse per received byte of data. In rare cases, setting this value to eFast can cause the parallel port to no longer transfer data reliably. Additional information: When the value of this object is changed, it takes effect immediately. It is recommended that the printer be offline and not in a job when this object is changed.')
# port1-parallel-bidirectionality (OID ...2.1.3.1.5): read-write enumerated
# Integer32 (eUnidirectional/eBidirectional) for port 1.
port1_parallel_bidirectionality = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 2, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eUnidirectional", 1), ("eBidirectional", 2)))).setLabel("port1-parallel-bidirectionality").setMaxAccess("readwrite")
if mibBuilder.loadTexts: port1_parallel_bidirectionality.setStatus('optional')
if mibBuilder.loadTexts: port1_parallel_bidirectionality.setDescription('Returns or changes whether the parallel I/O port supports bidirectional communication for port 1. This object is only supported if this port is a parallel port. Additional information: A get on this object returns the current mode for the parallel port. Setting this object specifies whether or not bidirectional communications will be allowed.')
# channelnumberofchannels (OID ...6.1): write-only Integer32 — the I/O card
# writes the number of logical print-data channels it will use.
channelnumberofchannels = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 1), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: channelnumberofchannels.setStatus('optional')
if mibBuilder.loadTexts: channelnumberofchannels.setDescription('An indication of how many print data channels the I/O card supports. Additional information: This object is used by the IIO card to tell the peripheral firmware how many logical channels will be used by the card. This object can only be set once per physical channel. If an attempt is made to set it a subsequent time it is ignored and an return code of <ErrorActionCanNotBePerformedNow.is returned. If new firmware is dynamically downloaded to the Blazers Plus card, and if that firmware uses more logical channels, it is necessary to reset the printer.')
# channelTable (OID ...6.3) / channelEntry (...6.3.1): per-channel table
# indexed by "channelindex" (imported from the LJ4250-MIB module).
channelTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3), )
if mibBuilder.loadTexts: channelTable.setStatus('optional')
channelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3, 1), ).setIndexNames((0, "LJ4250-MIB", "channelindex"))
if mibBuilder.loadTexts: channelEntry.setStatus('optional')
# Columns of channelTable (all write-only: the I/O card writes these values
# into the peripheral, which later reports them via the Printer MIB
# prtChannel* objects, per the descriptions below).
#
# channeltype (col .2): enumerated Integer32 channel protocol type.
channeltype = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 7, 8, 9, 10, 11, 15, 38))).clone(namedValues=NamedValues(("eChOther", 1), ("eChAppleTalkPAP", 7), ("eChLPDServer", 8), ("eChNetwareRPrinter", 9), ("eChNetwarePServer", 10), ("eChPort9100", 11), ("eChDLCLLCPort", 15), ("eChBidirPortTCP", 38)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: channeltype.setStatus('optional')
if mibBuilder.loadTexts: channeltype.setDescription('Identifies the type of MIO print data channel. Additional information: After telling the peripheral how many logical channels will be used, the IIO card tells the printer the type of each of the channels. The type information is passed using the channelType object. Values are then saved by the peripheral and reported when users request the value of prtChannelType.')
# channelprotocolversion (col .3): OctetString protocol version.
channelprotocolversion = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3, 1, 3), OctetString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: channelprotocolversion.setStatus('optional')
if mibBuilder.loadTexts: channelprotocolversion.setDescription('The version of the protocol used on this MIO print data channel. Additional information: After telling the peripheral how many logical channels will be used, the IIO card tells the printer the protocol version of each of the channels. The protocol version information is passed using the channelProtocolVersion object. Values are then saved by the peripheral and reported when users request the value of prtChannelProtocolVersion.')
# channelstate (col .4): enumerated Integer32 channel state.  Note the
# enumeration label "eChPrintDataAccecped" carries a typo inherited from the
# source MIB; it must stay as-is to match the published MIB.
channelstate = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("eChOther", 1), ("eChPrintDataAccecped", 3), ("eChNoDataAccepted", 4)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: channelstate.setStatus('optional')
if mibBuilder.loadTexts: channelstate.setDescription('The state of this print data channel. The value determines whether control information and print data is allowed through this channel or not. Additional information: After telling the peripheral how many logical channels will be used, the IIO card tells the printer the state of each of the channels. The state can change from time to time. The state information is passed using the channelState object. Values are saved by the peripheral and reported when users request the value of prtChannelState.')
# channelifindex (col .5): Integer32 — the MIB-II ifIndex for the channel.
channelifindex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3, 1, 5), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: channelifindex.setStatus('optional')
if mibBuilder.loadTexts: channelifindex.setDescription('The value of ifIndex which corresponds to this channel. Additional information: After telling the peripheral how many logical channels will be used, the IIO card tells the printer the value of the IfIndex for each channel. The IfIndex is used as part of MIB-II. The values are saved by the peripheral and reported when users request the value of prtChannelIfIndex.')
# channelstatus (col .6): Integer32 sub-unit status bit-field (availability,
# alert, on-line and transitioning bits, as spelled out in the description).
channelstatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3, 1, 6), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: channelstatus.setStatus('optional')
if mibBuilder.loadTexts: channelstatus.setDescription("Sub-unit status is reported in the entries of the principle table in the Group that represents the sub-unit. For sub-units that report a status, there is a status column in the table and the value of this column is always an integer formed in the following way. The SubUnitStatus is an integer that is the sum of 5 distinct values, Availability, Non-Critical, Critical, On-line, and Transitioning. These values are: Availability value Available and Idle 0 000'b Available and Standby 2 010'b Available and Active 4 100'b Available and Busy 6 110'b Unavailable and OnRequest 1 001'b Unavailable because Broken 3 011'b Unknown 5 101'b Non-Critical No Non-Critical Alerts 0 Non-Critical Alerts 8 Critical No Critical Alerts 0 Critical Alerts 16 On-Line Intended state is On-Line 0 Intended state is Off-Line 32 Transitioning At intended state 0 Transitioning to intended state 64. Additional information: After telling the peripheral how many logical channels will be used, the IIO card tells the printer the status of each of the channels. The status can change from time to time. The status information is passed using the channelStatus object. Values are saved by the peripheral and reported when users request the value of prtChannelStatus.")
# channelinformation (col .7): write-only OctetString — auxiliary,
# channel-type-specific "Keyword=value\n" entries used by applications to
# bootstrap use of the channel (mirrors Printer MIB prtChannelInformation).
# NOTE(review): the description statement below spans multiple physical
# lines exactly as emitted by the MIB compiler; preserved byte-for-byte.
channelinformation = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 6, 3, 1, 7), OctetString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: channelinformation.setStatus('optional')
if mibBuilder.loadTexts: channelinformation.setDescription("Auxiliary information to allow a printing application to use the channel for data submission to the printer. An application capable of using a specific PrtChannelType should be able to use the combined information from the prtChannelInformation and other channel and interface group objects to 'bootstrap' its use of the channel. prtChannelInformation is not intended to provide a general channel description, nor to provide information that is available once the channel is in use. The encoding and interpretation of the prtChannelInformation object is specific to channel type. The description of each prtChannelType enum value for which prtChannelInformation is defined specifies the appropriate encoding and interpretation, including interaction with other objects. For channel types that do not specify a prtChannelInformation value, its value shall be null (0 length). When a new prtChannelType enumeration value is registered, its accompanying description must specify the encoding and interpretation of the prtChannelInformation value for the channel type. prtChannelInformation semantics for an existing PrtChannelType may be added or amended in the same manner as described in section 2.4.1 for type 2 enumeration values. The prtChannelInformation specifies values for a collection of channel attributes, represented as text according to the following rules: 1. The prtChannelInformation is coded in the NVT ASCII character set. It is not affected by localization. 2. The prtChannelInformation is a list of entries representing the attribute values. Each entry consists of the following items, in order: a. a keyword, composed of alphabetic characters (A-Z, a-z), that identifies a channel attribute, b. an Equals Sign (=) to delimit the keyword, c. a data value, consisting of NVT ASCII graphics characters (codes 32-126), d. a Line Feed character (code 10) to delimit the data value. No other characters shall be present. 
Keywords are case-sensitive. Conventionally, keywords are capitalized (including each word of a multi-word keyword), and, since they occupy space in the prtChannelInformation, they are kept short. 3. If a channel attribute has multiple values, it is represented by multiple entries with the same keyword, each specifying one value. Otherwise, there shall be at most one entry for each attribute. 4. By default, entries may appear in any order. If there are ordering constraints for particular entries, these must be specified in their definitions. 5. The prtChannelInformation value may represent information that is not normally coded in textual form, or that is coded in a character set other than NVT ASCII. In these cases, whatever symbolic representation is conventionally used for the information should be used for encoding the prtChannelInformation. (For instance, a binary port value might be represented as a decimal number, Unicode would be represented in UTF-8 format.) 6. For each PrtChannelType for which prtChannelInformation entries are defined, the descriptive text associated with the PrtChannelType enumeration value shall specify the following information for each entry: Title: Brief description phrase, e.g.: 'Port name,'Service Name,' Keyword: The keyword value, eg: 'Port,' 'Service' Syntax: The encoding of the entry value, if it cannot be directly represented by NVT ASCII, Status: 'Mandatory,' 'Optional,' or 'Conditionally Mandatory,' Multiplicity: 'Single' or 'Multiple,' to indicate whether the entry may be present multiple times, Description: Description of the use of the entry, other information required to complete the definition (e.g.: ordering contstraints, interactions between entries). Applications that interpret prtChannelInformation should ignore unrecognized entries, so they are not affected if new entry types are added. 
Additional information: After telling the peripheral how many logical channels will be used, the IIO card tells the printer the value of the information value of each of the channels. The information is auxiliary information used in conjunction with the channel Type.")
# --- Device channel statistics table ---------------------------------------
# Per-logical-channel I/O counters, indexed by "channel-index" from this MIB.
# All counter objects below are read-only 31-bit values (low-order bits).
deviceChannelTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7, 2), )
if mibBuilder.loadTexts: deviceChannelTable.setStatus('optional')
# Table row; each row is identified by the channel-index column.
deviceChannelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7, 2, 1), ).setIndexNames((0, "LJ4250-MIB", "channel-index"))
if mibBuilder.loadTexts: deviceChannelEntry.setStatus('optional')
# channel-bytes-sent: bytes sent from printer to host on this channel.
channel_bytes_sent = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7, 2, 1, 2), Integer32()).setLabel("channel-bytes-sent").setMaxAccess("readonly")
if mibBuilder.loadTexts: channel_bytes_sent.setStatus('optional')
if mibBuilder.loadTexts: channel_bytes_sent.setDescription('The low order 31 bits of the number of bytes sent from the printer to the host on this logical channel.')
# channel-bytes-received: bytes received by printer from host on this channel.
channel_bytes_received = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7, 2, 1, 3), Integer32()).setLabel("channel-bytes-received").setMaxAccess("readonly")
if mibBuilder.loadTexts: channel_bytes_received.setStatus('optional')
if mibBuilder.loadTexts: channel_bytes_received.setDescription('The low order 31 bits of the number of bytes received by the printer from the host on this logical channel.')
# channel-io-errors: count of I/O errors on this channel.
channel_io_errors = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7, 2, 1, 4), Integer32()).setLabel("channel-io-errors").setMaxAccess("readonly")
if mibBuilder.loadTexts: channel_io_errors.setStatus('optional')
if mibBuilder.loadTexts: channel_io_errors.setDescription('The low order 31 bits of the number of I/O errors which have occured on this logical channel.')
# channel-jobs-received: count of print jobs received on this channel.
channel_jobs_received = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7, 2, 1, 5), Integer32()).setLabel("channel-jobs-received").setMaxAccess("readonly")
if mibBuilder.loadTexts: channel_jobs_received.setStatus('optional')
if mibBuilder.loadTexts: channel_jobs_received.setDescription('The low order 31 bits of the number of print jobs which have been received on this logical channel.')
# channel-mio: physical slot number of the MIO card for this channel
# (0 when the channel is not associated with an MIO card).
channel_mio = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 7, 2, 1, 6), Integer32()).setLabel("channel-mio").setMaxAccess("readonly")
if mibBuilder.loadTexts: channel_mio.setStatus('optional')
if mibBuilder.loadTexts: channel_mio.setDescription("The number of the MIO card associated with this logical channel. If this logical channel is not associated with an MIO card a zero is returned. The value returned for each logical channel is the MIO card's physical slot number. The list of supported values should be documented in the device POS. Additional information: The value returned will be zero for channels 1..3. If an MIOA card is installed, channels 4..10 will return a value of one or two. If an MIOB card is installed, channels 11..17 will return a value of one or two but not the same as channels 4..10. When two IIO cards are installed, the value returned by any channel 4..17 may change after the printer is power cycled.")
# --- RPC protocol address objects ------------------------------------------
# rpc-bind-protocol-address: BSD-sockets-style addresses used to bind RPC to
# a transport protocol family (encoding documented in the description text).
rpc_bind_protocol_address = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 13, 1, 2), OctetString()).setLabel("rpc-bind-protocol-address").setMaxAccess("readonly")
if mibBuilder.loadTexts: rpc_bind_protocol_address.setStatus('optional')
if mibBuilder.loadTexts: rpc_bind_protocol_address.setDescription('Array of Berkeley sockets style protocol addresses used to bind RPC to a communications protocol family. Setting an instance of this array object to a zero length binary value disables the transport protocol indicated by that instance. All multi-bytes fields are in network (or big-endian) order. Bytes 1 and 2 indicate the transport protocol. Some of the transport protocol mapping information can be found in RFC 1010 Assigned Numbers. A list of interesting transport protocol number mappings include: <table> Protocol | Number ---------+------- UDP/IP | 17 IPX | 1000 MLC | 4660 </table> Bytes 3 and 4 indicate the address family. The address family uses the same mapping as the BSD sockets address family. A list of interesting address family mappings include: <table> Address Family | Number ---------------+------- Internet | 2 NetWare | 6 MLC | 22136 </table> The format the fifth and following bytes is dependent on the address family. For the Internet address family, bytes 5 and 6 contain the port number, bytes 7 through 10 contain the IP address, and the following eight bytes are unused. For NetWare, bytes 5 through 8 are the network number, bytes 9 through 14 are the node number, and bytes 15 and 16 are the socket number. Additional information: The length of the binary value is zero if the instance of an object in the rpc-bind-protocol-address array is not in use. Returns <noSuchName> status if attempting to access this object and there is no storage device is installed.')
# rpc-bound-protocol-address: addresses the NFS service actually bound to;
# same encoding as rpc-bind-protocol-address above.
rpc_bound_protocol_address = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 13, 2, 3), OctetString()).setLabel("rpc-bound-protocol-address").setMaxAccess("readonly")
if mibBuilder.loadTexts: rpc_bound_protocol_address.setStatus('optional')
if mibBuilder.loadTexts: rpc_bound_protocol_address.setDescription('An array of Berkeley sockets style protocol addresses that the NFS service has been bound to successful. The format is that same as the array of RPC-BIND-PROTOCOL-ADDRESS of objects. Additional information: Returns <noSuchName> status if attempting to access this object and there is no storage device is installed.')
# --- File-system control objects -------------------------------------------
# file-system-max-open-files: guaranteed number of simultaneously open files
# (1..500); <noSuchName> when no storage device is installed.
file_system_max_open_files = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 2), Integer32()).setLabel("file-system-max-open-files").setMaxAccess("readonly")
if mibBuilder.loadTexts: file_system_max_open_files.setStatus('optional')
if mibBuilder.loadTexts: file_system_max_open_files.setDescription('The number of open files allowed at one time. Opening a file when the maximum number of files are currently open will fail. Additional information: Indicates the number of open files a personality (e.g. PCL or PostScript) is guaranteed to be able to open before the file system runs out of file handles. This object is between 1 and 500. Returns <noSuchName> status if attempting to access this object and there is no storage device is installed.')
# file-system-set-system-partition-writeable: write-only, password-gated
# command; payload is an 8-byte password followed by a volume number.
file_system_set_system_partition_writeable = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 6), OctetString()).setLabel("file-system-set-system-partition-writeable").setMaxAccess("writeonly")
if mibBuilder.loadTexts: file_system_set_system_partition_writeable.setStatus('optional')
if mibBuilder.loadTexts: file_system_set_system_partition_writeable.setDescription('This object allows the system partition to be written to. It consists of a C structure containing the following fields: typedef struct { ubyte password[8]; ubyte volumenumber; } fs_writeable_system_partition_t; which is described below: Bytes 0 - 7: contain the password Byte 8 : is the volume number Access to this command is controlled by the password. If the password supplied is incorrect the command will fail. The volumenumber is a volume number of an existing system partition. Additional information: Returns <noSuchName> status if attempting to access this object and there is no storage device is installed. Returns <badValue> if the password is incorrect or if the volume requested is not present.')
# file-system-set-system-partition-readonly: write-only; value is the volume
# number of a system partition to mark READ-ONLY.
file_system_set_system_partition_readonly = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 7), Integer32()).setLabel("file-system-set-system-partition-readonly").setMaxAccess("writeonly")
if mibBuilder.loadTexts: file_system_set_system_partition_readonly.setStatus('optional')
if mibBuilder.loadTexts: file_system_set_system_partition_readonly.setDescription('Changes a system partition to be READ-ONLY. The value is the volume number to change. If the volume number specified is NOT a system partition an error is returned. Additional information: Returns <noSuchName> status if attempting to access this object and there is no storage device is installed. Returns <badValue> if the volume requested is not present.')
# file-system-delete-files: write-only; authenticated delete command whose
# payload (fs_delete_files_t) carries a user id, authentication data and a
# NUL-terminated path/pattern.  Description literal spans two source lines.
file_system_delete_files = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 8), OctetString()).setLabel("file-system-delete-files").setMaxAccess("writeonly")
if mibBuilder.loadTexts: file_system_delete_files.setStatus('optional')
if mibBuilder.loadTexts: file_system_delete_files.setDescription('Setting this object causes the specified filename to be deleted, after first validating that the authentication data is correct for the specified user ID. The format for this object is a C structure: typedef struct { sint32 UserId; uint16 AuthenticationDataLen ubyte AuthenticationData[] char Filename[]; } fs_delete_files_t; which is described below: Bytes 0 - 3: contains a user id represented as a multi-byte value that is stored in big-endian format, where the most significant byte occurs first. Bytes 4 - 5 : Length of the Athentication data that follows starting at offset 6. Stored as a multi-byte value that is stored in big-endian format, where the most significant byte occurs first. Bytes 6 - 6+AuthenticationDataLen : a ubyte array containing the Authentication data used to verify access for this operation. Bytes starting at offset (6+AuthenticationDataLen+1): A null terminated character array representing the ASCII file name to be deleted. The length of the string will be limited by the remaining space in the object. This string represents a fully-qualified path name which may specify a filename or a regular expression that may match multiple files (e.g <path>/*.exe). Access to this command is controlled by the UserId and the authentication data. If the UserID or authentication data supplied is incorrect the command will fail. The device POS will specify any limitations to the length of the filename string, what constitutes a correct user ID, what constitutes correct authentication data, and the significance of any return values. Additional information: Setting this object causes the specified filename to be deleted, after first validating that the authentication data is correct for the specified user ID. This object is always present. 
The format for this object is a C structure: typedef struct { sint32 UserId; uint16 AuthenticationDataLen ubyte AuthenticationData[] char Filename[]; } fs_delete_files_t; Bytes 0 - 3: contains a user id represented as a multi-byte value that is stored in big-endian format, where the most significant byte occurs first. Bytes 4 - 5 : Length of the authentication data that follows starting at offset 6. Stored as a multi-byte value that is stored in big-endian format, where the most significant byte occurs first. Bytes 6 - 6+AuthenticationDataLen : a ubyte array containing the authentication data used to verify access for this operation. Bytes starting at offset (6+AuthenticationDataLen+1): A null terminated character array representing the ASCII file name to be deleted. The length of the string will be limited by the remaining space in the object. This string represents a fully-qualified path name which may specify a filename or a file-type regular expression that may match multiple files (e.g., <path>/*.exe). Access to this command is controlled by the UserId and the authentication data. If the UserID or authentication data supplied is incorrect the command will fail. The device POS will specify any limitations to the length of the filename string, what constitutes a correct user ID, what constitutes correct authentication data, and the significance of any return values.')
# file-system-security-access-password: 4-state gatekeeper password that
# controls write access to the external-access / erase-mode / wipe-disk
# objects below.  Description literal spans two source lines.
file_system_security_access_password = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 9), DisplayString()).setLabel("file-system-security-access-password").setMaxAccess("readwrite")
if mibBuilder.loadTexts: file_system_security_access_password.setStatus('optional')
if mibBuilder.loadTexts: file_system_security_access_password.setDescription("This object acts as a gate keeper for write access to the PML objects FILE-SYSTEM-EXTERNAL-ACCESS-CAPABILITIES, FILE-SYSTEM-ERASE-MODE, and FILE-SYSTEM-WIPE-DISK. The object can be viewed as always being in one of 4 states: State 0 --> Un-initialized: FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is the empty string and a pmlget operation on the object will return the string '0'. This will be the state after an NVRAM init which is the only way to set the object to the empty string. It cannot be set to the empty string through PML. In this state write access to FILE-SYSTEM-EXTERNAL-ACCESS-CAPABILITIES, FILE-SYSTEM-ERASE-MODE and FILE-SYSTEM-WIPE-DISK is disabled while write access to the object itself is enabled. A successful set operation on the object in this state will return the string 'SET' and will result in a transition to State 3. State 1 --> The Password is set to a non-empty string: A pmlget operation on the object will return the string '1' regardless of the actual stored password value. In this state write access to FILE-SYSTEM-EXTERNAL-ACCESS-CAPABILITIES, FILE-SYSTEM-ERASE-MODE and FILE-SYSTEM-WIPE-DISK is disabled. The only string the object itself can be successfully set to while in this state is the current value of the password. Setting the object to its current value causes a transition to State 2 which is an authenticated state. The value returned from such a set operation will be the string 'OK'. State 2 --> The password is in an authenticated state: The 'gate' to write to the PML objects FILE-SYSTEM-EXTERNAL-ACCESS-CAPABILITIES, FILE-SYSTEM-ERASE-MODE, and FILE-SYSTEM-WIPE-DISK is open. One, and only one, write operation can be performed on one of these three objects. During such a write operation the object itself will transition to State 1 thus closing the gate. In State 2 a pmlset operation on the object will result in the password being set and a transfer to State 3. 
The value returned from a set operation in this state on FILE-SYSTEM-SECURITY-ACCESS-PASSWORD will be 'SET'. A pmlget operation on the object will return the string '2' regardless of the actual password value. State 3 --> The password was just changed: A pmlget operation on the object will return the string '3'. Otherwise behaviors in State 3 are exactly like in State 1. ")
# --- File-system security / secure-erase objects ---------------------------
# file-system-external-access-capabilities: bitmask disabling file-system
# access via PJL/PML/NFS/PostScript/EWS; writable only while the security
# password (defined elsewhere in this MIB) is in its authenticated state.
file_system_external_access_capabilities = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 10), OctetString()).setLabel("file-system-external-access-capabilities").setMaxAccess("readwrite")
if mibBuilder.loadTexts: file_system_external_access_capabilities.setStatus('optional')
if mibBuilder.loadTexts: file_system_external_access_capabilities.setDescription("This object is used to control access to the file system of the device. It is always readable. It is writable when FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is not the empty string, and a successful write to that object with the current password has preceded the write to this object. In other words, the FILE-SYSTEM-SECURITY-ACCESS-PASSWORD must be in the 'authenticated state' for a write on FILE-SYSTEM-EXTERNAL-ACCESS-CAPABILITIES to succeed. After the object has successfully been written to, it becomes read-only, and the FILE-SYSTEM-SECURITY-ACCESS-PASSWORD object exits the authenticated state. FILE-SYSTEM-SECURITY-ACCESS-PASSWORD must then be written to again with the current password in order to make another write operation on FILE-SYSTEM-EXTERNAL-ACCESS-CAPABILITIES possible. cDisablePJLFileSystemAccess - All file system access through PJL will be disabled. cDisablePMLFileSystemWrite - All file-systems-related PML objects such as FILE-SYSTEM-DELETE-FILES or hrPartitionLabel become read-only. For objects that are normally write-only this means that no successful operations can now be performed on them. cDisableNFSFileSystemAccess - All NFS file system access will be disabled. cDisablePSFileSystemAccess - All file system access through PostScript will be disabled. cDisableEWSFileSystemAccess - All access to the file system by the Embedded Web Server will be disabled. ")
# file-system-erase-mode: selects NonSecureFastErase / SecureFastErase /
# SecureSanitizeErase behavior for deleted file data; password-gated.
file_system_erase_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 11), OctetString()).setLabel("file-system-erase-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: file_system_erase_mode.setStatus('optional')
if mibBuilder.loadTexts: file_system_erase_mode.setDescription('This object is used to control how file data is handled when files are deleted. It is always readable. It is writable when FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is the empty string or when a write to FILE-SYSTEM-SECURITY-ACCESS-PASSWORD with the current password has preceeded the write to the object. When FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is not the empty string then this object becomes read-only after a successful write to it. FILE-SYSTEM-SECURITY-ACCESS-PASSWORD must then be written to again with the current password in order to make another write operation on the object possible. If the FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is incorrectly set, this object becomes read-only until the password is correctly set. The logical settings for this object are: NonSecureFastErase - When a file is deleted, only the reference to it in the file system tables is removed. No file data is overwritten. This is the default setting. This is the setting when both bits in the collection are set to 0. SecureFastErase - File information is overwritten with identical, random character pattern when it is deleted. This is the setting when cEraseMode0 is set to 1 and cEraseMode1 is set to 0. SecureSanitizeErase - Secure, repetitive algorithm used to overwrite all deleted file information and remove any residual data persistence. A random character is written to all bytes of deleted sectors. Then the complement of that character and finally, another random character is written. This is the setting when cEraseMode0 is set to 0 and cEraseMode1 is set to 1. Note that an error will be returned for an attempt to set both bits of the collection to 1.')
# file-system-wipe-disk: per-file-system flag (1 = wipe on reboot,
# 0 = leave unchanged, -1 = cannot be wiped); password-gated.
file_system_wipe_disk = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 12), Integer32()).setLabel("file-system-wipe-disk").setMaxAccess("readwrite")
if mibBuilder.loadTexts: file_system_wipe_disk.setStatus('optional')
if mibBuilder.loadTexts: file_system_wipe_disk.setDescription("This object is a non-leaf node which will have one leaf node under it for each file system in the printer. The leaf nodes will be in a 1-to-1 correspondence with the hrDeviceIndex objects. This object specifies for each leaf node whether the file system on the device is going to be erased according to the setting of the FILE-SYSTEM-ERASE-MODE referenced by the corresponding hrDeviceIndex. A file system will be overwritten on system reboot if this object is set to 1. It will be unchanged if this object is set to 0. If this object's value is -1 then the file system cannot be wiped. If the file system contains the permstore, the permstore will be saved off and restored after the file-system wipe is completed. This object is always readable. It is writable when FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is the empty string or when a write to FILE-SYSTEM-SECURITY-ACCESS-PASSWORD with the current password has preceeded the write to the object. When FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is not the empty string then this object becomes read-only after a successful write to it. FILE-SYSTEM-SECURITY-ACCESS-PASSWORD must then be written to again with the current password in order to make another write operation on the object possible. If the FILE-SYSTEM-SECURITY-ACCESS-PASSWORD is incorrectly set, this object becomes read-only until the password is correctly set.")
# file-system-wipe-disk-status: read-only progress of an in-flight wipe
# (-1 = no wipe in progress, 0..100 = percent done).
file_system_wipe_disk_status = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 1, 13), Integer32()).setLabel("file-system-wipe-disk-status").setMaxAccess("readonly")
if mibBuilder.loadTexts: file_system_wipe_disk_status.setStatus('optional')
if mibBuilder.loadTexts: file_system_wipe_disk_status.setDescription('This object is a non-leaf node which will have one leaf node under it for each file system in the printer. The leaf nodes will be in a 1-to-1 correspondence with the hrDeviceIndex objects. This object specifies for each leaf node the status of a disk wipe in progress. A return value of -1 indicates that no disk wipe is in progress. A return value in the range of 0 to 100 indicates the percent done for a disk wipe in progress. All other values are illegal.')
# --- Volume initialization objects (file systems 2-4) ----------------------
# Each is a write-only enum with the single value eInitializing(2);
# setting it triggers initialization of the corresponding mass-storage
# volume (hrDeviceIndex 2, 3 and 4 respectively).
file_system2_initialize_volume = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 3, 2, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("eInitializing", 2)))).setLabel("file-system2-initialize-volume").setMaxAccess("writeonly")
if mibBuilder.loadTexts: file_system2_initialize_volume.setStatus('optional')
if mibBuilder.loadTexts: file_system2_initialize_volume.setDescription('Setting this object to eInitializing causes file system 2 to be initialized. Reading this object indicates if the file system is currently being initialized. Additional information: The hrDeviceIndex value for the mass storage device is the same value that is used to index into the FILE-SYSTEM sub-tree. Since this product supports up to 3 physical mass storage device, and since the hrDeviceIndex for the mass storage devices will start at 2 if the mass storage device is installed, the FILE-SYSTEM2-INITIALIZE-VOLUME object will be the object that allows the mass storage device to be initialized that is the 1st device.')
file_system3_initialize_volume = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 3, 3, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("eInitializing", 2)))).setLabel("file-system3-initialize-volume").setMaxAccess("writeonly")
if mibBuilder.loadTexts: file_system3_initialize_volume.setStatus('optional')
if mibBuilder.loadTexts: file_system3_initialize_volume.setDescription('Setting this object to eInitializing causes file system 3 to be initialized. Reading this object indicates if the file system is currently being initialized. Additional information: The hrDeviceIndex value for the mass storage device is the same value that is used to index into the FILE-SYSTEM sub-tree. Since this product supports up to 3 physical mass storage device, and since the hrDeviceIndex for the mass storage devices will start at 2 if the mass storage device is installed, the FILE-SYSTEM3-INITIALIZE-VOLUME object will be the object that allows the mass storage device to be initialized that is the 2nd device.')
file_system4_initialize_volume = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 10, 3, 4, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("eInitializing", 2)))).setLabel("file-system4-initialize-volume").setMaxAccess("writeonly")
if mibBuilder.loadTexts: file_system4_initialize_volume.setStatus('optional')
if mibBuilder.loadTexts: file_system4_initialize_volume.setDescription('Setting this object to eInitializing causes file system 4 to be initialized. Reading this object indicates if the file system is currently being initialized. Additional information: The hrDeviceIndex value for the mass storage device is the same value that is used to index into the FILE-SYSTEM sub-tree. Since this product supports up to 3 physical mass storage devices, and since the hrDeviceIndex for the mass storage devices will start at 2 if the mass storage device is installed, the FILE-SYSTEM4-INITIALIZE-VOLUME object will be the object that allows the mass storage device to be initialized that is the 3rd device.')
# --- Mass-storage resource change tracking ---------------------------------
# mass-storage-resource-change-counter: read-only counter bumped whenever a
# mass-storage-based resource is added/deleted or the trigger below fires.
mass_storage_resource_change_counter = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 12, 3, 1), Integer32()).setLabel("mass-storage-resource-change-counter").setMaxAccess("readonly")
if mibBuilder.loadTexts: mass_storage_resource_change_counter.setStatus('optional')
if mibBuilder.loadTexts: mass_storage_resource_change_counter.setDescription('A counter which changes when a mass storage based resource has been added or deleted. Additional information: The value of this counter changes each time the MASS-STORAGE-RESOURCE-CHANGED object is set to eTrue. The value also changes when the mass storage device is initialized. However, the value does not change when a mass storage device is removed and a different mass storage device is installed. Initializing all volumes sets this object back to the factory default value. A reboot sets this object back to the factory devalut value. Returns <noSuchName> status if attempting to access this object and there is no storage device is installed.')
# mass-storage-resource-changed: write-only trigger (eTrue(2) only) that
# increments the change counter above.
mass_storage_resource_changed = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 12, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("eTrue", 2)))).setLabel("mass-storage-resource-changed").setMaxAccess("writeonly")
if mibBuilder.loadTexts: mass_storage_resource_changed.setStatus('optional')
if mibBuilder.loadTexts: mass_storage_resource_changed.setDescription('Setting to eTrue causes MASS-STORAGE-RESOURCE-CHANGE-COUNTER to be incremented. Additional information: Returns <noSuchName> status if attempting to access this object and there is no storage device is installed.')
# --- RAM disk objects ------------------------------------------------------
# ram-disk-mode: read-write enum; this product supports only eOff(1) and
# eAuto(3) (no eOn value in the constraint, auto-sized by installed memory).
ram_disk_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 15, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3))).clone(namedValues=NamedValues(("eOff", 1), ("eAuto", 3)))).setLabel("ram-disk-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: ram_disk_mode.setStatus('optional')
if mibBuilder.loadTexts: ram_disk_mode.setDescription('Returns or controls RAM disk support. eOFF turns off the RAM disk functionality. eOn turns on the RAM disk functionality and creates a RAM disk whose size is controlled by the RAM-DISK-SIZE object. eAuto turns on the RAM disk functionality and creates a RAM disk size determined by the printer based on installed options and other memory related settings. Additional information: Returns or controls RAM disk support. eOFF turns off the RAM disk functionality. eAuto turns on the RAM disk functionality and creates a RAM disk size determined by the printer based on the amount of installed memory.')
# ram-disk-size: read-only on this product (size is auto-determined).
ram_disk_size = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 15, 1, 2), Integer32()).setLabel("ram-disk-size").setMaxAccess("readonly")
if mibBuilder.loadTexts: ram_disk_size.setStatus('optional')
if mibBuilder.loadTexts: ram_disk_size.setDescription('Returns or controls the size of the RAM disk. The device POS specifies the minimum memory requirements. The object MAXIMUM-RAM-DISK-MEMORY specifies the maximum memory available for the RAM disk. Additional information: Returns the size of the RAM disk.')
# maximum-ram-disk-memory: additional memory (bytes) available to grow the
# RAM disk -- despite the name, not the maximum configurable size.
maximum_ram_disk_memory = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 15, 2, 1), Integer32()).setLabel("maximum-ram-disk-memory").setMaxAccess("readonly")
if mibBuilder.loadTexts: maximum_ram_disk_memory.setStatus('optional')
if mibBuilder.loadTexts: maximum_ram_disk_memory.setDescription("This object's name is misleading. This object does not return the maximum configurable RAM disk size. Instead, it returns the maximum amount of memory, in bytes, that can used to increase the size of the RAM disk. Additional information: This object returns the maximum amount of additional memory that is available for increasing the size of the RAM disk.")
device_configure_printer_parameters = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 1, 32, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setLabel("device-configure-printer-parameters").setMaxAccess("readwrite")
if mibBuilder.loadTexts: device_configure_printer_parameters.setStatus('optional')
if mibBuilder.loadTexts: device_configure_printer_parameters.setDescription('Allow the printer to be configured as a duplex or simplex printer. There will be a password encoded in this string and decoded by the printer firmware. If the password check passes the printer will be configured accordingly. Additional information: Used to configure Laserjet 4650 engines for duplex enabled or not. Encoded configuration string is passed in, which is decoded by firmware. Firmware verifies config. string is valid, and retrieves device Configuration data. Expandable to accommodate future products configuration needs.')
# --- Job auto-continue objects (HP private printer MIB) ---
# NOTE(review): this file looks machine-generated (pysmi/pysnmp MIB compiler
# output); prefer regenerating from the source MIB over hand-editing.
# Timeout before auto-continuing after a job-related continuable error:
# -1 = wait for a continue event, 0 = continue immediately, >0 = seconds.
job_input_auto_continue_timeout = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 35), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 3600))).setLabel("job-input-auto-continue-timeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: job_input_auto_continue_timeout.setStatus('optional')
if mibBuilder.loadTexts: job_input_auto_continue_timeout.setDescription('The number of seconds the device waits after a job related continuable error occurs before automatically continuing. An example job related continuable error is the job requesting a media size that is not available. After the timeout expires, the device will continue processing the job as if a continue event occurred, such as the front panel continue key being pressed. If the value is -1, the device does not automatically continue after a job related continuable error occurs. If the value is 0, the device immediately continues. If the value is greater than 0, the value represents the timeout value in seconds. Additional information: The number of seconds the device waits after a job related continuable error occurs before automatically continuing. An example job related continuable error is the job requesting a media size that is not available. After the timeout expires, the device will continue processing the job accourding to the action defined by JOB-INPUT-AUTO-CONTINUE-MODE. If the value is -1, the device does not automatically continue after a job related continuable error occurs. If the value is 0, the device immediately continues. If the value is greater than 0, the value represents the timeout value in seconds. The data for this object is stored in NVRAM.')
# Bitmask (OctetString) selecting the substitution behavior applied when the
# input auto-continue timeout expires; see the description for bit meanings.
job_input_auto_continue_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 36), OctetString()).setLabel("job-input-auto-continue-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: job_input_auto_continue_mode.setStatus('optional')
if mibBuilder.loadTexts: job_input_auto_continue_mode.setDescription('Returns or sets the device behavior when the desired media is not currently available. cCancelJob - The device cancels the job. The device POS should explain what happens if this item is not the only item in the collection. cAutoMediaSizeOverride - The device is allowed to substitute a different size media. cAutoMediaNameOverride - The device is allowed to substitute a different media name. cUSMediaSizeOverride - The device is allowed to substitute US media sizes (letter, etc.) for ISO media sizes (A4, etc.). cISOMediaSizeOverride - The device is allowed to substitute ISO media sizes (A4, etc.) for US media sizes (letter, etc.). Additional information: Returns or sets the device behavior when the desired media is not currently available. cCancelJob - The device cancels the job regardless of other item settings. cAutoMediaSizeOverride - The device is allowed to substitute a different size media. cAutoMediaNameOverride - The device is allowed to substitute a different media name. -- cUSMediaSizeOverride - The device is allowed to substitute -- US media sizes (letter, etc.) for ISO media sizes -- (A4, etc.). -- cISOMediaSizeOverride - The device is allowed to substitute -- ISO media sizes (A4, etc.) for US media sizes -- (letter, etc.). At least one bit of the collection must be set; setting this object to zero will cause a status of <badValue> to be returned. This object describes the action that is performed when the JOB-INPUT-AUTO-CONTINUE-TIMEOUT expires. ')
# Output-side analogue of the input timeout: wait applied when an output bin
# is full before processing continues per OUTBINn-OVERRIDE-MODE.
job_output_auto_continue_timeout = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 40), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 3600))).setLabel("job-output-auto-continue-timeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: job_output_auto_continue_timeout.setStatus('optional')
if mibBuilder.loadTexts: job_output_auto_continue_timeout.setDescription('Returns or sets the time that the printer will wait after an output bin becomes full and the printer is trying to deliver a sheet of media to that output bin. When the timeout expires, the job is processed according to the OUTBINn-OVERRIDE-MODE. A value of -1 indicates that the printer will wait for a continue event. A non-negative value is the number of seconds to wait. Additional information: Returns or sets the time that the printer will wait after an output bin becomes full and the printer is trying to deliver a sheet of media to that output bin. When the timeout expires, the job is processed according to the OUTBINn-OVERRIDE-MODE. A value of -1 indicates that the printer will wait for a continue event. A non- negative value is the number of seconds to wait.')
# --- Device identity objects (read-only) ---
# NOTE(review): machine-generated pysmi/pysnmp MIB output; avoid hand edits.
# Corporate-price-list model number (e.g. C2121A); static per product.
model_number = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 1), DisplayString()).setLabel("model-number").setMaxAccess("readonly")
if mibBuilder.loadTexts: model_number.setStatus('optional')
if mibBuilder.loadTexts: model_number.setDescription('Identifies the device model number as listed in the HP corporate price list (e.g. C2121A for DeskJet 500C). The string is as specific as possible. Products should note in POS if the model number on the CPL changes but the device reports the previous model number. If the model number changes based on the installed options, the POS should indicate if only the base model number is returned, or if the device senses the installed options and returns the correct model number. If possible, encode the model number in a symbol set (like Roman-8) that matches the ASCII character set and limit the characters used to ASCII characters. Additional information: Identifies the device model number as listed in the HP corporate price list (e.g. C2121A for DeskJet 500C). The string is as specific as possible. The value of this object does not change based on the installed options. The default of this object is the same on all LaserJet 8150 printers.')
# Human-readable model name; length-constrained to 1..32 for display use.
model_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 3, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setLabel("model-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: model_name.setStatus('optional')
if mibBuilder.loadTexts: model_name.setDescription("Identifies the device model name (e.g. ''DeskJet 1200C''). The string is as specific as possible. Capitalization and spacing should match family naming conventions. Products should note in POS if the model name on the HP corporate price list changes but the device reports the previous device name. If the model name changes based on the installed options, the POS should indicate if only the base model name is returned, or if the device senses the installed options and returns the correct model name. If possible, encode the model name in a symbol set (like Roman-8) that matches the ASCII character set and limit the characters used to ASCII characters. Additional information: Since the value of this object is frequently used in displaying a list of printers, it is kept relatively short in case systems have limited width for their display area. The model name does not change based on sensing of installed options.")
# --- Paper-handling device #2 (PHD 2) objects, all read-only ---
# NOTE(review): machine-generated pysmi/pysnmp MIB output; avoid hand edits.
# Per the descriptions, these return ERROR-UNKNOWN-OBJECT-IDENTIFIER when
# PHD 2 is not installed.
phd2_model = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 5, 2, 1), DisplayString()).setLabel("phd2-model").setMaxAccess("readonly")
if mibBuilder.loadTexts: phd2_model.setStatus('optional')
if mibBuilder.loadTexts: phd2_model.setDescription('Returns product information identifying PHD device 2. Example: XXXX. Additional information: Returns product information identifying the first paper handling device in the device chaing, PHD device 2. If PDH device 2 does not exist, ERROR-UNKNOWN-OBJECT-IDENTIFIER will be returned.')
# Manufacturing details (may include serial number / firmware revision).
phd2_manufacturing_info = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 5, 2, 2), DisplayString()).setLabel("phd2-manufacturing-info").setMaxAccess("readonly")
if mibBuilder.loadTexts: phd2_manufacturing_info.setStatus('optional')
if mibBuilder.loadTexts: phd2_manufacturing_info.setDescription('Returns information describing the manufacture of PHD 2. May include serial number and firmware revision. Additional information: Returns information describing the manufacture of PHD 2. If PHD 2 does not exist, ERROR-UNKNOWN-OBJECT-IDENTIFIER will be returned.')
# Type is constrained to the single enum value eOutputPHD(11) on this model.
phd2_type = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 5, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(11))).clone(namedValues=NamedValues(("eOutputPHD", 11)))).setLabel("phd2-type").setMaxAccess("readonly")
if mibBuilder.loadTexts: phd2_type.setStatus('optional')
if mibBuilder.loadTexts: phd2_type.setDescription('Returns an indication of the type of option installed in PHD interface 2. See SIMM1-TYPE for an explanation of the enumerations. Additional information: Returns an indication of the type of option installed in PHD interface 2. If PHD 2 does not exist, ERROR-UNKNOWN-OBJECT-IDENTIFIER will be returned.')
# Capacity: per the description, the number of trays/bins for I/O PHDs.
phd2_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 4, 5, 2, 4), Integer32()).setLabel("phd2-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: phd2_capacity.setStatus('optional')
if mibBuilder.loadTexts: phd2_capacity.setDescription('Returns an indication of the capacity of the installed option. See SIMM1-CAPACITY for an explanation of the meaning of the value of this object. Additional information: Returns an indication of the capacity of the installed option. For eInputPHD or eOutputPHD, or eBindingPHD the number of input trays or output bins is returned. If PHD 2 does not exist, ERROR-UNKNOWN-OBJECT-IDENTIFIER will be returned.')
# Engine self-diagnostic: binary string of two-byte failure codes for
# motors, clutches, solenoids and sensors (read-only).
# NOTE(review): machine-generated pysmi/pysnmp MIB output; avoid hand edits.
engine_self_diagnostic = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 5, 7), OctetString()).setLabel("engine-self-diagnostic").setMaxAccess("readonly")
if mibBuilder.loadTexts: engine_self_diagnostic.setStatus('optional')
if mibBuilder.loadTexts: engine_self_diagnostic.setDescription('The ENGINE-SELF-DIAGNOSTIC object reveals current engine failures; it returns a binary string of two-byte motor, clutch, solenoid, and sensor failure codes. Additional information: The ENGINE-SELF-DIAGNOSTIC object reveals current Engine Failures; it returns a BINARY string of two-byte Motor, Clutch, Solenoid and Sensor failure codes.')
default_media_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 3, 1, 22), DisplayString()).setLabel("default-media-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_media_name.setStatus('optional')
if mibBuilder.loadTexts: default_media_name.setDescription('Returns or sets the media name that is used until the media name is changed by a print job command. Additional information: Returns or sets the media name that is used until the media name is changed by a print job command. This string must be one of the MEDIAn-NAME objects. This object is localized if the corresponding MEDIAn-NAME object is localized. The data for this object is stored in NVRAM.')
override_media_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 1, 2), DisplayString()).setLabel("override-media-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: override_media_name.setStatus('optional')
if mibBuilder.loadTexts: override_media_name.setDescription('Sets a string identifying the media name that is to be used in place of the currently requested media. The substitution will continue until another media is selected. If set to a named media that is not currently available the requested media is not overridden. Additional information: When a request is received to print on a size and type of media that is not currently available, this object contains the desired media name as set by the print job. This object should be set to a media name that is currently available in the printer. If a paper mount request is not pending, attempting to get or set this object will cause <noSuchName> to be returned. Setting this object to a string other than one of the MEDIAn-NAME objects (MEDIA-NAMES-AVAILABLE is applied) will cause a status of <badValue> to be returned.')
override_media_size = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 11, 17, 18, 19, 25, 26, 27, 45, 46, 65, 72, 80, 81, 90, 91, 100, 101, 258, 282, 32767))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eLedger", 11), ("eROC16K", 17), ("eJISExecutive", 18), ("eROC8K", 19), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eISOandJISA3", 27), ("eJISB5", 45), ("eJISB4", 46), ("eISOB5", 65), ("eJapanesePostcardDouble", 72), ("eMonarch", 80), ("eCommercial10", 81), ("eInternationalDL", 90), ("eInternationalC5", 91), ("eInternationalB5", 100), ("eCustom", 101), ("eUSLetterR", 258), ("eISOandJISA4R", 282), ("eUnknownMediaSize", 32767)))).setLabel("override-media-size").setMaxAccess("readwrite")
if mibBuilder.loadTexts: override_media_size.setStatus('optional')
if mibBuilder.loadTexts: override_media_size.setDescription('Sets the media size that is to be used in place of the currently requested media size. Additional information: When a request is received to print on a size and type of media that is not currently available, this object contains the desired size as set by the print job. This object should be set to a media size that is currently available to the printer. If a paper mount request is not pending, attempting to get or set this object will cause <noSuchName> to be returned.')
# --- Print-engine objects: density, duplex page count, revision ---
# NOTE(review): machine-generated pysmi/pysnmp MIB output; avoid hand edits.
# Print density: 0 = lightest, 10 = darkest, per the description.
marking_agent_density_setting = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 1, 9, 1), Integer32()).setLabel("marking-agent-density-setting").setMaxAccess("readwrite")
if mibBuilder.loadTexts: marking_agent_density_setting.setStatus('optional')
if mibBuilder.loadTexts: marking_agent_density_setting.setDescription('Returns or sets the marking agent density setting for each of the marking agents installed. The Marking Agent (aka Print) density is the instantaneous amount of marking agent applied to the media while printing. A value of zero has the lowest print density, yielding a lighter page. A value of 10 has the highest print density, yielding a darker page. The device POS will document what values are supported. Additional information: Returns or sets the print density setting. Print density is the instantaneous amount of marking agent applied to the paper while printing. A value of zero has the lowest print density, yielding a lighter page. A value of 10 has the highest print density, yielding a darker page.')
# Duplex sheet counter: NVRAM-backed, flushed every 10 sheets (so up to 9
# may be lost on power failure); wraps at 2^24-1 per the description.
duplex_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 2, 22), Integer32()).setLabel("duplex-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: duplex_page_count.setStatus('optional')
if mibBuilder.loadTexts: duplex_page_count.setDescription('Total number of sheets of media that have been duplex printed. A sheet is counted if it travels through the duplex page path, regardless of whether or not marks are made on the page. The POS will indicate if the value is kept in NVRAM. Additional information: Total number of sheets of media that have been duplex printed. A sheet is counted if it travels through the duplex page path, regardless of whether or not marks are made on the page. This value is kept in NVRAM however the NVRAM value is only updated every 10 sheets. NOTE: The value returned by this object will be incremented every sheet but if power is lost between NVRAM updates up to 9 sheets of the count may be lost. The counter will be reset to zero after 16,777,215 (2^24-1) pages. ')
# Engine firmware revision string (Roman-8 symbol set, 1..16 chars).
print_engine_revision = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 2, 26), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setLabel("print-engine-revision").setMaxAccess("readonly")
if mibBuilder.loadTexts: print_engine_revision.setStatus('optional')
if mibBuilder.loadTexts: print_engine_revision.setDescription('Print engine revision string. Additional information: Print engine revision string. The symbol set for this string is Roman-8. ')
# --- Input-tray configuration objects ---
# NOTE(review): machine-generated pysmi/pysnmp MIB output; avoid hand edits.
# Whether the printer falls through to the next unlocked tray when the
# current tray cannot supply media (eOff=1 / eOn=2).
input_tray_auto_select = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eOff", 1), ("eOn", 2)))).setLabel("input-tray-auto-select").setMaxAccess("readwrite")
if mibBuilder.loadTexts: input_tray_auto_select.setStatus('optional')
if mibBuilder.loadTexts: input_tray_auto_select.setDescription('Indicates if the device will automatically try to load media from the next input media tray in the auto-select sequence (defined by each device) when it cannot load media from the current tray. Locked trays will not be permitted in the auto-select sequence. This object has no meaning if there is only one unlocked input media tray. Additional information: Indicates if the device will automatically try to load media from the next input media tray in the auto-select sequence (defined by each device) when it cannot load media from the current tray. Locked trays will not be permitted in the auto-select sequence. This object has no meaning if there is only one unlocked input media tray.')
# Unit of measure for the custom-paper dimension objects below:
# eTenThousandthsOfInches(3) = 0.0001 inch, eMicrometers(4).
default_custom_paper_dim_unit = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(3, 4))).clone(namedValues=NamedValues(("eTenThousandthsOfInches", 3), ("eMicrometers", 4)))).setLabel("default-custom-paper-dim-unit").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_custom_paper_dim_unit.setStatus('optional')
if mibBuilder.loadTexts: default_custom_paper_dim_unit.setDescription("The units of measure used to specify the width and height of the printer's default custom paper size. The unit of measure of eTenThousandthsOfInches is 0.0001 inches. Additional information: The units of measure used to specify the width and height of the printer's default custom paper size. The unit of measure of eTenThousandthsOfInches is 0.0001 inches.")
# Default custom size, feed direction (parallel to paper travel).
default_custom_paper_feed_dim = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 11), Integer32()).setLabel("default-custom-paper-feed-dim").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_custom_paper_feed_dim.setStatus('optional')
if mibBuilder.loadTexts: default_custom_paper_feed_dim.setDescription("Sets the printer's default custom paper size dimension in the feed direction (direction parallel to the direction of paper feeding). The value for this object is specified in micrometers or tenthousandths of an inch, depending upon the value of DEFAULT-CUSTOM-PAPER-DIM-UNIT. The valid range is engine-dependent and should be documented in the POS for each product. Additional information: Sets the printer's default custom paper size dimension in the feed direction (direction parallel to the direction of paper feeding). The value for this object is specified in micrometers or tenthousandths of an inch, depending upon the value of DEFAULT-CUSTOM-PAPER-DIM-UNIT. The valid range is engine-dependent and should be documented in the POS for each product.")
# Default custom size, cross-feed direction (90 degrees to paper travel).
default_custom_paper_xfeed_dim = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 12), Integer32()).setLabel("default-custom-paper-xfeed-dim").setMaxAccess("readwrite")
if mibBuilder.loadTexts: default_custom_paper_xfeed_dim.setStatus('optional')
if mibBuilder.loadTexts: default_custom_paper_xfeed_dim.setDescription("Sets the printer's default custom paper size dimension in the cross-feed direction (direction ninety degrees relative to the direction of paper feeding). The value for this object is specified in micrometers or tenthousandths of an inch, depending upon the value of DEFAULT-CUSTOM-PAPER-DIM-UNIT. The valid range is engine-dependent and should be documented in the POS for each product. Additional information: Sets the printer's default custom paper size dimension in the cross-feed direction (direction ninety degrees relative to the direction of paper feeding). The value for this object is specified in micrometers or tenthousandths of an inch, depending upon the value of DEFAULT-CUSTOM-PAPER-DIM-UNIT. The valid range is engine-dependent and should be documented in the POS for each product.")
# Min/max media dimensions (read-only): -1 = unlimited, -2 = unknown.
input_tray_max_media_feed_dim = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 14), Integer32()).setLabel("input-tray-max-media-feed-dim").setMaxAccess("readonly")
if mibBuilder.loadTexts: input_tray_max_media_feed_dim.setStatus('optional')
if mibBuilder.loadTexts: input_tray_max_media_feed_dim.setDescription("The maximum physical media size in the feed direction of this input device expressed in units of measure specified by INPUT- TRAY-MIN-MAX-DIM-UNIT. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Additional information: The maximum physical media size in the feed direction of this input device expressed in units of measure specified by PrtInputDimUnit. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Refer to Printer Management Standards web page, http://bldlabs.boi.hp.com/BLDPrinterLab/Project/PrinterManagement, for more details in the original format of the Standard Printer MIB.")
input_tray_max_media_xfeed_dim = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 15), Integer32()).setLabel("input-tray-max-media-xfeed-dim").setMaxAccess("readonly")
if mibBuilder.loadTexts: input_tray_max_media_xfeed_dim.setStatus('optional')
if mibBuilder.loadTexts: input_tray_max_media_xfeed_dim.setDescription("The maximum physical media size across the feed direction of a particular input device expressed in units of measure specified by INPUT-TRAY-MIN-MAX-DIM-UNIT. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Additional information: The maximum physical media size across the feed direction of this input device expressed in units of measure specified by PrtInputDimUnit. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Refer to Printer Management Standards web page, http://bldlabs.boi.hp.com/BLDPrinterLab/Project/PrinterManagement, f or more details in the original format of the Standard Printer MIB.")
input_tray_min_media_feed_dim = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 16), Integer32()).setLabel("input-tray-min-media-feed-dim").setMaxAccess("readonly")
if mibBuilder.loadTexts: input_tray_min_media_feed_dim.setStatus('optional')
if mibBuilder.loadTexts: input_tray_min_media_feed_dim.setDescription("The minimum physical media size in the feed direction of a particular input device expressed in units of measure specified by PrtInputMinMaxDimUnit. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Additional information: The minimum physical media size in the feed direction of this input device expressed in units of measure specified by PrtInputDimUnit. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Refer to Printer Management Standards web page, http://bldlabs.boi.hp.com/BLDPrinterLab/Project/PrinterManagement, for more details in the original format of the Standard Printer MIB.")
input_tray_min_media_xfeed_dim = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 17), Integer32()).setLabel("input-tray-min-media-xfeed-dim").setMaxAccess("readonly")
if mibBuilder.loadTexts: input_tray_min_media_xfeed_dim.setStatus('optional')
if mibBuilder.loadTexts: input_tray_min_media_xfeed_dim.setDescription("The minimum physical media size across the feed direction of a particular input device expressed in units of measure specified by PrtInputMinMaxDimUnit. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Additional information: The minimum physical media size across the feed direction of this input device expressed in units of measure specified by PrtInputDimUnit. A value of (-1) implies 'unlimited', a value of (-2) implies 'unknown'. Refer to Printer Management Standards web page, http://bldlabs.boi.hp.com/BLDPrinterLab/Project/PrinterManagement, for more details in the original format of the Standard Printer MIB.")
# NOTE(review): the two halves of this description contradict each other on
# which of eTrue/eFalse means "always prompt" — verify against the source
# MIB before relying on it. (Do not hand-edit the string; it is MIB text.)
manually_feed_prompt_test = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eFalse", 1), ("eTrue", 2)))).setLabel("manually-feed-prompt-test").setMaxAccess("readwrite")
if mibBuilder.loadTexts: manually_feed_prompt_test.setStatus('optional')
if mibBuilder.loadTexts: manually_feed_prompt_test.setDescription('Indicates if the Manual Feed Prompt will be displayed always (eTrue), or only if the tray is not loaded (eFalse). Additional information: Indicates if the Manual Feed Prompt will be displayed always (eFalse), or only if the tray is not loaded with the requested media. (eTrue).')
# --- Per-tray objects: trayN-media-size-loaded (rw enum of sizes the tray
# --- accepts) and trayN-phd (ro number of the PHD containing the tray) ---
# NOTE(review): machine-generated pysmi/pysnmp MIB output; avoid hand edits.
# The enum value sets differ per tray (e.g. tray 4 is envelopes only).
# NOTE(review): "configuredconfigured" below is a typo carried over from the
# source MIB text; left as-is because it is runtime string data.
tray1_media_size_loaded = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 10, 15, 17, 18, 25, 26, 45, 72, 80, 81, 90, 91, 100, 101, 32764, 32765))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eFoolscap", 10), ("eStatement", 15), ("eROC16K", 17), ("eJISExecutive", 18), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eJISB5", 45), ("eJapansePostcardDouble", 72), ("eMonarch", 80), ("eCommercial10", 81), ("eInternationalDL", 90), ("eInternationalC5", 91), ("eInternationalB5", 100), ("eCustom", 101), ("eAnyCustomSize", 32764), ("eAnySize", 32765)))).setLabel("tray1-media-size-loaded").setMaxAccess("readwrite")
if mibBuilder.loadTexts: tray1_media_size_loaded.setStatus('optional')
if mibBuilder.loadTexts: tray1_media_size_loaded.setDescription("Returns the media size that is currently configuredconfigured in tray #1. This object can be set to indicate the media size currently loaded, if the printer supports input trays that can not sense the media size. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document. Additional information: Returns the media size that is currently configuredconfigured in tray #1. This object can be set to indicate the media size currently loaded, if the printer supports input trays that can not sense the media size. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document.")
tray1_phd = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 1, 12), Integer32()).setLabel("tray1-phd").setMaxAccess("readonly")
if mibBuilder.loadTexts: tray1_phd.setStatus('optional')
if mibBuilder.loadTexts: tray1_phd.setDescription('Provides the number of the Paper Handling Device that contains this input tray. Additional information: Provides the number of the Paper Handling Device that contains this input tray.')
tray2_media_size_loaded = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 10, 17, 18, 25, 26, 45, 101, 32764, 32765))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eFoolscap", 10), ("eROC16K", 17), ("eJISExecutive", 18), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eJISB5", 45), ("eCustom", 101), ("eAnyCustomSize", 32764), ("eAnySize", 32765)))).setLabel("tray2-media-size-loaded").setMaxAccess("readwrite")
if mibBuilder.loadTexts: tray2_media_size_loaded.setStatus('optional')
if mibBuilder.loadTexts: tray2_media_size_loaded.setDescription("Returns the media size that is currently configured in tray #2. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document. Additional information: Returns the media size that is currently configured in tray #2.")
tray2_phd = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 2, 12), Integer32()).setLabel("tray2-phd").setMaxAccess("readonly")
if mibBuilder.loadTexts: tray2_phd.setStatus('optional')
if mibBuilder.loadTexts: tray2_phd.setDescription('Provides the number of the Paper Handling Device that contains this input tray. Additional information: Provides the number of the Paper Handling Device that contains this input tray.')
tray3_media_size_loaded = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 10, 17, 18, 25, 26, 45, 101, 32764, 32765))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eFoolscap", 10), ("eROC16K", 17), ("eJISExecutive", 18), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eJISB5", 45), ("eCustom", 101), ("eAnyCustomSize", 32764), ("eAnySize", 32765)))).setLabel("tray3-media-size-loaded").setMaxAccess("readwrite")
if mibBuilder.loadTexts: tray3_media_size_loaded.setStatus('optional')
if mibBuilder.loadTexts: tray3_media_size_loaded.setDescription("Returns the media size that is currently configured in tray #3. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document. Additional information: Returns the media size that is currently configured in tray #3.")
tray3_phd = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 3, 12), Integer32()).setLabel("tray3-phd").setMaxAccess("readonly")
if mibBuilder.loadTexts: tray3_phd.setStatus('optional')
if mibBuilder.loadTexts: tray3_phd.setDescription('Provides the number of the Paper Handling Device that contains this input tray. Additional information: Provides the number of the Paper Handling Device that contains this input tray.')
# Tray 4 accepts envelope sizes only (Monarch/COM10/DL/C5/B5 or any).
tray4_media_size_loaded = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(80, 81, 90, 91, 100, 32765))).clone(namedValues=NamedValues(("eMonarch", 80), ("eCommercial10", 81), ("eInternationalDL", 90), ("eInternationalC5", 91), ("eInternationalB5", 100), ("eAnySize", 32765)))).setLabel("tray4-media-size-loaded").setMaxAccess("readwrite")
if mibBuilder.loadTexts: tray4_media_size_loaded.setStatus('optional')
if mibBuilder.loadTexts: tray4_media_size_loaded.setDescription("Returns the media size that is currently configured in tray #4. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document. Additional information: Returns the media size that is currently configured in tray #4.")
tray4_phd = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 4, 12), Integer32()).setLabel("tray4-phd").setMaxAccess("readonly")
if mibBuilder.loadTexts: tray4_phd.setStatus('optional')
if mibBuilder.loadTexts: tray4_phd.setDescription('Provides the number of the Paper Handling Device that contains this input tray. Additional information: Provides the number of the Paper Handling Device that contains this input tray.')
tray5_media_size_loaded = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 10, 17, 18, 25, 26, 45, 101, 32764, 32765))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eFoolscap", 10), ("eROC16K", 17), ("eJISExecutive", 18), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eJISB5", 45), ("eCustom", 101), ("eAnyCustomSize", 32764), ("eAnySize", 32765)))).setLabel("tray5-media-size-loaded").setMaxAccess("readwrite")
if mibBuilder.loadTexts: tray5_media_size_loaded.setStatus('optional')
if mibBuilder.loadTexts: tray5_media_size_loaded.setDescription("Returns the media size that is currently configured in tray #5. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document. Additional information: Returns the media size that is currently configured in tray #5.")
tray5_phd = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 5, 12), Integer32()).setLabel("tray5-phd").setMaxAccess("readonly")
if mibBuilder.loadTexts: tray5_phd.setStatus('optional')
if mibBuilder.loadTexts: tray5_phd.setDescription('Provides the number of the Paper Handling Device that contains this input tray. Additional information: Provides the number of the Paper Handling Device that contains this input tray.')
# NOTE(review): the first sentence of tray6's description says "tray #5" —
# a copy/paste artifact in the source MIB text; the "Additional information"
# half correctly says tray #6. Left as-is (runtime string data).
tray6_media_size_loaded = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 10, 17, 18, 25, 26, 45, 101, 32764, 32765))).clone(namedValues=NamedValues(("eUSExecutive", 1), ("eUSLetter", 2), ("eUSLegal", 3), ("eFoolscap", 10), ("eROC16K", 17), ("eJISExecutive", 18), ("eISOandJISA5", 25), ("eISOandJISA4", 26), ("eJISB5", 45), ("eCustom", 101), ("eAnyCustomSize", 32764), ("eAnySize", 32765)))).setLabel("tray6-media-size-loaded").setMaxAccess("readwrite")
if mibBuilder.loadTexts: tray6_media_size_loaded.setStatus('optional')
if mibBuilder.loadTexts: tray6_media_size_loaded.setDescription("Returns the media size that is currently configured in tray #5. Complete list of supported media sizes along with their dimensions are listed in the ''Media Size Table'' near the end of this document. Additional information: Returns the media size that is currently configured in tray #6.")
tray6_phd = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 3, 3, 6, 12), Integer32()).setLabel("tray6-phd").setMaxAccess("readonly")
if mibBuilder.loadTexts: tray6_phd.setStatus('optional')
if mibBuilder.loadTexts: tray6_phd.setDescription('Provides the number of the Paper Handling Device that contains this input tray. Additional information: Provides the number of the Paper Handling Device that contains this input tray.')
# --- Output-bin objects ---
# NOTE(review): machine-generated pysmi/pysnmp MIB output; avoid hand edits.
# Bin used for subsequent sheets when the current bin fills (NVRAM-backed).
overflow_bin = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 4, 1, 4), Integer32()).setLabel("overflow-bin").setMaxAccess("readwrite")
if mibBuilder.loadTexts: overflow_bin.setStatus('optional')
if mibBuilder.loadTexts: overflow_bin.setDescription('Returns or sets the bin that will be used for additional sheets of media when the current bin is full and printing is allowed to continue. Additional information: Returns or sets the bin that will be used for additional sheets of media when the current bin is full and printing is allowed to continue. The data for this object is stored in NVRAM.')
# Bitmask (OctetString) of actions taken for bin 1 when
# JOB-OUTPUT-AUTO-CONTINUE-TIMEOUT expires; see description for bits.
outbin1_override_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 4, 3, 1, 9), OctetString()).setLabel("outbin1-override-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: outbin1_override_mode.setStatus('optional')
if mibBuilder.loadTexts: outbin1_override_mode.setDescription('Returns or sets the device behavior when this output bin condition causes printing to stop. cCancelJob - The device cancels the job. cOutbinFullOverride - The device sends subsequent media to the overflow bin. cOutbinAttentionOverride - The device ignores the attention condition and continues printing. cBinderAttentionOverride - The device ignores the binder attention condition and continues printing. Additional information: Returns or sets the device behavior when this output bin condition causes printing to stop. cCancelJob - The device cancels the job, regardless of other bit settings. cOutbinFullOverride - The device sends subsequent media to the overflow bin. If this bin is the overflow bin, this bit is ignored. cOutbinAttentionOverride - The device ignores the attention condition and continues printing. cBinderAttentionOverride - The device ignores the binder attention condition and continues printing. This object describes the action that is performed when the JOB-OUTPUT-AUTO-CONTINUE-TIMEOUT expires. If no bits are set, no override action is taken (the printer will continue to wait).')
# marker-density-calibration: read-write enumeration that reports calibration
# status or, when set, triggers a calibration cycle (all / color / halftone /
# color-plane-registration).
marker_density_calibration = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 5, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("eNotCalibrating", 1), ("eCalibrateALL", 2), ("eCalibrateColor", 3), ("eCalibrateHalftone", 4), ("eCalibrateColorPlaneRegistration", 5)))).setLabel("marker-density-calibration").setMaxAccess("readwrite")
if mibBuilder.loadTexts: marker_density_calibration.setStatus('optional')
if mibBuilder.loadTexts: marker_density_calibration.setDescription('Returns the calibration status or triggers a calibration cycle. All calibration procedures are used if set to eCalibrateALL. If set to eCalibrateDMAX then a full density engine calibration is triggered. If set to eCalibrateDHALF, then a Halftone calibration is triggered. Additional information: Returns the calibration status or triggers a calibration cycle. All calibration procedures are used if set to eCalibrateALL. If set to eCalibrateColor then a full density engine calibration is triggered. If set to eCalibrateHalftone, then a Halftone calibration is triggered. If set to eCalibrateColorPlaneRegistration, then a Color Plane Registration calibration is triggered. If set to eCalibrateDrumPhaseAdjustment, then a Drum Phase Adjustment calibration is triggered.')
# phd2-device-specific-command: write-only OctetString channel for sending
# device-specific commands to paper-handling device 2.
phd2_device_specific_command = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 7, 3, 2, 2), OctetString()).setLabel("phd2-device-specific-command").setMaxAccess("writeonly")
if mibBuilder.loadTexts: phd2_device_specific_command.setStatus('optional')
if mibBuilder.loadTexts: phd2_device_specific_command.setDescription('This object is used to send device-specific data to the paper handling device. The meaning of the device-specific command is dependent on the paper handling device and must be specified in the POS. If the paper handling device does not accept the command, then an <badValue> will be returned. If the device accepts the command, it may still be processing the command after the response has been returned. Depending on the device, the application may need to query PHDx-DEVICE-MEMORY to see when the command has completed. Additional information: This object is used to send device-specific data to the paper handling device. The meaning of the device- specific command is dependent on the paper handling device and must be specified in the POS. If the paper handling device does not accept the command, then an <badValue> will be returned. If the device accepts the command, it may still be processing the command after the response has been returned. Depending on the device, the application may need to query PHDx-DEVICE-MEMORY to see when the command has completed.')
# media-names-available: read-write bit mask controlling which MEDIAx-NAME
# object groups below are accessible.
media_names_available = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 1, 1), OctetString()).setLabel("media-names-available").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media_names_available.setStatus('optional')
if mibBuilder.loadTexts: media_names_available.setDescription('The value of this object controls which of the MEDIAx-NAME objects are supported. If a bit is set to zero, then attempting to get or set the corresponding MEDIAx-NAME objects will return <noSuchName>. Additional information: Setting a bit to one will cause the corresponding MEDIAn- objects to be available (attempting to access an unavailable object will return <noSuchName>). MEDIA1- objects are always present, as this is the default media. If this object is set to a value that does not include cMediaName2Available, that bit will be set and a status of <noError> will be returned.')
# north-edge-offset: read-write image-shift adjustment in 300 dpi dots
# relative to the leading (north) edge of the medium; stored in NVRAM.
north_edge_offset = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 1, 2), Integer32()).setLabel("north-edge-offset").setMaxAccess("readwrite")
if mibBuilder.loadTexts: north_edge_offset.setStatus('optional')
if mibBuilder.loadTexts: north_edge_offset.setDescription('Returns or sets the number of 300 dpi dots by which the image is shifted. Shift is relative to the leading edge of the medium as the medium flows through the marking engine with the side to be imaged facing the observer. The leading edge is the North edge and the other edges are defined by the normal compass layout of directions with the compass facing the observer. The adjustment is for all pages printed. A positive value moves the image away from the leading edge of the medium. A negative value moves the image closer to the leading edge of the medium. The value 0 will return the image to its factory default position. Additional information: Returns or sets the number of 300 dpi dots by which the image is shifted. Shift is relative to the leading edge of the medium as the medium flows through the marking engine with the side to be imaged facing the observer. The leading edge is the North edge and the other edges are defined by the normal compass layout of directions with the compass facing the ob server. The adjustment is for all pages printed. A positive value moves the image away from the leading edge of the medium. A negative value moves the image closer to the leading edge of the medium. The value 0 will return the image to its factory default position. The value of this object is stored in NVRAM.')
# Media slot 1 scalars.  This is the reference group: the MEDIA2..MEDIA15
# groups' descriptions all refer back to MEDIA1-NAME / MEDIA1-SHORT-NAME /
# MEDIA1-PAGE-COUNT / MEDIA1-ENGINE-MEDIA-MODE.
# media1-name: read-write display string, 1-24 chars.
media1_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24))).setLabel("media1-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media1_name.setStatus('optional')
if mibBuilder.loadTexts: media1_name.setDescription('Media 1 name. Additional information: The symbol set for this string is Roman-8.')
# media1-short-name: read-write control-panel-sized name, 1-11 chars.
media1_short_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11))).setLabel("media1-short-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media1_short_name.setStatus('optional')
if mibBuilder.loadTexts: media1_short_name.setDescription("Length restricted version of the media name 1. The length restriction is required to allow the media name to be displayed on the device's control panel. The device POS must state the maximum number of characters allowed. If the device also has a limitation on what characters in a character set can be used (e.g. only uppercase characters allowed), the POS should also state character restrictions. Additional information: Length restricted version of the media name 1. The length restriction is required to allow the media name to be displayed on the device's control panel. The maximum supported string length is 11 characters. If the user entered string is too long, the device will store the first 11 characters and will return the <noError> status. The characters must be in the range 20H to 7FH except 5C cannot be used. The default symbol set is Roman-8 for English; additional legal symbol sets are ISOLatin5, ISOLatin2 and Windows31J. Setting this object with characters outside of the range or of an illegal symbol set will cause an error status of <badValue> to be returned. This string is localized according to prtConsoleLocalization. If this object represents a standard type, and the user attempts to set the object, 'OK Nearest Legal Value Substituted' will be returned, and the standard value is retained. If this object represents a user defined type, and the user attempts to set the object, then the set will be successfull.")
# media1-page-count: read-only count of sheets printed on this media type.
media1_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 1, 3), Integer32()).setLabel("media1-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: media1_page_count.setStatus('optional')
if mibBuilder.loadTexts: media1_page_count.setDescription('Number of sheets of media 1 that have been printed. The device POS should state whether this value is lost across a power cycle or kept in NVRAM. Additional information: Number of sheets of media 1 that have been printed. This page count is saved in NVRAM after every 10 pages. The maximum value is 4,294,967,295 which will never be reached in normal operation. The page count is incremented when a sheet of media is pulled from an input tray. A duplex printed sheet will cause this counter to be incremented by one.')
# media1-engine-media-mode: read-write engine processing characteristics.
media1_engine_media_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 1, 4), Integer32()).setLabel("media1-engine-media-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media1_engine_media_mode.setStatus('optional')
if mibBuilder.loadTexts: media1_engine_media_mode.setDescription('The engine processing characteristics that are to be applied to this media. The processing characteristics are device specific. Additional information: Displays the engine processing characterstics that are applied to this media type.')
# Media slots 2..14 scalars.
#
# The hand-expanded originals for media2..media14 were ~150 lines of
# copy-paste boilerplate differing only in the media index <n>.  Each slot
# contributes four MIB scalars under (...1, 4, 1, 8, 3, <n>):
#   .1  media<n>-name              DisplayString(1..24)  read-write
#   .2  media<n>-short-name        DisplayString(1..11)  read-write
#   .3  media<n>-page-count        Integer32             read-only
#   .4  media<n>-engine-media-mode Integer32             read-write
# The descriptions all defer to the corresponding MEDIA1-* object.
#
# The loop below reproduces the originals exactly (same OIDs, labels, access
# levels and description strings) and binds the same module-level names
# (media<n>_name, media<n>_short_name, media<n>_page_count,
# media<n>_engine_media_mode) via globals(), so later exportSymbols()/
# loadTexts code that references those names is unaffected.
_MEDIA_SUBTREE = (1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3)
for _n in range(2, 15):
    _name = MibScalar(_MEDIA_SUBTREE + (_n, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24))).setLabel("media%d-name" % _n).setMaxAccess("readwrite")
    if mibBuilder.loadTexts: _name.setStatus('optional')
    if mibBuilder.loadTexts: _name.setDescription('Media %d name. Additional information: See MEDIA1-NAME.' % _n)
    _short = MibScalar(_MEDIA_SUBTREE + (_n, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11))).setLabel("media%d-short-name" % _n).setMaxAccess("readwrite")
    if mibBuilder.loadTexts: _short.setStatus('optional')
    if mibBuilder.loadTexts: _short.setDescription('Length restricted version of the media name %d. See MEDIA1-SHORT-NAME for details. Additional information: See MEDIA1-SHORT-NAME.' % _n)
    _count = MibScalar(_MEDIA_SUBTREE + (_n, 3), Integer32()).setLabel("media%d-page-count" % _n).setMaxAccess("readonly")
    if mibBuilder.loadTexts: _count.setStatus('optional')
    if mibBuilder.loadTexts: _count.setDescription('Number of sheets of media %d that have been printed. See MEDIA1-PAGE-COUNT for details. Additional information: See MEDIA1-PAGE-COUNT.' % _n)
    _mode = MibScalar(_MEDIA_SUBTREE + (_n, 4), Integer32()).setLabel("media%d-engine-media-mode" % _n).setMaxAccess("readwrite")
    if mibBuilder.loadTexts: _mode.setStatus('optional')
    if mibBuilder.loadTexts: _mode.setDescription('The engine processing characteristics that are to be applied to this media. The processing characteristics are device specific. Additional information: See the description for MEDIA1-ENGINE-MEDIA-MODE.')
    globals()['media%d_name' % _n] = _name
    globals()['media%d_short_name' % _n] = _short
    globals()['media%d_page_count' % _n] = _count
    globals()['media%d_engine_media_mode' % _n] = _mode
# Avoid leaking loop temporaries into the module namespace.
del _MEDIA_SUBTREE, _n, _name, _short, _count, _mode
media15_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 15, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24))).setLabel("media15-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media15_name.setStatus('optional')
if mibBuilder.loadTexts: media15_name.setDescription('Media 15 name. Additional information: See MEDIA1-NAME.')
media15_short_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 15, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11))).setLabel("media15-short-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media15_short_name.setStatus('optional')
if mibBuilder.loadTexts: media15_short_name.setDescription('Length restricted version of the media name 15. See MEDIA1-SHORT-NAME for details. Additional information: See MEDIA1-SHORT-NAME.')
media15_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 15, 3), Integer32()).setLabel("media15-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: media15_page_count.setStatus('optional')
if mibBuilder.loadTexts: media15_page_count.setDescription('Number of sheets of media 15 that have been printed. See MEDIA1-PAGE-COUNT for details. Additional information: See MEDIA1-PAGE-COUNT.')
media15_engine_media_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 15, 4), Integer32()).setLabel("media15-engine-media-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media15_engine_media_mode.setStatus('optional')
if mibBuilder.loadTexts: media15_engine_media_mode.setDescription('The engine processing characteristics that are to be applied to this media. The processing characteristics are device specific. Additional information: See the description for MEDIA1-ENGINE-MEDIA-MODE.')
media16_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 16, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24))).setLabel("media16-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media16_name.setStatus('optional')
if mibBuilder.loadTexts: media16_name.setDescription('Media 16 name. Additional information: See MEDIA1-NAME.')
media16_short_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 16, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11))).setLabel("media16-short-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media16_short_name.setStatus('optional')
if mibBuilder.loadTexts: media16_short_name.setDescription('Length restricted version of the media name 16. See MEDIA1-SHORT-NAME for details. Additional information: See MEDIA1-SHORT-NAME.')
media16_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 16, 3), Integer32()).setLabel("media16-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: media16_page_count.setStatus('optional')
if mibBuilder.loadTexts: media16_page_count.setDescription('Number of sheets of media 16 that have been printed. See MEDIA1-PAGE-COUNT for details. Additional information: See MEDIA1-PAGE-COUNT.')
media16_engine_media_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 16, 4), Integer32()).setLabel("media16-engine-media-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media16_engine_media_mode.setStatus('optional')
if mibBuilder.loadTexts: media16_engine_media_mode.setDescription('The engine processing characteristics that are to be applied to this media. The processing characteristics are device specific. Additional information: See the description for MEDIA1-ENGINE-MEDIA-MODE.')
media17_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 17, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24))).setLabel("media17-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media17_name.setStatus('optional')
if mibBuilder.loadTexts: media17_name.setDescription('Media 17 name. Additional information: See MEDIA1-NAME.')
media17_short_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 17, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11))).setLabel("media17-short-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media17_short_name.setStatus('optional')
if mibBuilder.loadTexts: media17_short_name.setDescription('Length restricted version of the media name 17. See MEDIA1-SHORT-NAME for details. Additional information: See MEDIA1-SHORT-NAME.')
media17_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 17, 3), Integer32()).setLabel("media17-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: media17_page_count.setStatus('optional')
if mibBuilder.loadTexts: media17_page_count.setDescription('Number of sheets of media 17 that have been printed. See MEDIA1-PAGE-COUNT for details. Additional information: See MEDIA1-PAGE-COUNT.')
media17_engine_media_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 17, 4), Integer32()).setLabel("media17-engine-media-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media17_engine_media_mode.setStatus('optional')
if mibBuilder.loadTexts: media17_engine_media_mode.setDescription('The engine processing characteristics that are to be applied to this media. The processing characteristics are device specific. Additional information: See the description for MEDIA1-ENGINE-MEDIA-MODE.')
media18_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 18, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24))).setLabel("media18-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media18_name.setStatus('optional')
if mibBuilder.loadTexts: media18_name.setDescription('Media 18 name. Additional information: See MEDIA1-NAME.')
media18_short_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 18, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11))).setLabel("media18-short-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media18_short_name.setStatus('optional')
if mibBuilder.loadTexts: media18_short_name.setDescription('Length restricted version of the media name 18. See MEDIA1-SHORT-NAME for details. Additional information: See MEDIA1-SHORT-NAME.')
media18_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 18, 3), Integer32()).setLabel("media18-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: media18_page_count.setStatus('optional')
if mibBuilder.loadTexts: media18_page_count.setDescription('Number of sheets of media 18 that have been printed. See MEDIA1-PAGE-COUNT for details. Additional information: See MEDIA1-PAGE-COUNT.')
media18_engine_media_mode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 18, 4), Integer32()).setLabel("media18-engine-media-mode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media18_engine_media_mode.setStatus('optional')
if mibBuilder.loadTexts: media18_engine_media_mode.setDescription('The engine processing characteristics that are to be applied to this media. The processing characteristics are device specific. Additional information: See the description for MEDIA1-ENGINE-MEDIA-MODE.')
media19_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 19, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24))).setLabel("media19-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media19_name.setStatus('optional')
if mibBuilder.loadTexts: media19_name.setDescription('Media 19 name. Additional information: See MEDIA1-NAME.')
media19_short_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 19, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11))).setLabel("media19-short-name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media19_short_name.setStatus('optional')
if mibBuilder.loadTexts: media19_short_name.setDescription('Length restricted version of the media name 19. See MEDIA1-SHORT-NAME for details. Additional information: See MEDIA1-SHORT-NAME.')
media19_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 3, 19, 3), Integer32()).setLabel("media19-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: media19_page_count.setStatus('optional')
if mibBuilder.loadTexts: media19_page_count.setDescription('Number of sheets of media 19 that have been printed. See MEDIA1-PAGE-COUNT for details. Additional information: See MEDIA1-PAGE-COUNT.')
# ---------------------------------------------------------------------------
# Device-wide media capability and print-registration scalars:
#  * engine-media-modes-supported1: comma-separated list of engine media modes
#  * media-number-of-type-supported: number of supported media types (also the
#    significant-bit count for MEDIA-NAMES-AVAILABLE, per the description)
#  * non-assured-oht-page-count: count of non-HP transparencies printed
#  * media-size-west-edge-*: image shift in 300 dpi dots relative to the west
#    edge of the medium, per printed side or per tray; descriptions state the
#    values persist in NVRAM.
# ---------------------------------------------------------------------------
engine_media_modes_supported1 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 4, 1), DisplayString()).setLabel("engine-media-modes-supported1").setMaxAccess("readonly")
if mibBuilder.loadTexts: engine_media_modes_supported1.setStatus('optional')
if mibBuilder.loadTexts: engine_media_modes_supported1.setDescription("The list of engine media modes supported by the device. The modes are each separated by a comma character. An example string would be: 'Normal,Rough,Low,Vellum'. Additional information: The list of engine media modes supported by the device. The modes are each separated by a comma character. An example string would be: 'Normal,Rough,Low,Vellum'.")
media_number_of_type_supported = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 8, 1), Integer32()).setLabel("media-number-of-type-supported").setMaxAccess("readonly")
if mibBuilder.loadTexts: media_number_of_type_supported.setStatus('optional')
if mibBuilder.loadTexts: media_number_of_type_supported.setDescription('Indicates the maximum number of supported media types. Additional information: Indicates the number of supported media type. This also indicates which bit in MEDIA-NAMES-AVAILABLE is significant')
non_assured_oht_page_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 7, 1), Integer32()).setLabel("non-assured-oht-page-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: non_assured_oht_page_count.setStatus('optional')
if mibBuilder.loadTexts: non_assured_oht_page_count.setDescription('This is a count of the number of invalid (non-HP Laser Jet) transparencies that have been printed on. This value is incremented every time an invalid OHT is printed on It is reset whenever the fuser count is set to 0. Additional information: This is a count of the number of invalid (non HP Laser Jet) transparencies that have been printed on. This value is incremented every time an invalid OHT is printed on. It is reset whenever the fuser count is set to 0.')
media_size_west_edge_first_side_offset = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 5, 2), Integer32()).setLabel("media-size-west-edge-first-side-offset").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media_size_west_edge_first_side_offset.setStatus('optional')
if mibBuilder.loadTexts: media_size_west_edge_first_side_offset.setDescription('Returns or sets the number of 300 dpi dots by which the image is shifted relative to the west edge of the medium (see NORTH-EDGE-OFFSET). The adjustment is for the first printed side of the medium only. A positive value moves the image away from the west edge of the medium. A negative value moves the image closer to the west edge of the medium. The value 0 will return the image to its factory default position. Additional information: Returns or sets the number of 300 dpi dots by which the image is shifted relative to the west edge of the medium (see NORTH-EDGE-OFFSET). The adjustment is for the first printed side of the medium only. A positive value moves the image away from the west edge of the medium. A negative value moves the image closer to the west edge of the medium. The value 0 will return the image to its factory default position. The values in the sub array index are from the media size table in the hpmib. This adjustment is done on a paper size by paper size basis. The standard PCL5 codes for paper size are used for the value used in the OID. Please see S_ARRAY_SUB1 for legal ')
media_size_west_edge_second_side_offset = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 5, 3), Integer32()).setLabel("media-size-west-edge-second-side-offset").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media_size_west_edge_second_side_offset.setStatus('optional')
if mibBuilder.loadTexts: media_size_west_edge_second_side_offset.setDescription('Returns or sets the number of 300 dpi dots by which the image is shifted relative to the west edge of the medium (see NORTH-EDGE-OFFSET). The adjustment is for the second printed side of the medium only. A positive value moves the image away from the west edge of the medium. A negative value moves the image closer to the west edge of the medium. The value 0 will return the image to its factory default position. Additional information: Returns or sets the number of 300 dpi dots by which the image is shifted relative to the west edge of the medium (see NORTH-EDGE-OFFSET). The adjustment is for the second printed side of the medium only. A positive value moves the image away from the west edge of the medium. A negative value moves the image closer to the west edge of the medium. The value 0 will return the image to its factory default position. The values in the sub array index are from the media size table in the hpmib. This adjustment is done on a paper size by paper size basis. The standard PCL5 codes for paper size are used for the value used in the OID. Please see S_ARRAY_SUB1 for legal values. The data for this object is stored in NVRAM.')
media_size_west_edge_side_offset_by_tray = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 5, 4), Integer32()).setLabel("media-size-west-edge-side-offset-by-tray").setMaxAccess("readwrite")
if mibBuilder.loadTexts: media_size_west_edge_side_offset_by_tray.setStatus('optional')
if mibBuilder.loadTexts: media_size_west_edge_side_offset_by_tray.setDescription('Returns or sets the number of 300 dpi dots by which the image is shifted relative to the west edge of the medium (see NORTH-EDGE-OFFSET). A positive value moves the image away from the west edge of the medium. A negative value moves the image closer to the west edge of the medium. The value 0 will return the image to its factory default position. Each OID binding corresponds to a valid tray number for this product. There can be a maximum of 15 trays, a combination of internal and external trays. Products that support other combinations of trays will specify this information in the device pos. The data for this object is stored in NVRAM. Additional information: Returns or sets the number of 300 dpi dots by which the image is shifted relative to the west edge of the medium (see NORTH-EDGE-OFFSET). A positive value moves the image away from the west edge of the medium. A negative value moves the image closer to the west edge of the medium. The value 0 will return the image to its factory default position. Each OID binding corresponds to a valid tray number for this product. There can be a maximum of 15 trays, a combination of internal and external trays. Products that support other combinations of trays will specify this information in the device pos. The data for this object is stored in NVRAM.')
# ---------------------------------------------------------------------------
# Service-channel printing status plus per-media-size usage accounting
# scalars (printed-media-*): simplex/duplex page counts, click charges,
# combined totals, dimplex counts and pixels-per-page capacity.  Counts are
# bounded Integer32 (0..930576247); charges are opaque OctetStrings.
# ---------------------------------------------------------------------------
service_channel_printing_status = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 2, 77, 1), OctetString()).setLabel("service-channel-printing-status").setMaxAccess("readonly")
if mibBuilder.loadTexts: service_channel_printing_status.setStatus('optional')
if mibBuilder.loadTexts: service_channel_printing_status.setDescription('This object is used to determine status of service channel printing. cServiceChannelPrintingActive - indicates that Service Channel Printing is active on this printer. cServiceChannelPrintingEnabled - indicates that this is a Service Channel Printing enabled printer. cCartridgeServiceChannelPrintingEnabled - indicates that the cartridge installed is a Service Channel cartridge. Additional information: This object is used to determine status of service channel printing. cServiceChannelPrintingActive - indicates that Service Channel Printing is active on this printer. cServiceChannelPrintingEnabled - indicates that this is a Service Channel Printing enabled printer. cCartridgeServiceChannelPrintingEnabled - indicates that the cartridge installed is a Service Channel cartridge.')
printed_media_simplex_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 930576247))).setLabel("printed-media-simplex-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_media_simplex_count.setStatus('optional')
if mibBuilder.loadTexts: printed_media_simplex_count.setDescription("Total number of simplex pages printed in this media size. Additional information: The 5 usage (simplex/duplex count, simplex/duplex click charge and paper size total) objects described here and below detail the usage for the printer for each paper size defined in the PCL Implementor's Guide and in the Media Size Table in the hpmib.txt. The OID binding is based on the click attribute and paper size. For example: The format for the OID is as follows: 3.4.1.5.x.y <-----> | | | | \\ / | paper size / \\ PRINTER-CLICK_TOTALS ROOT \\ OID click attribute 1..5 x values are 1..5: 1) simplex count 2) simplex click charge 3) duplex count 4) duplex click charge 5) printer or scanner paper size total (i.e. depends if the root OID is referrring to the printer or scanner). y : paper size as defined in the Media Size Table in the hpmib.txt ")
printed_media_simplex_charge = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 2), OctetString()).setLabel("printed-media-simplex-charge").setMaxAccess("readwrite")
if mibBuilder.loadTexts: printed_media_simplex_charge.setStatus('optional')
if mibBuilder.loadTexts: printed_media_simplex_charge.setDescription('Charge for each simplex page printed in this media size.')
printed_media_duplex_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 930576247))).setLabel("printed-media-duplex-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_media_duplex_count.setStatus('optional')
if mibBuilder.loadTexts: printed_media_duplex_count.setDescription('Total number of duplex pages printed in this media size.')
printed_media_duplex_charge = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 4), OctetString()).setLabel("printed-media-duplex-charge").setMaxAccess("readwrite")
if mibBuilder.loadTexts: printed_media_duplex_charge.setStatus('optional')
if mibBuilder.loadTexts: printed_media_duplex_charge.setDescription('Charge for each duplex page printed in this media size. Additional information: The click charges for duplex printed media.')
printed_media_total_charge = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 5), OctetString()).setLabel("printed-media-total-charge").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_media_total_charge.setStatus('optional')
if mibBuilder.loadTexts: printed_media_total_charge.setDescription('The total charge for pages printed in this media size.')
printed_media_maximum_pixels_per_page = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 6), Integer32()).setLabel("printed-media-maximum-pixels-per-page").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_media_maximum_pixels_per_page.setStatus('optional')
if mibBuilder.loadTexts: printed_media_maximum_pixels_per_page.setDescription('The number of pixels required to completely fill a page of this media size. The device POS will specify the resolution at which this pixel count was calculated. Additional information: The number of pixels required to fill a page of a specific media size. The OID binding is the media size you to query for. Only media sizes which the printer supports will be available.')
printed_media_combined_total = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 7), OctetString()).setLabel("printed-media-combined-total").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_media_combined_total.setStatus('optional')
if mibBuilder.loadTexts: printed_media_combined_total.setDescription('Total number of letter equivalently weighted pages both color and mono combined with this printer. Additional information: The combined total per page size of simplex and duplex color pages plus simplex and duplex mono pages.')
printed_media_dimplex_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 930576247))).setLabel("printed-media-dimplex-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_media_dimplex_count.setStatus('optional')
if mibBuilder.loadTexts: printed_media_dimplex_count.setDescription('Total number of mono dimplex pages printed in this media size. A dimplex page is one that has been printed in duplex mode but the back side is blank. Dimplex pages occur when the printer firmware inserts a blank page in order to complete a duplexed job which is sent to the printer with an odd number of pages.')
# ---------------------------------------------------------------------------
# Printer-wide usage totals (charge, toner coverage, staples), the four
# read-write usage-page instruction lines (DisplayString 0..40), and the
# life-of-printer totals / per-entry counters for print modes, source trays
# and destination bins that appear on the usage page.
# ---------------------------------------------------------------------------
usage_printer_total_charge = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 2), OctetString()).setLabel("usage-printer-total-charge").setMaxAccess("readonly")
if mibBuilder.loadTexts: usage_printer_total_charge.setStatus('optional')
if mibBuilder.loadTexts: usage_printer_total_charge.setDescription('Total printer charge for all paper sizes printed.')
usage_average_toner_coverage = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 3), OctetString()).setLabel("usage-average-toner-coverage").setMaxAccess("readonly")
if mibBuilder.loadTexts: usage_average_toner_coverage.setStatus('optional')
if mibBuilder.loadTexts: usage_average_toner_coverage.setDescription('Reports the average toner area coverage of all pages printed over the life of the device. Counts pixels on each page, divides pixel count by pixels possible on a page to give the percentage of coverage, and keeps a rolling average percent weighted by area printed.')
usage_staple_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 930576247))).setLabel("usage-staple-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: usage_staple_count.setStatus('optional')
if mibBuilder.loadTexts: usage_staple_count.setDescription('Total number of staples used.')
usage_instructions_line1 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setLabel("usage-instructions-line1").setMaxAccess("readwrite")
if mibBuilder.loadTexts: usage_instructions_line1.setStatus('optional')
if mibBuilder.loadTexts: usage_instructions_line1.setDescription('The first line of usage instructions for the device user.Appears on Line 1 of the usage page.')
usage_instructions_line2 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setLabel("usage-instructions-line2").setMaxAccess("readwrite")
if mibBuilder.loadTexts: usage_instructions_line2.setStatus('optional')
if mibBuilder.loadTexts: usage_instructions_line2.setDescription('The second line of usage instructions for the device user.Appears on Line 1 of the usage page.')
usage_instructions_line3 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setLabel("usage-instructions-line3").setMaxAccess("readwrite")
if mibBuilder.loadTexts: usage_instructions_line3.setStatus('optional')
if mibBuilder.loadTexts: usage_instructions_line3.setDescription('The third line of usage instructions for the device user.Appears on Line 1 of the usage page.')
usage_instructions_line4 = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setLabel("usage-instructions-line4").setMaxAccess("readwrite")
if mibBuilder.loadTexts: usage_instructions_line4.setStatus('optional')
if mibBuilder.loadTexts: usage_instructions_line4.setDescription('The fourth line of usage instructions for the device user.Appears on Line 1 of the usage page.')
printed_modes_usage_total = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 9), Integer32()).setLabel("printed-modes-usage-total").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_modes_usage_total.setStatus('optional')
if mibBuilder.loadTexts: printed_modes_usage_total.setDescription(' This object reports the total color and mono print modes usage for the life of the printer. This value is reported on the usage page.')
source_tray_usage_total = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 10), Integer32()).setLabel("source-tray-usage-total").setMaxAccess("readonly")
if mibBuilder.loadTexts: source_tray_usage_total.setStatus('optional')
if mibBuilder.loadTexts: source_tray_usage_total.setDescription('This object reports the total source tray usage for the life of the printer. This value is reported on the usage page.')
destination_bin_usage_total = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 1, 11), Integer32()).setLabel("destination-bin-usage-total").setMaxAccess("readonly")
if mibBuilder.loadTexts: destination_bin_usage_total.setStatus('optional')
if mibBuilder.loadTexts: destination_bin_usage_total.setDescription('This object reports the total destination bin usage for the life of the printer. This value is reported on the usage page.')
printed_modes_total_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 4, 1, 5), Integer32()).setLabel("printed-modes-total-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: printed_modes_total_count.setStatus('optional')
if mibBuilder.loadTexts: printed_modes_total_count.setDescription('The total count for pages printed in this fuser mode.')
source_tray_usage_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 5, 1, 1), Integer32()).setLabel("source-tray-usage-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: source_tray_usage_count.setStatus('optional')
if mibBuilder.loadTexts: source_tray_usage_count.setDescription('Total number of pages printed from this source tray. Additional information: This object will track how many images have been printed with the original source tray as one of the following: 1. Envelope Feeder 2. Manual Feed Tray 3. MP Tray 4. Tray 1 5. Tray 2 6. Tray 3 7. External Tray 8. Other')
destination_bin_usage_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 16, 6, 1, 1), Integer32()).setLabel("destination-bin-usage-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: destination_bin_usage_count.setStatus('optional')
if mibBuilder.loadTexts: destination_bin_usage_count.setDescription('Total number of pages printed to this destination bin. Additional information: This object will track how many images have been printed with the original destination bin as one of the following: 1. Face Down Bin 2. Face Up Bin 3. External Bin 4. Other')
low_marking_agent_processing = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eStop", 1), ("eCont", 2)))).setLabel("low-marking-agent-processing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: low_marking_agent_processing.setStatus('optional')
if mibBuilder.loadTexts: low_marking_agent_processing.setDescription('Returns or changes how the device processes a low marking agent event. If eCont, then the device continues to print. If eStop, then the device stops until a continue event occurs.')
out_marking_agent_processing = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 5, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eStop", 1), ("eCont", 2)))).setLabel("out-marking-agent-processing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: out_marking_agent_processing.setStatus('optional')
if mibBuilder.loadTexts: out_marking_agent_processing.setDescription('Returns or changes how the device processes an out marking agent event. If eCont, then the device continues to print. If eStop, then the device stops until a continue event occurs.')
media_size_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 8, 5, 1), Integer32()).setLabel("media-size-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: media_size_count.setStatus('optional')
if mibBuilder.loadTexts: media_size_count.setDescription("The number of impressions printed on sheets of this media size. The device POS should state whether this value is lost across a power cycle or is kept in permanent storage. Additional information: The value of this object is persistent across a power cycle. The index for these objects, the last number of the OID, uniquely identifies the paper size. This value corresponds to the page sizes listed below. These values are also documented in the PCL Implementor's Guide and the PML Master MIB. 1 US-Executive 2 US-Letter 3 US-Legal 15 Statement 17 ROC 16K 18 JIS Executive 25 ISO and JIS A5 26 ISO and JIS A4 45 JIS B5 65 ISO B5 72 eJapanesePostcardDouble 80 Monarch 81 Commercal-10 90 International DL 91 International C5 100 International B5 101 Custom 32764 AnyCustomSize 32765 AnySize 32767 Unknown Paper Size")
# --- consumable-status subtree (...,4,1,10,1,1,*): per-cartridge "E-Label" data ---
# Scalar MIB objects read from the supply's memory tag.  Per the embedded
# descriptions, most objects exist only for authentic HP consumables; the
# *-units enums exist whenever the engine is E-Label capable, regardless of
# cartridge origin.  NOTE(review): this looks like pysmi-generated code —
# the human-readable status/description text is attached only when
# mibBuilder.loadTexts is true (the standard codegen pattern); do not hand-edit
# the OID tuples or description strings.
# Cartridge identity strings (authentic HP supplies only).
consumable_status_cartridge_model = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setLabel("consumable-status-cartridge-model").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_cartridge_model.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_cartridge_model.setDescription('This object is used to read the cartridge model number associated with this consumable. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
consumable_status_manufacturing_date = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setLabel("consumable-status-manufacturing-date").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_manufacturing_date.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_manufacturing_date.setDescription("This object is used to report the date on which this consumable was manufactured. The format of the string is 'YYYYMMDD', where YYYY is the year, MM is the month (1-12), and DD is the day (1-31). Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.")
consumable_status_serial_number = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setLabel("consumable-status-serial-number").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_serial_number.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_serial_number.setDescription('This object is used to report the serial number for this consumable. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
# Capacity: units enum plus total capacity of a new supply of this type.
consumable_status_capacity_units = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ePagesAt5PercentCoverage", 1), ("e1KPagesAt5PercentCoverage", 2), ("e10KPagesAt5PercentCoverage", 3)))).setLabel("consumable-status-capacity-units").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_capacity_units.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_capacity_units.setDescription('This object is used to report the usage units used by the CONSUMABLE-STATUS-TOTAL-CAPACITY object. Additional information: This object will only exist on engines that are E-Label capable, but will exist on these engines regardless of the cartridge being Authentic HP or NonHP. This object can be used to ensure the capability of the E-Label feature for a given engine.')
consumable_status_total_capacity = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-total-capacity").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_total_capacity.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_total_capacity.setDescription('This object is used to report the total capacity of a new consumable of this type. The PML object CONSUMABLE-STATUS-CAPACITY-UNITS can be used to determine the units of measure for this PML object. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
# Bit-collection status flags; per the description, cMarkedAsReordered is the
# only read-write bit.  (The following description string is one long literal
# that continues onto the next physical line — keep it intact.)
consumable_status_info = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 7), OctetString()).setLabel("consumable-status-info").setMaxAccess("readwrite")
if mibBuilder.loadTexts: consumable_status_info.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_info.setDescription('This object is used to read and write the various status flags supported by this consumable. For LaserJet 4100, LaserJet 4550 the collection bits supported include: cMarkedAsReordered - indicates the consumable has been reordered. This is the only bit that can be both read and written. cTonerLowCondition - indicates a toner low condition has occured. This bit in the collection is read-only to PML. cTonerOutCondition - indicates a toner out condition has occured. This bit in the collection is read-only to PML. cDeveloperLowCondition - indicates a developer low condition has occured. This bit in the collection is read-only to PML. cDeveloperOutCondition - indicates a developer out condition has occured. This bit in the collection is read-only to PML. cDrumLowCondition - indicates a drum low condition has occured. This bit in the collection is read-only to PML. cDrumOutCondition - indicates a drum out condition has occured. This bit in the collection is read-only to PML. cWasteTonerFullCondition - indicates a waste toner full condition has occured. This bit in the collection is read-only to PML. cWasteTonerFullWarningCondition - indicates a waste toner full warning condition has occured. This bit in the collection is read-only to PML. cNewConsumable - indicates the consumable is new and has never been used. This bit in the collection is read-only to PML. cTonerLowNotificationSent - inidicates that toner low notification has been sent for this consumable. This bit in the collection is readonly. cTonerOutNotificationSent - inidicates that toner out notification has been sent for this consumable. This bit in the collection is read only. cAnyPartLowCondition - indicates that at least one part of this consumable has reached a low condition. This bit in the collection is read-only to PML. cAnyPartOutCondition - indicates that at least one part of this consumable has reached an out condition. 
This bit in the collection is read-only to PML. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
# Install / last-use dates ('YYYYMMDD'; fixed at 20000101 on clockless printers).
consumable_status_first_install_date = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setLabel("consumable-status-first-install-date").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_first_install_date.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_first_install_date.setDescription("This object is used to report the date on which this consumable was first installed. The format of the string is 'YYYYMMDD'. Where: YYYY is the year. MM is the month (1-12). DD is the day (1-31). The device POS needs to indicate what will be written in the event the printer does not have a real time clock. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist. For printers without internal clocks, the date will always be 20000101.")
consumable_status_last_use_date = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setLabel("consumable-status-last-use-date").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_last_use_date.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_last_use_date.setDescription("This object is used to report the date on which this consumable was last used. The format of the string is 'YYYYMMDD'. Where: YYYY is the year. MM is the month (1-12). DD is the day (1-31). The device POS needs to indicate what will be written in the event the printer does not have a real time clock. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist. For printers without internal clocks, the date will always be 20000101.")
# Per-media-size page counters plus a job counter.
consumable_status_page_count_legal = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-page-count-legal").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_page_count_legal.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_page_count_legal.setDescription('This object is used to report the number of pages that have been printed by this consumable that are of the specified media size - Legal. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
consumable_status_page_count_a4_letter = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-page-count-a4-letter").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_page_count_a4_letter.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_page_count_a4_letter.setDescription('This object is used to report the number of pages that have been printed by this consumable that are of the specified media size - A4 or Letter. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
consumable_status_page_count_b5_executive = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-page-count-b5-executive").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_page_count_b5_executive.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_page_count_b5_executive.setDescription('This object is used to report the number of pages that have been printed by this consumable that are of the specified media size - B5 or Executive. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
consumable_status_page_count_envelope = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-page-count-envelope").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_page_count_envelope.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_page_count_envelope.setDescription('This object is used to report the number of pages that have been printed by this consumable that are of the specified media size - Envelope. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
consumable_status_page_count_xy_other = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-page-count-xy-other").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_page_count_xy_other.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_page_count_xy_other.setDescription('This object is used to report the number of pages that have been printed by this consumable that are of the specified media size - XY or Other. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
consumable_status_job_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-job-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_job_count.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_job_count.setDescription('This object is used to report the number of jobs that have been printed by this consumable. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
# Remaining-usage estimates: cartridge-reported (usage-*) and engine-estimated
# (engine-usage-*), each with its own units enum.
consumable_status_usage_units = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("ePixels", 1), ("eTenthsOfGrams", 2), ("eGrams", 3), ("eRotations", 4), ("ePages", 5), ("eImpressions", 6), ("ePercentLifeRemaining", 7), ("eOther", 8)))).setLabel("consumable-status-usage-units").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_usage_units.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_usage_units.setDescription('This object is used to report the units used to measure the capacity of this consumable. Additional information: This object will only exist on engines that are E-Label capable, but will exist on these engines regardless of the cartridge being Authentic HP or NonHP. This object can be used to ensure the capability of the E-Label feature for a given engine.')
consumable_status_usage_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-usage-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_usage_count.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_usage_count.setDescription('This object is used to report the number of usage units that remain in this consumable. The units of measurement used by this object can be obtained by querying the CONSUMABLE-STATUS-USAGE-UNITS object. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
# Manufacturer / OEM name strings (OEM is expected to read 'HP').
consumable_status_manufacturer_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 19), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setLabel("consumable-status-manufacturer-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_manufacturer_name.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_manufacturer_name.setDescription('This object is used to report the name of the manufacturer of this consumable. The device POS will specify the set of manufacturers that could be returned. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
consumable_status_oem_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 20), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setLabel("consumable-status-oem-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_oem_name.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_oem_name.setDescription("This object is used to report the name of the OEM of this consumable. This object will typically return the string 'HP'. The device POS will specify if other OEM consumables are supported. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist. This object should always return 'HP' as the OEM name")
consumable_status_engine_usage_units = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("ePixels", 1), ("eTenthsOfGrams", 2), ("eGrams", 3), ("eRotations", 4), ("ePages", 5), ("eImpressions", 6), ("ePercentLifeRemaining", 7), ("eOther", 8)))).setLabel("consumable-status-engine-usage-units").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_engine_usage_units.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_engine_usage_units.setDescription('This object is used to report the units used to measure the capacity of this consumable as reported by the Engine. Additional information: This object will only exist on engines that are E-Label capable, but will exist on these engines regardless of the cartridge being Authentic HP or NonHP. This object can be used to ensure the capability of the E-Label feature for a given engine.')
consumable_status_engine_usage_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-engine-usage-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_engine_usage_count.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_engine_usage_count.setDescription('This object is used to report the number of usage units, estimated by the engine, that remain in this consumable. The units of measurement used by this object can be obtained by querying the CONSUMABLE-STATUS-ENGINE-USAGE-UNITS object. Additional information: This object will only exist for Authentic HP consumables. If the cartridge is deemed to be NonHP, then this object will not exist.')
# Drum life remaining (only meaningful for supplies that contain a drum).
consumable_status_drum_life_units = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 38), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("ePixels", 1), ("eTenthsOfGrams", 2), ("eGrams", 3), ("eRotations", 4), ("ePages", 5), ("eImpressions", 6), ("ePercentLifeRemaining", 7), ("eOther", 8)))).setLabel("consumable-status-drum-life-units").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_drum_life_units.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_drum_life_units.setDescription('This object is used to report the units used to measure the drum life remaining. Additional information: This object will only exist on engines that are E-Label capable, but will exist on these engines regardless of the cartridge being Authentic HP or NonHP. This object can be used to ensure the capability of the E-Label feature for a given engine.')
consumable_status_drum_life = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 39), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("consumable-status-drum-life").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_drum_life.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_drum_life.setDescription('This object is used to report the number of life units, remaining in the drum. The units of measurement used by this object can be obtained by querying the CONSUMABLE- STATUS-DRUM-LIFE-UNITS object. This object will not exist if this consumable does not contain a drum. Additional information: This object will only exist on engines that are E-Label capable, but will exist on these engines regardless of the cartridge being Authentic HP or NonHP. This object can be used to ensure the capability of the E-Label feature for a given engine.')
# E-Label authentication level (enum 1-3).
consumable_status_authentication = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 1, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("eAuthLevel1", 1), ("eAuthLevel2", 2), ("eAuthLevel3", 3)))).setLabel("consumable-status-authentication").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_status_authentication.setStatus('optional')
if mibBuilder.loadTexts: consumable_status_authentication.setDescription('This object returns the authentication level of the elabel on the given supply. The elabel authentication is returned via the appropriate enum. Additional information: There are three levels of authentication of an ELabel cartridge supported. Each ELabel cartridges level of authentication is provided here, via this enumeration.')
# --- consumable free-form information string, its reset control, and the
# --- consumables reorder URL (all read-write).  Generated (pysmi-style) MIB
# --- object definitions; descriptions are attached only when
# --- mibBuilder.loadTexts is true.
consumable_string_information = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 8, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 287))).setLabel("consumable-string-information").setMaxAccess("readwrite")
if mibBuilder.loadTexts: consumable_string_information.setStatus('optional')
if mibBuilder.loadTexts: consumable_string_information.setDescription('This object is used to read and write the string value that describes the consumable information.')
consumable_string_information_reset = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 8, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("ePresetToNVRAM", 1)))).setLabel("consumable-string-information-reset").setMaxAccess("readwrite")
if mibBuilder.loadTexts: consumable_string_information_reset.setStatus('optional')
if mibBuilder.loadTexts: consumable_string_information_reset.setDescription('This object is used to reset (set back to factory default) or read the current status of the corresponding information string. When Set to eResetToDefault, this object can be used to reset the corresponding information object back to its factory default value. Setting this object with an enumerated value of eCustomized has no effect. When a Get operation is performed on this object it will return a value eResetToDefault if still set to its factory value. It will return eCustomized if the corresponding information value has been set to a custom value. Additional information: This object returns ePresetToNVRAM(1) if CONSUMABLE-STRING-INFORMATION is currently set to the default string. It will return eCustomized(2) otherwise. However, we can explicitly set this object only to ePresetToNVRAM(1) and not eCustomized(2).')
# Reorder URL: factory-set but writable (max 64 characters per the constraint).
consumable_reorder_url = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setLabel("consumable-reorder-url").setMaxAccess("readwrite")
if mibBuilder.loadTexts: consumable_reorder_url.setStatus('optional')
if mibBuilder.loadTexts: consumable_reorder_url.setDescription('This object is used to read and write the URL that can be used to reorder consumables for this device. This URL is set at the factory but can be updated by a reseller or third party. Additional information: The URL can be up to 64 characters long.')
# --- maintenance-kit counters, per-consumable state bitmask, and life-usage
# --- scalars.  Generated (pysmi-style) MIB object definitions; descriptions
# --- are attached only when mibBuilder.loadTexts is true.
consumable_maintenance_interval = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 3), Integer32()).setLabel("consumable-maintenance-interval").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_maintenance_interval.setStatus('optional')
if mibBuilder.loadTexts: consumable_maintenance_interval.setDescription('This object reports the total number of pages that can be printed before the maintenance kit needs to be replaced.')
consumable_maintenance_pages_remaining = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 4), Integer32()).setLabel("consumable-maintenance-pages-remaining").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_maintenance_pages_remaining.setStatus('optional')
if mibBuilder.loadTexts: consumable_maintenance_pages_remaining.setDescription('This object reports the number of pages remaining until the Maintenance Interval is reached.')
# Current-state bit collection (auth level, low/out/missing, etc.).  NOTE: the
# description string below is one long literal that continues onto the next
# physical line — keep it intact.
consumable_current_state = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 7), OctetString()).setLabel("consumable-current-state").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_current_state.setStatus('optional')
if mibBuilder.loadTexts: consumable_current_state.setDescription('This PML object returns the current state of the particular consumable. cAuthLevel1 - Consumable is at Authentication Level 1 cAuthLevel2 - Consumable is at Authentication Level 2 cAuthLevel3 - Consumable is at Authentication Level 3 cGenuineHPUnsupported - Cartridge is GenuineHP intended for another product cDefectiveMemory - Cartridge has a defective memory tag cMissingMemory - Memory tag is missing from the cartridge cLowCondition - Consumable has reached the engine low threshold cOutCondition - Consumable has reached its out threshold cIncorrect - Cartridge inserted is not the correct one cMissing - Consumable is missing from the printer cConfigurableLow - Consumable has reached the configurable low threshold value cStatusArevalid - The status reported on other bits are valid only if the bit is set to 1. If it is 0, the values are invalid. These are the possible states and whenever the consumable is in any of these states, the appropriate bit will be set. The cLowCondition will be set when the consumable reaches the engine low threshold, and cConfigurableLow will be set when the consumable reaches the Configurable Low threshold value. For non-cartridge supplies only cLowCondition, cOutCondition, and cMissing will be supported. Additional information: This object returns the current state of the particular consuma ble. 
cAuthLevel1 - Consumable is at Authentication Level 1 cAuthLevel2 - Consumable is at Authentication Level 2 cAuthLevel3 - Consumable is at Authentication Level 3 cGenuineHPUnsupported - Cartridge is GenuineHP intended for ano ther product cDefectiveMemory - Cartridge has a Defective Memory cMissingMemory - Memory is Missing from the Cartridge cLowCondition - Consumable has reached its low threshold cOutCondition - Consumable has reached its out threshold cIncorrect - Cartridge inserted is not the correct one cMissing - Consumable is Missing from the Printer cStatusAreValid - The Status reported on other bits are valid o nly if this bit is set to 1. If it is 0, the values are invalid. These are the possible states and whenever a Consumable is in a ny of these states, the appropriate bit will be set. For Non Cartridge Supp lies only cLowCondition, cOutCondition and cMissing will be supported.')
# Life-usage estimate (-2 means "cannot report"), its units enum, and the
# low-state threshold.
consumable_life_usage_units_remaining = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 5, 1, 1), Integer32()).setLabel("consumable-life-usage-units-remaining").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_life_usage_units_remaining.setStatus('optional')
if mibBuilder.loadTexts: consumable_life_usage_units_remaining.setDescription('This object reports the current estimate of the number of usage units that remain before this supply is depleted. An installed supply that cannot report such a number will return a value of -2. It is the reponsibility of the host application to query each supply in order to determine an overall device USAGE-UNITS-REMAINING number - the lowest value returned. The unit of measure for this life estimate is determined by reading the corresponding CONSUMABLE-LIFE-USAGE-UNITS object. Additional information: This object will return the Pages or Estimated Pages remaining for the speciffic supply requested by the leaf node of this object.')
consumable_life_usage_units = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ePagesRemaining", 1), ("eEstimatedPagesRemaining", 2)))).setLabel("consumable-life-usage-units").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_life_usage_units.setStatus('optional')
if mibBuilder.loadTexts: consumable_life_usage_units.setDescription('This object reports current usage units in use by the corresponding CONSUMABLE-LIFE-USAGE-UNITS-REMAINING object. Additional information: This object returns the units that CONSUMABLE-LIFE-USAGE-UNITS-REMAINING is returned in. Either ePagesRemaining(1) or eEstimatedPagesRemaining(2).')
consumable_life_low_threshold = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 10, 5, 1, 3), Integer32()).setLabel("consumable-life-low-threshold").setMaxAccess("readonly")
if mibBuilder.loadTexts: consumable_life_low_threshold.setStatus('optional')
if mibBuilder.loadTexts: consumable_life_low_threshold.setDescription('This object is used to report and modify a threshold value indicating the point in the life of a consumable or supply at which a transition to a LOW state will occur.')
# --- per-color-plane marking-agent (toner) coverage statistics.  Generated
# --- (pysmi-style) MIB object definitions; per the descriptions, the OID
# --- binding selects the color plane.  Descriptions are attached only when
# --- mibBuilder.loadTexts is true.
printer_average_marking_agent_coverage = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 11, 1, 1), OctetString()).setLabel("printer-average-marking-agent-coverage").setMaxAccess("readonly")
if mibBuilder.loadTexts: printer_average_marking_agent_coverage.setStatus('optional')
if mibBuilder.loadTexts: printer_average_marking_agent_coverage.setDescription('This object is used to read the average marking agent coverage for a given color plane. It is a real number that represents percentage full and will read from 0 to 100%. This object obsoletes or replaces USAGE-AVERAGE-TONER-COVERAGE which could only be used for black toner coverage information. Additional information: The average percent of toner coverage for a specific color plane. The OID binding is the color plane. Only color planes which the printer supports will be available. This is only available if a hard disk is installed.')
# Running sum and sum-of-squares of per-page coverage (enables mean/variance).
printer_average_marking_agent_coverage_sum = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 11, 1, 2), OctetString()).setLabel("printer-average-marking-agent-coverage-sum").setMaxAccess("readonly")
if mibBuilder.loadTexts: printer_average_marking_agent_coverage_sum.setStatus('optional')
if mibBuilder.loadTexts: printer_average_marking_agent_coverage_sum.setDescription('This object is used to read the accumulated sum of the percent coverage numbers calculated on a per page basis over the life of the printer. Additional information: The sum of the percent toner coverages for all impressions printed. The OID binding is the color plane. Only color planes which the printer supports will be available.')
printer_average_marking_agent_coverage_sum_squared = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 11, 1, 3), OctetString()).setLabel("printer-average-marking-agent-coverage-sum-squared").setMaxAccess("readonly")
if mibBuilder.loadTexts: printer_average_marking_agent_coverage_sum_squared.setStatus('optional')
if mibBuilder.loadTexts: printer_average_marking_agent_coverage_sum_squared.setDescription('This object is used to read the accumulated sum of the squares of the percent coverage numbers calculated on a per page basis over the life of the printer. Additional information: The sum of the percent toner coverages squared for all impressions printed. The OID binding is the color plane. Only color planes which the printer supports will be available.')
# Pixels-per-gram conversion factor used for pages-remaining estimates.
printer_average_marking_agent_units_per_gram = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 11, 1, 4), OctetString()).setLabel("printer-average-marking-agent-units-per-gram").setMaxAccess("readonly")
if mibBuilder.loadTexts: printer_average_marking_agent_units_per_gram.setStatus('optional')
if mibBuilder.loadTexts: printer_average_marking_agent_units_per_gram.setDescription('This object is used to read the marking agent units of measure per gram. This is typically a conversion factor for converting pixels of toner to grams. This pixels-per-gram value is used to calculate the estimated number of pages remaining for a given marking agent.')
printer_average_marking_agent_coverage_actual = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 4, 1, 11, 1, 5), OctetString()).setLabel("printer-average-marking-agent-coverage-actual").setMaxAccess("readonly")
if mibBuilder.loadTexts: printer_average_marking_agent_coverage_actual.setStatus('optional')
if mibBuilder.loadTexts: printer_average_marking_agent_coverage_actual.setDescription('This object is used to read the actual average marking agent coverage for a given color plane. It is a real number that represents percent coverage and will read from 0 to 100%. This object will return the same value as PRINTER-AVERAGE-MARKING-AGENT-COVERAGE except under certain conditions which will be specified in the device POS. Additional information: This object will return the historical page coverage based on the specific supply requested. It is not based on the printers historical value')
# --- Embedded Web Server feature enable/disable bitmask (read-write).
# Generated (pysmi-style) MIB object definition; description attached only
# when mibBuilder.loadTexts is true.
web_server_security = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 3, 9, 1, 3), OctetString()).setLabel("web-server-security").setMaxAccess("readwrite")
if mibBuilder.loadTexts: web_server_security.setStatus('optional')
if mibBuilder.loadTexts: web_server_security.setDescription('Each collection bit represents a device or Embedded Web Server feature that can be enabled or disabled via this object.')
# --- firmware-download subtree (OID ...11.2.3.9.4.2.1.1.18.*) ---
# Scalars describing the printer's remote firmware upgrade (RFU) process:
# capability flags, flash write counters/limits, current download state,
# and the name/version strings management tools match against RFU images.

# eTrue if the printer can report write-status while a download is in progress.
firmware_download_write_status_supported = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eFalse", 1), ("eTrue", 2)))).setLabel("firmware-download-write-status-supported").setMaxAccess("readonly")
if mibBuilder.loadTexts: firmware_download_write_status_supported.setStatus('optional')
if mibBuilder.loadTexts: firmware_download_write_status_supported.setDescription('This object provides information on whether the printer has the ability to communicate the write-status of the firmware download while the download is taking place.')
# Estimated time (per description, seconds) the printer is unreachable via PML during a write.
firmware_download_write_time = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18, 2), Integer32()).setLabel("firmware-download-write-time").setMaxAccess("readonly")
if mibBuilder.loadTexts: firmware_download_write_time.setStatus('optional')
if mibBuilder.loadTexts: firmware_download_write_time.setDescription('If the design of the firmware-download implementation does not allow PML interaction during the download process, this value provides an estimation of the time where the printer will not be able to engage in PML communication. Additional information: If the design of the firmware-download implementation does not allow PML interaction during the download process, this value provides an estimation of the time where the printer will not be able to engage in PML communication. The calculated time is expected to be 140 secs. However, this time may increase as high as 280 secs as the flash part nears the maximum flash count.')
# Number of firmware downloads performed so far on the flash part.
firmware_download_write_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18, 3), Integer32()).setLabel("firmware-download-write-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: firmware_download_write_count.setStatus('optional')
if mibBuilder.loadTexts: firmware_download_write_count.setDescription('Tells the number of times that firmware has been downloaded to the flash part. Additional information: Tells the number of times that firmware has been downloaded to the flash part. The default value will vary depending on how many times the firmware is rolled before shipping.')
# Download state machine (read-write: eCancelDownload etc. can be set by a manager).
firmware_download_current_state = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("eIdle", 1), ("eReceivingImage", 2), ("eReceivedImageError", 3), ("eVerifyingImage", 4), ("eVerifiedImageError", 5), ("eWritingImage", 6), ("eWritingImageError", 7), ("eDownloadComplete", 8), ("eOKtoShutDown", 9), ("eCancelDownload", 10), ("eShuttingDown", 11)))).setLabel("firmware-download-current-state").setMaxAccess("readwrite")
if mibBuilder.loadTexts: firmware_download_current_state.setStatus('optional')
if mibBuilder.loadTexts: firmware_download_current_state.setDescription('Provides the current or last reportable state of the firmware download process. The current state may not necessarily be the current state, but could be the post-mortem state.')
# Vendor-specified flash write limit: -1 = unlimited, 0 = download forbidden.
firmware_download_maximum_write_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18, 5), Integer32()).setLabel("firmware-download-maximum-write-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: firmware_download_maximum_write_count.setStatus('optional')
if mibBuilder.loadTexts: firmware_download_maximum_write_count.setDescription('Reports the manufacturer specified number of times that firmware can be downloaded to the flash part. A value of -1 means that there is no limit. A value of 0 means that downloading firmware is not permitted by this part. Any other positive integer value corresponds to the number of times that firmware can be downloaded to the flash part. Additional information: Reports the manufacturer specified number of times that firmware can be downloaded to the flash part. A value of -1 means that there is no limit. A value of 0 means that downloading firmware is not permitted by this part. Any other positive integer value corresponds to the number of times that firmware can be downloaded to the flash part. The current maximum write count is 500. This may change as flash technology evolves.')
# Printer name; must match the name in an RFU image header for an upgrade to apply.
firmware_download_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18, 6), DisplayString()).setLabel("firmware-download-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: firmware_download_name.setStatus('mandatory')
if mibBuilder.loadTexts: firmware_download_name.setDescription('This returns the name of the printer. This should match the name in the header of the upgrade image being sent to the flash part. If the name does not match with the name returned then the image that we are attempting to download does not upgrade the printer firmware. Additional information: This object returns the HP name of the printer. This should match what is in the PJL header of the RFU job Web Jet Admin uses this to ensure the printer is available to be upgraded.')
# Currently running firmware revision string.
firmware_download_version = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 18, 7), DisplayString()).setLabel("firmware-download-version").setMaxAccess("readonly")
if mibBuilder.loadTexts: firmware_download_version.setStatus('mandatory')
if mibBuilder.loadTexts: firmware_download_version.setDescription('This object will return a string value representing the current revision of firmware that the printer is operating with. This is used to determine if code needs to be upgraded when an firmware bundle comes in, in an upgrade job. Additional information: Web Jet Admin uses the version string that is returned to determine what peices of an RFU bundle need to be upgraded when an RFU job is being built. This objects will return the version string of the printer.')
# --- upgradable-devices subtree (OID ...11.2.3.9.4.2.1.1.20.*) ---
# Mirrors the firmware-download scalars above, but per attached upgradable
# device rather than for the printer engine itself, plus a global enable.

# eTrue if the device can report write-status during its upgrade.
upgradable_devices_write_status_supported = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eFalse", 1), ("eTrue", 2)))).setLabel("upgradable-devices-write-status-supported").setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradable_devices_write_status_supported.setStatus('optional')
if mibBuilder.loadTexts: upgradable_devices_write_status_supported.setDescription('This object provides information on whether the upgradable device has the ability to communicate the write-status of the upgrade while the upgrade is taking place.')
# Estimated PML-unavailable time while the device's image is written.
upgradable_devices_write_time = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 2), Integer32()).setLabel("upgradable-devices-write-time").setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradable_devices_write_time.setStatus('optional')
if mibBuilder.loadTexts: upgradable_devices_write_time.setDescription('If the design of the device upgrade implementation does not allow PML interaction during the download process, this value provides an estimation of the time where the device will not be able to engage in PML communication. The time returned will depend upon what device is attempting to be upgraded.')
# Per-device flash write counter.
upgradable_devices_write_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 3), Integer32()).setLabel("upgradable-devices-write-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradable_devices_write_count.setStatus('optional')
if mibBuilder.loadTexts: upgradable_devices_write_count.setDescription('Tells the number of times that firmware for device X has been downloaded to the flash part. The default value will vary depending on how many times the firmware is rolled before shipping.')
# Device upgrade state machine (read-only; fewer states than the printer's).
upgradable_devices_current_state = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("eIdle", 1), ("eReceivedImage", 2), ("eReceivedImageError", 3), ("eVerifiedImage", 4), ("eVerifiedImageError", 5), ("eWritingImage", 6), ("eWritingImageError", 7), ("eUpgradeComplete", 8)))).setLabel("upgradable-devices-current-state").setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradable_devices_current_state.setStatus('optional')
if mibBuilder.loadTexts: upgradable_devices_current_state.setDescription('Provides the current or last reportable state of the device upgrade process. The current state may not necessarily be the current state, but could be the post-mortem state.')
# Per-device flash write limit: -1 = unlimited, 0 = upgrade forbidden.
upgradable_devices_max_write_count = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 5), Integer32()).setLabel("upgradable-devices-max-write-count").setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradable_devices_max_write_count.setStatus('optional')
if mibBuilder.loadTexts: upgradable_devices_max_write_count.setDescription('Reports the manufacturer specified number of times that firmware for device X can be downloaded to the flash part. A value of -1 means that there is no limit. A value of 0 means that downloading firmware is not permitted by this part. Any other positive integer value corresponds to the number of times that the firmware for device X can be downloaded to the flash part. The current maximum write count is 500. This may change as flash technology evolves.')
# Device name; matched against the upgrade image header.
upgradable_devices_name = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 6), DisplayString()).setLabel("upgradable-devices-name").setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradable_devices_name.setStatus('mandatory')
if mibBuilder.loadTexts: upgradable_devices_name.setDescription('This returns the name of the upgradable device. This should match the name in the header of the upgrade image being sent to the flash part. If the name does not match with any of the names returned then the device that we are attempting to upgrade is unavailable for upgrading. Additional information: This object returns the HP name of the printer and the upgradable devices. There is one Name object per upgradable device. This should match what is in the PJL header of the RFU job Web Jet Admin uses this to ensure the printer or device is available to be upgraded.')
# Current firmware revision string of device X.
upgradable_devices_version = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 7), DisplayString()).setLabel("upgradable-devices-version").setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradable_devices_version.setStatus('mandatory')
if mibBuilder.loadTexts: upgradable_devices_version.setDescription('This object will return a string value representing the current revision of firmware that device X is operating with. This is used to determine if code needs to be upgraded when a firmware bundle comes in, in an upgrade job. Additional information: Web Jet Admin uses the version string that is returned to determine what peices of an RFU bundle need to be upgraded when an RFU job is being built. This internal object will return the current version string of the printer or device that it corresponds to.')
# Global on/off switch for remote firmware upgrades of all devices
# (read-write; no per-device enable exists).
remote_upgrade_enable = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 9, 4, 2, 1, 1, 20, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eOff", 1), ("eOn", 2)))).setLabel("remote-upgrade-enable").setMaxAccess("readwrite")
if mibBuilder.loadTexts: remote_upgrade_enable.setStatus('mandatory')
if mibBuilder.loadTexts: remote_upgrade_enable.setDescription('Enables or disables the ability to upgrade the firmware associated with any device that is available to be upgraded. This will be set to on or off at the RFU level in that if this mode is on any available device can be upgraded and if this mode is off none of the devices can be upgraded. There is not an individual mode for each device. Trying to set this to a value other than eOn or eOff will cause an UnsupportedValue error to occur.')
# Register every MIB object defined in this module with the builder under the
# "LJ4250-MIB" module name so other compiled MIBs and the SNMP engine can
# import them.  The registrations are split across three calls (generated by
# pysmi); each keyword is symbol-name=object.  Note `pysmi_continue` is the
# generator's mangling of the reserved word `continue`, and PYSNMP_MODULE_ID
# marks `hp` as this module's identity object.  Auto-generated — do not edit
# by hand.
mibBuilder.exportSymbols("LJ4250-MIB", job_info_attr_13=job_info_attr_13, error38_code=error38_code, engine_self_diagnostic=engine_self_diagnostic, media6_name=media6_name, tray3_media_size_loaded=tray3_media_size_loaded, media_names_available=media_names_available, job_info_accounting_grayscale_impression_count=job_info_accounting_grayscale_impression_count, file_system4=file_system4, simm=simm, channelnumberofchannels=channelnumberofchannels, consumable_string_information_reset=consumable_string_information_reset, control_panel_display_graphical_contents=control_panel_display_graphical_contents, printed_media_usage=printed_media_usage, error14_time_stamp=error14_time_stamp, error2=error2, port1_parallel_speed=port1_parallel_speed, phd2_capacity=phd2_capacity, media9=media9, job_info_stage=job_info_stage, error46=error46, job_info_pages_printed=job_info_pages_printed, upgradable_devices_write_status_supported=upgradable_devices_write_status_supported, media2_engine_media_mode=media2_engine_media_mode, date_and_time=date_and_time, media3_page_count=media3_page_count, clearable_warning=clearable_warning, errorlog=errorlog, simm1=simm1, pdl=pdl, job=job, media8_name=media8_name, energy_star=energy_star, job_info=job_info, settings_marking_agent=settings_marking_agent, media4_name=media4_name, simm3_bank2_type=simm3_bank2_type, simm3_bank1_capacity=simm3_bank1_capacity, job_info_printed_originals=job_info_printed_originals, media6_page_count=media6_page_count, error36_code=error36_code, mio1_manufacturing_info=mio1_manufacturing_info, file_system_wipe_disk_status=file_system_wipe_disk_status, tray5_phd=tray5_phd, media13=media13, media3_short_name=media3_short_name, duplex_page_count=duplex_page_count, media10_page_count=media10_page_count, consumable_status_job_count=consumable_status_job_count, error8_time_stamp=error8_time_stamp, error17_code=error17_code, intray3=intray3, display_status=display_status, error12=error12, error31_code=error31_code, 
settings_rpc=settings_rpc, printer_average_marking_agent_coverage=printer_average_marking_agent_coverage, error24=error24, localization_countries_supported=localization_countries_supported, channelifindex=channelifindex, held_job_delete=held_job_delete, print_engine_revision=print_engine_revision, simm5_type=simm5_type, spooler=spooler, media5_engine_media_mode=media5_engine_media_mode, mio3_type=mio3_type, media11_short_name=media11_short_name, media_number_of_type_supported=media_number_of_type_supported, hp=hp, default_page_protect=default_page_protect, consumable_status_first_install_date=consumable_status_first_install_date, media7_page_count=media7_page_count, error41_code=error41_code, media_modes=media_modes, job_info_requested_originals=job_info_requested_originals, intray4=intray4, channel_io_errors=channel_io_errors, timestamp=timestamp, model_number=model_number, media2_page_count=media2_page_count, mio1_type=mio1_type, simm4_bank2_type=simm4_bank2_type, tray4_media_size_loaded=tray4_media_size_loaded, media14_engine_media_mode=media14_engine_media_mode, error36=error36, ram_disk_mode=ram_disk_mode, error38=error38, channel_bytes_sent=channel_bytes_sent, simm5_bank1=simm5_bank1, simm1_bank1_type=simm1_bank1_type, simm2_bank2_capacity=simm2_bank2_capacity, os_execute_file=os_execute_file, consumable_status_page_count_b5_executive=consumable_status_page_count_b5_executive, error20_code=error20_code, simm2_type=simm2_type, media15_engine_media_mode=media15_engine_media_mode, control_panel_display=control_panel_display, media11=media11, override_media_size=override_media_size, printed_modes_total_count=printed_modes_total_count, consumable_status_authentication=consumable_status_authentication, error5_time_stamp=error5_time_stamp, ph=ph, error31=error31, printer_average_marking_agent_coverage_actual=printer_average_marking_agent_coverage_actual, destination_subsystem=destination_subsystem, error44_code=error44_code, media14=media14, 
media10_engine_media_mode=media10_engine_media_mode, settings_print_media=settings_print_media, settings_webserver=settings_webserver, model_name=model_name, input_tray_auto_select=input_tray_auto_select, io_switch=io_switch, firmware_download=firmware_download, asset_number=asset_number, error26=error26, media19_name=media19_name, error41=error41, media19=media19, pcl_default_font_width=pcl_default_font_width, error29=error29, media15=media15, error38_time_stamp=error38_time_stamp, error25_code=error25_code, job_info_attr_2=job_info_attr_2, consumable_status_usage_count=consumable_status_usage_count, device=device, mass_storage_block_driver=mass_storage_block_driver, media_size_west_edge_second_side_offset=media_size_west_edge_second_side_offset, error48_code=error48_code, job_info_attr_14=job_info_attr_14, media12_page_count=media12_page_count, simm4=simm4, simm1_bank2_capacity=simm1_bank2_capacity, simm3_bank2=simm3_bank2, job_info_attribute=job_info_attribute, error39_code=error39_code, media13_name=media13_name, media2=media2, media11_engine_media_mode=media11_engine_media_mode, error27_time_stamp=error27_time_stamp, media_size_west_edge_side_offset_by_tray=media_size_west_edge_side_offset_by_tray, media16_name=media16_name, error50=error50, media3_engine_media_mode=media3_engine_media_mode, error42_time_stamp=error42_time_stamp, media11_page_count=media11_page_count, media19_page_count=media19_page_count, media1_name=media1_name, error15_code=error15_code, hold_job_timeout=hold_job_timeout, error24_time_stamp=error24_time_stamp, error26_code=error26_code, device_configure_printer_parameters=device_configure_printer_parameters, printed_modes_usage_total=printed_modes_usage_total, consumable_status_page_count_legal=consumable_status_page_count_legal, firmware_download_write_time=firmware_download_write_time, simm1_bank=simm1_bank, error3_time_stamp=error3_time_stamp, background_message1=background_message1, simm2_capacity=simm2_capacity, 
web_server_security=web_server_security, job_info_accounting_media_type=job_info_accounting_media_type, outbins=outbins, error44_time_stamp=error44_time_stamp, media8=media8, error1_code=error1_code, consumable_maintenance_pages_remaining=consumable_maintenance_pages_remaining, error50_time_stamp=error50_time_stamp, pdl_postscript=pdl_postscript, tray3_phd=tray3_phd, destination_bin_usage_total=destination_bin_usage_total, job_info_pages_processed=job_info_pages_processed, file_system=file_system, job_info_attr_1=job_info_attr_1, job_being_parsed=job_being_parsed, accounting=accounting, consumable_status_total_capacity=consumable_status_total_capacity, held_job_set_queue_size=held_job_set_queue_size, consumable_status_usage_units=consumable_status_usage_units, printed_media_maximum_pixels_per_page=printed_media_maximum_pixels_per_page, print_engine=print_engine, job_info_accounting_color_impression_count=job_info_accounting_color_impression_count, error2_time_stamp=error2_time_stamp, held_job_quantity=held_job_quantity, upgradable_devices_write_time=upgradable_devices_write_time, settings_file_system=settings_file_system, source_tray_usage_count=source_tray_usage_count, default_vertical_black_resolution=default_vertical_black_resolution, status_system=status_system, held_job_control=held_job_control, source_subsystem=source_subsystem, remote_procedure_call=remote_procedure_call, on_off_line=on_off_line, job_info_attr_15=job_info_attr_15, held_job_info=held_job_info, simm5_bank2_type=simm5_bank2_type, error11_time_stamp=error11_time_stamp, consumables_status=consumables_status, simm1_type=simm1_type, simm4_bank2_capacity=simm4_bank2_capacity, job_info_name1=job_info_name1, port1_parallel_bidirectionality=port1_parallel_bidirectionality, simm1_bank2=simm1_bank2, simm2_bank1_type=simm2_bank1_type, io_timeout=io_timeout, consumable_life_usage_units=consumable_life_usage_units, job_info_outcome=job_info_outcome, source_tray_usage=source_tray_usage, 
consumables=consumables, held_job_pin=held_job_pin, error39=error39, firmware_download_name=firmware_download_name, media4_short_name=media4_short_name, mass_storage_resource_change_counter=mass_storage_resource_change_counter, print_internal_page=print_internal_page, error45_time_stamp=error45_time_stamp, error22_time_stamp=error22_time_stamp, error22_code=error22_code, usage_instructions_line2=usage_instructions_line2, error13_code=error13_code, media13_short_name=media13_short_name, error46_time_stamp=error46_time_stamp, job_info_accounting_cyan_dots=job_info_accounting_cyan_dots, printed_media_total_charge=printed_media_total_charge, consumable_reorder_url=consumable_reorder_url, error15_time_stamp=error15_time_stamp, error14_code=error14_code, media4=media4, error47_time_stamp=error47_time_stamp, DisplayString=DisplayString, reprint=reprint, media15_page_count=media15_page_count, intray2=intray2, print_meter=print_meter, tables=tables, error11=error11, error47=error47, error6=error6, settings_job=settings_job, mopy_mode=mopy_mode, error6_time_stamp=error6_time_stamp, job_info_accounting_media_size=job_info_accounting_media_size, file_system_erase_mode=file_system_erase_mode, low_marking_agent_processing=low_marking_agent_processing, channeltype=channeltype, channel=channel, tray6_phd=tray6_phd, channel_jobs_received=channel_jobs_received, pjl=pjl, error41_time_stamp=error41_time_stamp, error21=error21, outbin=outbin, simm3_bank2_capacity=simm3_bank2_capacity)
mibBuilder.exportSymbols("LJ4250-MIB", error9_code=error9_code, media18_engine_media_mode=media18_engine_media_mode, perm_store_init_occurred=perm_store_init_occurred, media17_name=media17_name, job_info_attr_6=job_info_attr_6, job_info_attr_12=job_info_attr_12, job_info_attr_3=job_info_attr_3, tray2_media_size_loaded=tray2_media_size_loaded, pysmi_continue=pysmi_continue, error42=error42, error30_code=error30_code, service_channel=service_channel, media8_short_name=media8_short_name, default_media_size=default_media_size, mio3_manufacturing_info=mio3_manufacturing_info, media12=media12, consumable_status_engine_usage_count=consumable_status_engine_usage_count, job_info_attr_4=job_info_attr_4, job_info_size=job_info_size, resource_manager=resource_manager, usage_instructions_line4=usage_instructions_line4, service_id=service_id, job_input_auto_continue_timeout=job_input_auto_continue_timeout, consumable_string_information=consumable_string_information, error1_time_stamp=error1_time_stamp, manually_feed_prompt_test=manually_feed_prompt_test, postscript_print_errors=postscript_print_errors, media16_engine_media_mode=media16_engine_media_mode, media9_engine_media_mode=media9_engine_media_mode, media8_engine_media_mode=media8_engine_media_mode, phd2_device_specific_command=phd2_device_specific_command, error24_code=error24_code, error37=error37, media_types=media_types, simm2=simm2, held_job_enable=held_job_enable, usage_printer_total_charge=usage_printer_total_charge, deviceChannelTable=deviceChannelTable, consumable_status_page_count_xy_other=consumable_status_page_count_xy_other, error13_time_stamp=error13_time_stamp, job_info_change_id=job_info_change_id, error40=error40, held_job_retention=held_job_retention, display=display, media17_short_name=media17_short_name, error50_code=error50_code, media14_short_name=media14_short_name, media11_name=media11_name, job_info_accounting_black_dots=job_info_accounting_black_dots, error37_time_stamp=error37_time_stamp, 
source_tray_usage_total=source_tray_usage_total, simm4_capacity=simm4_capacity, default_media_name=default_media_name, fw_rom_revision=fw_rom_revision, marking_agent_density=marking_agent_density, error10_time_stamp=error10_time_stamp, error25_time_stamp=error25_time_stamp, consumables_life=consumables_life, mio1_model_name=mio1_model_name, job_info_accounting_yellow_dots=job_info_accounting_yellow_dots, file_system3_initialize_volume=file_system3_initialize_volume, media_info=media_info, background_message2=background_message2, printed_media_duplex_charge=printed_media_duplex_charge, error3_code=error3_code, default_custom_paper_feed_dim=default_custom_paper_feed_dim, media4_engine_media_mode=media4_engine_media_mode, error49_code=error49_code, error43=error43, error19_code=error19_code, simm1_bank2_type=simm1_bank2_type, media5_short_name=media5_short_name, firmware_download_maximum_write_count=firmware_download_maximum_write_count, mio2=mio2, media7_engine_media_mode=media7_engine_media_mode, job_info_accounting_job_type=job_info_accounting_job_type, job_info_name2=job_info_name2, error2_code=error2_code, media12_engine_media_mode=media12_engine_media_mode, consumable_status_cartridge_model=consumable_status_cartridge_model, error33_time_stamp=error33_time_stamp, marking_agent_density_setting=marking_agent_density_setting, mio=mio, status_mass_storage_bd=status_mass_storage_bd, job_info_attr_10=job_info_attr_10, status_pdl=status_pdl, printer_average_marking_agent_coverage_sum_squared=printer_average_marking_agent_coverage_sum_squared, printed_modes_usage=printed_modes_usage, simm2_bank1=simm2_bank1, printer_average=printer_average, usage_staple_count=usage_staple_count, upgradable_devices_version=upgradable_devices_version, consumable_status=consumable_status, default_custom_paper_xfeed_dim=default_custom_paper_xfeed_dim, simm3_bank1_type=simm3_bank1_type, simm3_type=simm3_type, channelEntry=channelEntry, error16=error16, media_counts=media_counts, 
media13_page_count=media13_page_count, file_system_max_open_files=file_system_max_open_files, test=test, destination_bin_usage=destination_bin_usage, consumable_status_drum_life=consumable_status_drum_life, media7=media7, outbin1=outbin1, error18_code=error18_code, media18_name=media18_name, device_name=device_name, printer_accounting=printer_accounting, firmware_download_current_state=firmware_download_current_state, error10_code=error10_code, consumable_status_oem_name=consumable_status_oem_name, upgradable_devices_current_state=upgradable_devices_current_state, error46_code=error46_code, error27_code=error27_code, mass_storage_resource_changed=mass_storage_resource_changed, media_size_count=media_size_count, pcl_default_font_number=pcl_default_font_number, media17_page_count=media17_page_count, held_job_print=held_job_print, error23=error23, error32_code=error32_code, job_info_accounting_magenta_dots=job_info_accounting_magenta_dots, consumable_status_engine_usage_units=consumable_status_engine_usage_units, error6_code=error6_code, error14=error14, device_configure=device_configure, printed_media_duplex_count=printed_media_duplex_count, error1=error1, error48=error48, north_edge_offset=north_edge_offset, error16_code=error16_code, error3=error3, postscript_total_page_count=postscript_total_page_count, media6=media6, intray6=intray6, error40_time_stamp=error40_time_stamp, media16_short_name=media16_short_name, pcl_default_font_height=pcl_default_font_height, media5=media5, engine_media_modes_supported1=engine_media_modes_supported1, error7_code=error7_code, held_job_user_name=held_job_user_name, settings_io=settings_io, install_date=install_date, maximum_ram_disk_memory=maximum_ram_disk_memory, firmware_download_write_status_supported=firmware_download_write_status_supported, error10=error10, error16_time_stamp=error16_time_stamp, error31_time_stamp=error31_time_stamp, error17=error17, outbin1_override_mode=outbin1_override_mode, media7_name=media7_name, 
error49_time_stamp=error49_time_stamp, consumable_status_manufacturing_date=consumable_status_manufacturing_date, background_status_msg_line2_part1=background_status_msg_line2_part1, media_size_west_edge_first_side_offset=media_size_west_edge_first_side_offset, control_panel_display_contents_crc=control_panel_display_contents_crc, mio3_model_name=mio3_model_name, mass_storage_resources=mass_storage_resources, job_info_pages_in_original=job_info_pages_in_original, input_tray_max_media_feed_dim=input_tray_max_media_feed_dim, mio2_manufacturing_info=mio2_manufacturing_info, media6_engine_media_mode=media6_engine_media_mode, simm3_bank1=simm3_bank1, simm4_bank1=simm4_bank1, simm1_capacity=simm1_capacity, active_print_jobs=active_print_jobs, error49=error49, error23_code=error23_code, simm5_bank2=simm5_bank2, error35_code=error35_code, interface=interface, intray1=intray1, pcl_default_font_source=pcl_default_font_source, time_display=time_display, mio1=mio1, media3=media3, media10=media10, error7=error7, remote_upgrade_enable=remote_upgrade_enable, error47_code=error47_code, media5_name=media5_name, job_input_auto_continue_mode=job_input_auto_continue_mode, show_address=show_address, rpc_bind_protocol_address=rpc_bind_protocol_address, upgradable_devices_max_write_count=upgradable_devices_max_write_count, settings_pdl=settings_pdl, media12_short_name=media12_short_name, error19=error19, media10_short_name=media10_short_name, marking_agent=marking_agent, localization_languages_supported=localization_languages_supported, media10_name=media10_name, simm1_bank1=simm1_bank1, error7_time_stamp=error7_time_stamp, intray=intray, ph_devices=ph_devices, consumable_status_info=consumable_status_info, phd=phd, phd2_manufacturing_info=phd2_manufacturing_info, error43_time_stamp=error43_time_stamp, simm5_bank=simm5_bank, control_panel_display_contents_change_counter=control_panel_display_contents_change_counter, error25=error25, date_display=date_display, 
upgradable_devices=upgradable_devices, error4_code=error4_code, error34_code=error34_code, tray6_media_size_loaded=tray6_media_size_loaded, media17=media17, error5=error5, media9_page_count=media9_page_count, consumable_life_usage_units_remaining=consumable_life_usage_units_remaining, consumable_life_low_threshold=consumable_life_low_threshold, io=io, job_info_attr_7=job_info_attr_7, media17_engine_media_mode=media17_engine_media_mode, error11_code=error11_code, self_test=self_test, channelinformation=channelinformation, printer_average_marking_agent_coverage_sum=printer_average_marking_agent_coverage_sum, settings_prt_eng=settings_prt_eng, upgradable_devices_name=upgradable_devices_name, consumable_status_serial_number=consumable_status_serial_number, job_info_io_source=job_info_io_source, job_info_physical_outbins_used=job_info_physical_outbins_used, printed_media_combined_total=printed_media_combined_total, PYSNMP_MODULE_ID=hp, job_info_attr_16=job_info_attr_16, default_horizontal_black_resolution=default_horizontal_black_resolution, print_media=print_media, tray_prompt=tray_prompt, default_custom_paper_dim_unit=default_custom_paper_dim_unit, channel_bytes_received=channel_bytes_received, file_system_set_system_partition_writeable=file_system_set_system_partition_writeable, device_system=device_system, form_feed=form_feed, background_status_msg_line1_part1=background_status_msg_line1_part1, file_system_wipe_disk=file_system_wipe_disk, current_job_parsing_id=current_job_parsing_id, simm4_bank1_type=simm4_bank1_type, simm2_bank2=simm2_bank2, file_system_set_system_partition_readonly=file_system_set_system_partition_readonly, consumable_status_drum_life_units=consumable_status_drum_life_units, error8=error8, override_media_name=override_media_name, media7_short_name=media7_short_name, media18_page_count=media18_page_count, error34=error34, printed_media_simplex_count=printed_media_simplex_count, intray5=intray5, error4_time_stamp=error4_time_stamp)
mibBuilder.exportSymbols("LJ4250-MIB", simm5_bank2_capacity=simm5_bank2_capacity, destination_bin_accounting=destination_bin_accounting, file_systems=file_systems, pcl_total_page_count=pcl_total_page_count, simm5_bank1_capacity=simm5_bank1_capacity, consumables_1=consumables_1, error30_time_stamp=error30_time_stamp, file_system_delete_files=file_system_delete_files, firmware_download_version=firmware_download_version, phd2_type=phd2_type, error35_time_stamp=error35_time_stamp, error21_time_stamp=error21_time_stamp, tray1_phd=tray1_phd, port1=port1, simm5_capacity=simm5_capacity, tray4_phd=tray4_phd, status_prt_eng=status_prt_eng, fw_rom_datecode=fw_rom_datecode, settings_mass_storage_bd=settings_mass_storage_bd, error43_code=error43_code, out_marking_agent_processing=out_marking_agent_processing, error32=error32, error8_code=error8_code, channelTable=channelTable, simm3_bank=simm3_bank, input_tray_min_media_feed_dim=input_tray_min_media_feed_dim, media1_engine_media_mode=media1_engine_media_mode, rpc_bound_protocol_address=rpc_bound_protocol_address, error32_time_stamp=error32_time_stamp, error9=error9, media1_page_count=media1_page_count, consumable_string=consumable_string, media3_name=media3_name, simm2_bank1_capacity=simm2_bank1_capacity, job_info_outbins_used=job_info_outbins_used, held_job_job_name=held_job_job_name, settings_system=settings_system, consumable_current_state=consumable_current_state, phd2_model=phd2_model, media9_short_name=media9_short_name, error17_time_stamp=error17_time_stamp, source_tray_accounting=source_tray_accounting, channelstate=channelstate, overflow_bin=overflow_bin, error23_time_stamp=error23_time_stamp, consumable_status_page_count_a4_letter=consumable_status_page_count_a4_letter, error28=error28, media6_short_name=media6_short_name, error20=error20, usage_average_toner_coverage=usage_average_toner_coverage, error_log_clear=error_log_clear, collated_originals_support=collated_originals_support, 
file_system4_initialize_volume=file_system4_initialize_volume, consumable_status_page_count_envelope=consumable_status_page_count_envelope, netPMLmgmt=netPMLmgmt, media14_name=media14_name, error35=error35, error40_code=error40_code, host_application_available_memory=host_application_available_memory, socket_ping_job_events_version=socket_ping_job_events_version, simm2_bank2_type=simm2_bank2_type, job_info_attr_11=job_info_attr_11, error48_time_stamp=error48_time_stamp, error4=error4, printed_media_simplex_charge=printed_media_simplex_charge, held_job=held_job, error15=error15, error9_time_stamp=error9_time_stamp, error18=error18, simm5_bank1_type=simm5_bank1_type, status_rpc=status_rpc, media4_page_count=media4_page_count, ports=ports, media1=media1, job_info_attr_8=job_info_attr_8, error28_code=error28_code, error5_code=error5_code, device_location=device_location, mio2_model_name=mio2_model_name, deviceChannelEntry=deviceChannelEntry, error12_code=error12_code, serial_number=serial_number, auto_continue=auto_continue, job_info_accounting_media_simplex_count=job_info_accounting_media_simplex_count, mio2_type=mio2_type, error29_code=error29_code, intrays=intrays, error36_time_stamp=error36_time_stamp, tray1_media_size_loaded=tray1_media_size_loaded, firmware_download_write_count=firmware_download_write_count, id=id, media16=media16, error27=error27, cold_reset_media_size=cold_reset_media_size, error13=error13, error37_code=error37_code, error42_code=error42_code, background_message=background_message, job_info_attr_5=job_info_attr_5, file_system_security_access_password=file_system_security_access_password, media5_page_count=media5_page_count, media12_name=media12_name, usage_instructions_line1=usage_instructions_line1, error45=error45, media8_page_count=media8_page_count, media18_short_name=media18_short_name, pdl_pcl=pdl_pcl, webserver_proc_sub=webserver_proc_sub, marker_density_calibration=marker_density_calibration, simm3=simm3, 
simm4_bank1_capacity=simm4_bank1_capacity, default_vmi=default_vmi, file_system_external_access_capabilities=file_system_external_access_capabilities, printer_average_marking_agent_units_per_gram=printer_average_marking_agent_units_per_gram, simm3_capacity=simm3_capacity, default_bits_per_pixel=default_bits_per_pixel, simm4_type=simm4_type, channelstatus=channelstatus, settings_outbin=settings_outbin, job_info_accounting=job_info_accounting, error29_time_stamp=error29_time_stamp, operating_system=operating_system, media13_engine_media_mode=media13_engine_media_mode, phd2=phd2, job_info_page_count_current_original=job_info_page_count_current_original, processing_subsystem=processing_subsystem, usage_instructions_line3=usage_instructions_line3, error18_time_stamp=error18_time_stamp, printed_media_dimplex_count=printed_media_dimplex_count, sleep_mode=sleep_mode, socket_ping=socket_ping, ph2=ph2, media2_short_name=media2_short_name, media14_page_count=media14_page_count, held_job_security=held_job_security, settings_intray=settings_intray, error12_time_stamp=error12_time_stamp, printed_modes_accounting=printed_modes_accounting, job_info_state=job_info_state, non_assured_oht_page_count=non_assured_oht_page_count, simm4_bank2=simm4_bank2, error21_code=error21_code, job_info_accounting_media_duplex_count=job_info_accounting_media_duplex_count, media1_short_name=media1_short_name, channelprinteralert=channelprinteralert, consumable_status_manufacturer_name=consumable_status_manufacturer_name, job_output_auto_continue_timeout=job_output_auto_continue_timeout, consumable_status_capacity_units=consumable_status_capacity_units, consumable_status_last_use_date=consumable_status_last_use_date, control_panel_button_press=control_panel_button_press, error20_time_stamp=error20_time_stamp, channel_mio=channel_mio, mio3=mio3, job_info_accounting_finishing_options=job_info_accounting_finishing_options, upgradable_devices_write_count=upgradable_devices_write_count, 
media15_name=media15_name, destination_bin_usage_count=destination_bin_usage_count, simm4_bank=simm4_bank, error28_time_stamp=error28_time_stamp, error39_time_stamp=error39_time_stamp, cancel_job=cancel_job, file_system3=file_system3, settings_spooler=settings_spooler, error33=error33, error44=error44, media18=media18, error30=error30, consumable_maintenance_interval=consumable_maintenance_interval, simm1_bank1_capacity=simm1_bank1_capacity, tray5_media_size_loaded=tray5_media_size_loaded, ram_disk_size=ram_disk_size, simm2_bank=simm2_bank, channelprotocolversion=channelprotocolversion, error26_time_stamp=error26_time_stamp, input_tray_max_media_xfeed_dim=input_tray_max_media_xfeed_dim, media2_name=media2_name, media9_name=media9_name, error22=error22, media15_short_name=media15_short_name, tray2_phd=tray2_phd, default_copies=default_copies, error33_code=error33_code, error19_time_stamp=error19_time_stamp, job_info_attr_9=job_info_attr_9, default_lines_per_page=default_lines_per_page, file_system2=file_system2, media16_page_count=media16_page_count, media_size=media_size, file_system2_initialize_volume=file_system2_initialize_volume, error45_code=error45_code, service_channel_printing_status=service_channel_printing_status, form_feed_needed=form_feed_needed, error34_time_stamp=error34_time_stamp, simm5=simm5, input_tray_min_media_xfeed_dim=input_tray_min_media_xfeed_dim, media19_short_name=media19_short_name)
| [
"[email protected]"
] | |
280d992fa5b09c52dc7e19f51da135e40cdd64ec | d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4 | /AtCoder/ABC-C/112probC3.py | c3bbb316c6ac065245c203087a439022efbf8c8b | [] | no_license | wattaihei/ProgrammingContest | 0d34f42f60fa6693e04c933c978527ffaddceda7 | c26de8d42790651aaee56df0956e0b206d1cceb4 | refs/heads/master | 2023-04-22T19:43:43.394907 | 2021-05-02T13:05:21 | 2021-05-02T13:05:21 | 264,400,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | N = int(input())
# Brute force for AtCoder ABC112 C ("Pyramid"): find the apex (cx, cy) and
# height H of a pyramid from N sampled altitudes (x, y, h).
# NOTE(review): N is read from stdin on the preceding line (fused into the
# dataset metadata row here) — confirm against the original file.
P = [list(map(int, input().split())) for _ in range(N)]
# Coordinates are bounded (0..100) by the problem statement, so try every
# candidate apex position.
for cx in range(101):
    for cy in range(101):
        H = -1          # apex height implied by the samples seen so far
        ok = True
        # Every sample with h > 0 pins the apex height exactly:
        # H = |x - cx| + |y - cy| + h must agree across all such samples.
        for x, y, h in P:
            if h == 0:
                continue
            Hi = abs(x-cx) + abs(y-cy) + h
            if H == -1:
                H = Hi
                continue
            if Hi != H:
                ok = False
                break
        if not ok:
            continue
        # Cross-check all samples, including h == 0 ones (which only bound
        # the height from above): h must equal max(H - distance, 0).
        for x, y, h in P:
            if h != max(H-abs(cx-x)-abs(cy-y), 0):
                ok = False
        if ok:
            print(cx, cy, H)
            break
    if ok:
break | [
"[email protected]"
] | |
caf26437cd133ac65dc531abaf3190336ec1705c | a9e608a9af7ccd94f4581b29c61e6cf0ac7a5080 | /macOS/bee1.app/Contents/Resources/python/lib/python3.6/test/test_platform.py | 5756e1cccd15dfc38b5fa177d74c9ffd849485b0 | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | leewonmoh/macrepo1 | 0d32ec61ee57af6b5018457dec5604848f246338 | d8f15145fac127c1c7294f01ad39187f76b76ac2 | refs/heads/master | 2022-11-12T20:51:18.832798 | 2018-05-31T04:35:17 | 2018-05-31T04:35:17 | 134,810,313 | 0 | 1 | BSD-3-Clause | 2022-10-26T07:05:14 | 2018-05-25T06:01:32 | Python | UTF-8 | Python | false | false | 15,022 | py | from unittest import mock
import os
import platform
import subprocess
import sys
import tempfile
import unittest
import warnings
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
@support.skip_unless_symlink
@unittest.skipUnless(os.allows_subprocesses, 'Test requires support for subprocesses.')
def test_architecture_via_symlink(self): # issue3762
# On Windows, the EXE needs to know where pythonXY.dll and *.pyd is at
# so we add the directory to the path and PYTHONPATH.
if sys.platform == "win32":
def restore_environ(old_env):
os.environ.clear()
os.environ.update(old_env)
self.addCleanup(restore_environ, dict(os.environ))
os.environ["Path"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["Path"])
os.environ["PYTHONPATH"] = os.path.dirname(sys.executable)
def get(python):
cmd = [python, '-c',
'import platform; print(platform.architecture())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_git = sys._git
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._git = self.save_git
sys.platform = self.save_platform
def test_sys_version(self):
# Old test.
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
('2.4.3 (truncation, date, t) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
('2.4.3 (truncation, date, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, d) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'd', 'GCC')),
('2.4.3 (truncation, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
):
# branch and revision are not "parsed", but fetched
# from sys._git. Ignore them
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
# Tests for python_implementation(), python_version(), python_branch(),
# python_revision(), python_build(), and python_compiler().
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
:
("IronPython", "2.6.1", "", "", ("", ""),
".NET 2.0.50727.1433"),
("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
:
("IronPython", "2.7.4", "", "", ("", ""),
"Mono 4.0.30319.1 (32-bit)"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, subversion, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if subversion is None:
if hasattr(sys, "_git"):
del sys._git
else:
sys._git = subversion
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[5], res.processor)
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
# Issue 7860: make sure we get architecture from the correct variable
# on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin' and sys.platform not in ('ios', 'tvos', 'watchos'):
# We're on a MacOSX system, check that
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
res = platform.dist()
def test_libc_ver(self):
import os
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
else:
executable = sys.executable
res = platform.libc_ver(executable)
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
@unittest.skipUnless(os.allows_subprocesses, 'Test requires support for subprocesses.')
def test_popen(self):
mswindows = (sys.platform == "win32")
if mswindows:
command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
else:
command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command) as stdout:
hello = stdout.read().strip()
stdout.close()
self.assertEqual(hello, "Hello")
data = 'plop'
if mswindows:
command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
else:
command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
command = command.format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command, 'w') as stdin:
stdout = stdin.write(data)
ret = stdin.close()
self.assertIsNotNone(ret)
if os.name == 'nt':
returncode = ret
else:
returncode = ret >> 8
self.assertEqual(returncode, len(data))
def test_linux_distribution_encoding(self):
# Issue #17429
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'fedora-release')
with open(filename, 'w', encoding='utf-8') as f:
f.write('Fedora release 19 (Schr\xf6dinger\u2019s Cat)\n')
with mock.patch('platform._UNIXCONFDIR', tempdir):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
distname, version, distid = platform.linux_distribution()
self.assertEqual(distname, 'Fedora')
self.assertEqual(version, '19')
self.assertEqual(distid, 'Schr\xf6dinger\u2019s Cat')
class DeprecationTest(unittest.TestCase):
def test_dist_deprecation(self):
with self.assertWarns(PendingDeprecationWarning) as cm:
platform.dist()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
def test_linux_distribution_deprecation(self):
with self.assertWarns(PendingDeprecationWarning) as cm:
platform.linux_distribution()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
388b1376ecae66dea21f6eb31224085c51938a87 | 426f216e3d38d2030d337c8be6463cc4cd7af6c3 | /day08/monitor/server/conf/services/generic.py | abc781d82c426c6dae6cf7b1f236a2e0e65bc7f2 | [
"Apache-2.0"
] | permissive | zhangyage/Python-oldboy | c7b43801935fc9e08e973ee0b852daa8e8667fb7 | a95c1b465929e2be641e425fcb5e15b366800831 | refs/heads/master | 2021-01-23T02:59:37.574638 | 2019-10-27T05:35:58 | 2019-10-27T05:35:58 | 86,039,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
通用的
定义的是基本的监控项目 service
'''
class BaseService(object):
    """Common base for monitoring services: holds the default settings
    every concrete service definition shares."""

    def __init__(self):
        self.name = 'BaseService'         # human-readable service identifier
        self.plugin_name = 'your_plugin'  # monitoring plugin that collects the data
        self.interval = 300               # seconds between monitoring runs
        self.last_time = 0                # timestamp of the last run (0 = never ran)
        self.triggers = {}                # alert threshold definitions
"[email protected]"
] | |
3af48aef1784b977a278aaf5bba88fe616c0c1b5 | cc64a1dfa57d4b667129efdadb97506bafce73f0 | /letsencrypt-nginx/setup.py | a37b8222b05d005279f96320447da3dd50ac18f6 | [
"Apache-2.0",
"MIT"
] | permissive | ryanwinchester-forks/letsencrypt | 83e027e3f4e78c5b4fad5fc3cc5676d2cde1f8e9 | 9bff9c0edf0f1fa28684332c17729473aa42ebca | refs/heads/master | 2021-01-18T02:00:13.041678 | 2015-10-20T16:43:55 | 2015-10-20T16:43:55 | 44,626,314 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | import sys
from setuptools import setup
from setuptools import find_packages
version = '0.1.0.dev0'
install_requires = [
'acme=={0}'.format(version),
'letsencrypt=={0}'.format(version),
'PyOpenSSL',
'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
'setuptools', # pkg_resources
'zope.interface',
]
if sys.version_info < (2, 7):
install_requires.append('mock<1.1.0')
else:
install_requires.append('mock')
setup(
name='letsencrypt-nginx',
version=version,
description="Nginx plugin for Let's Encrypt client",
url='https://github.com/letsencrypt/letsencrypt',
author="Let's Encrypt Project",
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
entry_points={
'letsencrypt.plugins': [
'nginx = letsencrypt_nginx.configurator:NginxConfigurator',
],
},
)
| [
"[email protected]"
] | |
68a7b0ac67b7143ea278c5563671863e3fc5a2e8 | b8ba03b7aa90556aebda6940f7d49daf32bd5ed9 | /debug_toolbar_autoreload/__init__.py | e12f64e055d013d7e5df1f0be7ecc4d9aafaf9d2 | [
"BSD-3-Clause"
] | permissive | OndrejIT/django-debug-toolbar-autoreload | 056df2f92213e5f4189cda4d7236dbc8811a7eff | 8cbdb06b9b40f2548bcccfd9dcb04ef56166771a | refs/heads/master | 2020-03-21T05:32:28.276348 | 2014-04-23T19:20:42 | 2014-04-23T19:20:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | from .panels import AutoreloadPanel
__version__ = '0.2.0'.split('.')
| [
"[email protected]"
] | |
ae580464cc83075118e66de8f98e34b16370cc90 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fractions_20200802121517.py | abb8224836def842553093f1a7ab4d84e93a4fd7 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | def fractions(numerator,denominator):
if denominator == 0 :
return str(numerator)
number = numerator / denominator
if numerator % denominator == 0:
return str(numerator // denominator)
newStr = str(number)
print(newStr)
largeStr = newStr.split(".")
if len(largeStr[1]) > 1:
return largeStr[0] + "." + '(' + largeStr[1][0] + ')'
return newStr
def frac(numerator, denominator):
    """Render numerator/denominator as a decimal string, wrapping any
    repeating cycle in parentheses, e.g. frac(4, 333) -> '0.(012)'.

    Returns "0" for a zero numerator and "undefined" for a zero denominator.
    Fixes a sign-dropping bug: the old code returned the bare quotient for
    exact divisions (e.g. frac(-4, 2) gave "2", not "-2") because the early
    return ignored the '-' already placed in the result list. Also removes a
    stray debug print inside the division loop.
    """
    if numerator == 0:
        return "0"
    if denominator == 0:
        return "undefined"
    res = []
    if (numerator < 0) != (denominator < 0):
        res.append("-")
    numerator = abs(numerator)
    denominator = abs(denominator)
    res.append(str(numerator // denominator))
    rem = numerator % denominator
    if rem == 0:
        # Join so the sign prefix (if any) is kept in the result.
        return "".join(res)
    res.append(".")
    first_seen = {}  # remainder -> position in `res` of the digit it produced
    while rem != 0:
        if rem in first_seen:
            # Same remainder again: the digits repeat from where it first
            # appeared, so wrap that suffix in parentheses.
            res.insert(first_seen[rem], "(")
            res.append(")")
            break
        first_seen[rem] = len(res)
        rem *= 10
        res.append(str(rem // denominator))
        rem %= denominator
    return "".join(res)
print(frac(4,333)) | [
"[email protected]"
] | |
aed016ed1a15167c9ddb335b87695a86c7f128b7 | 0738d9f85b98a7e30d7f33b4fe94ceb58f44b123 | /Algorithm/Backtracking/Python/N_queen_problem.py | 69015ddf23b8dd022f18b5baa90d480ccbfc7420 | [] | no_license | arifkhan1990/Competitive-Programming | e51606b6bafc0671e8799df4297529b27eb6488e | 9b8ca6e8f2ec4c74ea314e8d80657ac97489a315 | refs/heads/master | 2022-10-28T19:31:16.818123 | 2022-10-14T21:32:51 | 2022-10-14T21:32:51 | 199,027,360 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | def solveNQ(n):
col = set()
posDiag = set() # r+c
negDiag = set() # r-c
ans = []
board = [['0'] * n for _ in range(n)]
def backtraking(r):
if r == n:
copy = ["".join(r) for r in board]
ans.append(copy)
return
for c in range(n):
if c in col or (r+c) in posDiag or (r-c) in negDiag:
continue
col.add(c)
posDiag.add(r+c)
negDiag.add(r-c)
board[r][c] = "Q"
backtraking(r+1)
col.remove(c)
posDiag.remove(r+c)
negDiag.remove(r-c)
board[r][c] = "0"
backtraking(0)
return ans
print(solveNQ(4))
| [
"[email protected]"
] | |
2e559d799618df295505bb747e712d4de41097c4 | 09efb7c148e82c22ce6cc7a17b5140aa03aa6e55 | /env/lib/python3.6/site-packages/pandas/tests/tslibs/test_parse_iso8601.py | a6e7aee46b485b6b524f363351f1bb011b8b0b0e | [
"MIT"
] | permissive | harryturr/harryturr_garmin_dashboard | 53071a23b267116e1945ae93d36e2a978c411261 | 734e04f8257f9f84f2553efeb7e73920e35aadc9 | refs/heads/master | 2023-01-19T22:10:57.374029 | 2020-01-29T10:47:56 | 2020-01-29T10:47:56 | 235,609,069 | 4 | 0 | MIT | 2023-01-05T05:51:27 | 2020-01-22T16:00:13 | Python | UTF-8 | Python | false | false | 2,108 | py | from datetime import datetime
import pytest
from pandas._libs import tslib
@pytest.mark.parametrize(
"date_str, exp",
[
("2011-01-02", datetime(2011, 1, 2)),
("2011-1-2", datetime(2011, 1, 2)),
("2011-01", datetime(2011, 1, 1)),
("2011-1", datetime(2011, 1, 1)),
("2011 01 02", datetime(2011, 1, 2)),
("2011.01.02", datetime(2011, 1, 2)),
("2011/01/02", datetime(2011, 1, 2)),
("2011\\01\\02", datetime(2011, 1, 2)),
("2013-01-01 05:30:00", datetime(2013, 1, 1, 5, 30)),
("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30)),
],
)
def test_parsers_iso8601(date_str, exp):
# see gh-12060
#
# Test only the ISO parser - flexibility to
# different separators and leading zero's.
actual = tslib._test_parse_iso8601(date_str)
assert actual == exp
@pytest.mark.parametrize(
"date_str",
[
"2011-01/02",
"2011=11=11",
"201401",
"201111",
"200101",
# Mixed separated and unseparated.
"2005-0101",
"200501-01",
"20010101 12:3456",
"20010101 1234:56",
# HHMMSS must have two digits in
# each component if unseparated.
"20010101 1",
"20010101 123",
"20010101 12345",
"20010101 12345Z",
],
)
def test_parsers_iso8601_invalid(date_str):
msg = 'Error parsing datetime string "{s}"'.format(s=date_str)
with pytest.raises(ValueError, match=msg):
tslib._test_parse_iso8601(date_str)
def test_parsers_iso8601_invalid_offset_invalid():
date_str = "2001-01-01 12-34-56"
msg = "Timezone hours offset out of range " 'in datetime string "{s}"'.format(
s=date_str
)
with pytest.raises(ValueError, match=msg):
tslib._test_parse_iso8601(date_str)
def test_parsers_iso8601_leading_space():
# GH#25895 make sure isoparser doesn't overflow with long input
date_str, expected = ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30))
actual = tslib._test_parse_iso8601(" " * 200 + date_str)
assert actual == expected
| [
"[email protected]"
] | |
ea41839b6a0a26d593362635192d222831c3f1b4 | 18ad97292b34a679b8dea8a85090541c5bbf6174 | /candlebox.py | 72e9d35d1aae4302ce3baca36368789c9c7073a3 | [] | no_license | Jyotirm0y/kattis | b941044e39dc36d169450480fc33fd33bd2e0f8e | 2b9c1819ba29419bbea3db2e8ad7851155abbb3a | refs/heads/master | 2023-05-31T21:11:38.350044 | 2021-06-12T08:21:47 | 2021-06-12T08:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | import math
d = int(input())
r = int(input())
t = int(input())
x = math.ceil(math.sqrt(2*(r+6)))
y = x - d
while (x*(x+1)//2)-6 + (y*(y+1)//2)-3 != r + t:
x -= 1
y -= 1
print(r-((x*(x+1)//2)-6))
| [
"[email protected]"
] | |
9684ed8877dc1d5f50b40a958207c71a45c2687a | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/contrib/eager/python/datasets.py | f9b6f54e4482a424f8f775e0fbbf659cfb0a31e7 | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,168 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iteration over tf.data.Datasets when eager execution is enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
class Iterator(object):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset."""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Tensors produced will be placed on the device on which this iterator object
was created.
Args:
dataset: A `tf.data.Dataset` object.
Raises:
RuntimeError: When invoked without eager execution enabled.
"""
if not context.executing_eagerly():
raise RuntimeError(
"{} objects can only be used when eager execution is enabled, use "
"tf.data.Dataset.make_initializable_iterator or "
"tf.data.Dataset.make_one_shot_iterator for graph construction".
format(type(self)))
with ops.device("/device:CPU:0"):
ds_variant = dataset._as_variant_tensor() # pylint: disable=protected-access
self._output_classes = dataset.output_classes
self._output_types = dataset.output_types
self._output_shapes = dataset.output_shapes
self._flat_output_types = nest.flatten(
sparse.as_dense_types(self._output_types, self._output_classes))
self._flat_output_shapes = nest.flatten(
sparse.as_dense_shapes(self._output_shapes, self._output_classes))
self._resource = gen_dataset_ops.iterator(
shared_name="",
container=_generate_shared_name("eageriterator"),
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
gen_dataset_ops.make_iterator(ds_variant, self._resource)
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device="/device:CPU:0")
self._device = context.context().device_name
self._buffer_resource_handle = None
if not context.context().device_spec.device_type:
is_remote_device = False
else:
is_remote_device = context.context().device_spec.device_type != "CPU"
if is_remote_device:
with ops.device("/device:CPU:0"):
iter_string_handle = gen_dataset_ops.iterator_to_string_handle(
self._resource)
@function.Defun(dtypes.string)
def remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, self._output_types, self._output_shapes)
return remote_iterator.get_next()
remote_fn.add_to_graph(None)
target = constant_op.constant("/device:CPU:0")
with ops.device(self._device):
self._buffer_resource_handle = prefetching_ops.function_buffering_resource( # pylint: disable=line-too-long
string_arg=iter_string_handle,
f=remote_fn,
target_device=target,
buffer_size=10,
thread_pool_size=1,
container="",
shared_name=_generate_shared_name("function_buffer_resource"))
self._buffer_resource_deleter = resource_variable_ops.EagerResourceDeleter( # pylint: disable=line-too-long
handle=self._buffer_resource_handle,
handle_device=self._device)
def __iter__(self):
return self
def __next__(self): # For Python 3 compatibility
return self.next()
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
with ops.device(self._device):
if self._buffer_resource_handle is not None:
ret = prefetching_ops.function_buffering_resource_get_next(
function_buffer_resource=self._buffer_resource_handle,
output_types=self._flat_output_types)
else:
# TODO(ashankar): Consider removing this ops.device() contextmanager
# and instead mimic ops placement in graphs: Operations on resource
# handles execute on the same device as where the resource is placed.
# NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
# because in eager mode this code will run synchronously on the calling
# thread. Therefore we do not need to make a defensive context switch
# to a background thread, and can achieve a small constant performance
# boost by invoking the iterator synchronously.
ret = gen_dataset_ops.iterator_get_next_sync(
self._resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self._output_types, ret), self._output_types,
self._output_shapes, self._output_classes)
def next(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
try:
return self._next_internal()
except errors.OutOfRangeError:
raise StopIteration
@property
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return self._output_classes
  @property
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.
    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    # Plain accessor; the value is captured when the iterator is built.
    return self._output_shapes
  @property
  def output_types(self):
    """Returns the type of each component of an element of this iterator.
    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    # Plain accessor; the value is captured when the iterator is built.
    return self._output_types
  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    Args:
      name: (Optional.) A name for the created operation. Currently unused.
    Returns:
      A nested structure of `tf.Tensor` objects.
    Raises:
      `tf.errors.OutOfRangeError`: If the end of the dataset has been reached.
    """
    # `name` is accepted only for parity with the graph-mode API.
    del name
    return self._next_internal()
| [
"[email protected]"
] | |
5b645ba1468371a926405216b7d6b9b209180c2f | a87f791370ca99a5bbc45e9230f786aa212ba34c | /gif.py | acb1515dcdbeb869aacce18692864e311889db67 | [] | no_license | muffleyd/fullgif | 97129b64d499273de97c08e7df9489a20cdb6c39 | fcbc375f3f135a07ecd9db28659c2868e0cc0428 | refs/heads/main | 2023-07-18T06:23:37.777841 | 2023-07-10T08:22:50 | 2023-07-10T08:22:50 | 403,776,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,814 | py | import sys
import time
VERBOSE = False
USABLE = True
ENFORCE_VERSION = False
COERCE_DISPOSAL_METHOD = True
# If the gif provides 0 as a frame delay, use this instead
DEFAULT_FRAME_DELAY = 10
# The minimum allowed frame delay. Any non-zero lower frame delay will be set to this instead.
MINIMUM_FRAME_DELAY = 1
# between 0 and 65535
def chr16(num):
    """Encode an int in [0, 65535] as a two-character little-endian string."""
    high, low = divmod(num, 256)
    return chr(low) + chr(high)
def ord16(letters):
    """Decode a two-byte little-endian sequence (bytes or int sequence) to an int."""
    low, high = letters[0], letters[1]
    return low + (high << 8)
class Gif_Image(object):
    """Metadata and pixel data for a single frame of a GIF file."""
    def __init__(self):
        self.comments = []
        # True once a graphics control extension block has been seen for
        # this frame.
        self.graphics_extension_block = False
        self.user_input_required = None
        self.disposal_method = None
        self.frame_delay = DEFAULT_FRAME_DELAY
        self.transparent_color_index = None
        self.image_block = None
        # Frame placement and size within the logical screen.
        self.x = None
        self.y = None
        self.width = None
        self.height = None
        # Compressed (Gif_LZW) and decompressed pixel index data.
        self.lzw_data = None
        self.decompressed_data = None
        self.interlaced = None
        self.color_table = None
    def set_frame_delay(self, frame_delay):
        """Set this frame's delay, substituting defaults/minimums as needed."""
        self.frame_delay = frame_delay or DEFAULT_FRAME_DELAY
        # Enforce the minimum frame delay
        if self.frame_delay < MINIMUM_FRAME_DELAY:
            self.frame_delay = MINIMUM_FRAME_DELAY
    def clear_graphics_extension_block(self):
        """Drop all state that came from a graphics control extension block."""
        self.graphics_extension_block = False
        self.user_input_required = None
        self.disposal_method = None
        self.frame_delay = None
        self.transparent_color_index = None
    def set_decompressed_data(self, data):
        """Store decompressed pixel data and release the compressed form."""
        self.decompressed_data = data
        self.lzw_data = None
    def decompress_data(self):
        """LZW-decompress this frame's data if not already decompressed."""
        if not self.decompressed_data and self.lzw_data:
            self.set_decompressed_data(self.lzw_data.parse_stream_data())
    def process_data(self):
        '''Set additional data after decompressing.'''
        # If the data is too short, append transparent color indexes, or default to 0, until it's the right length.
        if len(self.decompressed_data) < self.width * self.height:
            if self.transparent_color_index is None:
                color_index = 0
            else:
                color_index = self.transparent_color_index
            self.decompressed_data += bytes(
                [color_index] * (self.width * self.height - len(self.decompressed_data))
            )
        # Deinterlace if needed.
        if self.interlaced:
            self.deinterlace()
        # In some gifs transparent_color_index is beyond the end of the color table, so clamp it to the end.
        if self.transparent_color_index is not None:
            if self.transparent_color_index >= len(self.color_table):
                self.transparent_color_index = len(self.color_table) - 1
    def deinterlace(self):
        """Reorder interlaced row data into top-to-bottom order."""
        # The rows of an Interlaced image are arranged in the following order:
        # Group 1 : Every 8th row, starting with row 0.
        # Group 2 : Every 8th row, starting with row 4.
        # Group 3 : Every 4th row, starting with row 2.
        # Group 4 : Every 2nd row, starting with row 1.
        new_data = bytearray(len(self.decompressed_data))
        original_row = 0
        for interlace_range in (
                range(0, self.height, 8),
                range(4, self.height, 8),
                range(2, self.height, 4),
                range(1, self.height, 2)
        ):
            for new_row in interlace_range:
                self.add_deinterlaced_row(new_row, original_row, new_data)
                original_row += 1
        self.decompressed_data = new_data[:len(self.decompressed_data)]
    def add_deinterlaced_row(self, new_row, original_row, new_data):
        """Copy one source row into its deinterlaced destination position."""
        # Copy the data over from the original data one row at a time.
        new_data[new_row * self.width:new_row * self.width + self.width] = \
            self.decompressed_data[original_row * self.width:original_row * self.width + self.width]
# Counting bits starting at 0
class Gif(object):
    """Parses a GIF87a/GIF89a file into headers, color tables and frames."""
    GIF87a = b'GIF87a'
    GIF89a = b'GIF89a'
    def __init__(self, filename, decompress=False, data=None):
        """Parse the gif at `filename` (or from raw `data` if given).

        If `decompress` is True each frame is LZW-decompressed while parsing.
        """
        if VERBOSE:
            print('loading', filename)
            start_time = time.time()
        self.decompress = decompress
        self.images = []
        self.filename = filename
        if not data:
            data = open(filename, 'rb').read()
        self.data = data
        self.version = data[:6]
        assert self.version in (self.GIF87a, self.GIF89a)
        # Where in self.data is the next piece of data, if variability is needed.
        self.tell = 6
        self.parse_headers()
        self.current_image = None
        self.parse_blocks()
        if VERBOSE:
            print('took %.2f seconds' % (time.time() - start_time))
    def __repr__(self):
        return '<Gif: "%s" %s>' % (self.filename, self.dims)
    def parse_headers(self):
        """Parse the logical screen descriptor and global color table."""
        self.dims = (ord16(self.data[6:8]), ord16(self.data[8:10]))
        screen_descriptor = self.data[10]
        # bits 0-2 are the bits pixel in the image minus 1 (0-7 => 1-8)
        global_color_table_entry_size = 1 + (screen_descriptor & 7)
        # the number of entries in the global color table can be calculated as such
        global_color_table_entries = 1 << global_color_table_entry_size # = 2 ** global_color_table_entry_size
        # bit 3 is whether the global color table is sorted by most used colors
        self.global_color_table_sorted = screen_descriptor & 8
        if self.global_color_table_sorted and self.version == self.GIF87a:
            self.global_color_table_sorted = 0
        # bits 4-6 are the bits in an entry of the original color palette minus 1 (0-7 => 1-8)
        self.color_resolution = 1 + (screen_descriptor >> 4 & 7) # bitshift to go to bits 0-2, then (1 + 2 + 4)
        # bit 7 is whether the global color table exists
        self.global_color_table_exists = bool(screen_descriptor & 128)
        # The index in the global color table (if it exists) for the background of the screen
        self.background_color_index = self.data[11]
        # The aspect ratio of a pixel. I'm going to ignore it.
        # the ratio defines width:height
        aspect_ratio_byte = self.data[12]
        if aspect_ratio_byte:
            # This is the specific math it uses to define the ratio
            self.pixel_aspect_ratio = (aspect_ratio_byte + 15) / 64
        else:
            # If not set then it's disabled
            self.pixel_aspect_ratio = 1
        self.tell = 13
        if self.global_color_table_exists:
            self.global_color_table = self.parse_color_table(global_color_table_entries)
        else:
            self.global_color_table = []
    def parse_color_table(self, table_entries):
        """Read `table_entries` RGB triples starting at self.tell."""
        bytes = 3 * table_entries
        data = self.data[self.tell:self.tell + bytes]
        self.tell += bytes
        return [data[i:i+3] for i in range(0, bytes, 3)]
    def parse_blocks(self):
        """Walk the block stream, dispatching on each block separator byte."""
        while 1:
            try:
                separator = self.data[self.tell]
            # Some gifs don't include the end of file char.
            except IndexError:
                break
            self.tell += 1
            # '\x3b' = 59 is the end of file char
            if separator == 59:
                break
            if self.current_image is None:
                # graphics extension block, image block
                self.current_image = Gif_Image()
            # '\x2c' = 44 is the local image block
            if separator == 44:
                self.parse_image_block()
                self.images.append(self.current_image)
                self.current_image = None
            elif separator == 33: # 89a '\x21' = 33
                if ENFORCE_VERSION and self.version == self.GIF87a:
                    raise GIFError('87a gif has 89a block')
                label = self.data[self.tell]
                self.tell += 1
                if label == 249: # '\xf9' = 249
                    self.parse_graphics_control_block()
                elif label == 1: # '\x01' = 1
                    self.parse_plain_text_block()
                elif label == 255: # '\xff' = 255
                    self.parse_application_block()
                elif label == 254: # '\xfe' = 254
                    self.parse_comment_block()
                else:
                    raise GIFError('Unknown \\x21/33 block label %s' % label)
            elif separator == 0: # Invalid, but probably a dangling terminator.
                continue
            else:
                raise GIFError('Unknown separator label %s' % separator)
    def parse_image_block(self):
        """Parse an image descriptor, local color table, and image data."""
        self.current_image.x = ord16(self.data[self.tell:self.tell + 2])
        self.tell += 2
        self.current_image.y = ord16(self.data[self.tell:self.tell + 2])
        self.tell += 2
        self.current_image.width = ord16(self.data[self.tell:self.tell + 2])
        self.tell += 2
        self.current_image.height = ord16(self.data[self.tell:self.tell + 2])
        self.tell += 2
        packed = self.data[self.tell]
        self.tell += 1
        # Bits 0-2 The size of the local color table (see same bits for global color table)
        color_table_entry_size = 1 + (packed & 7) # (1 + 2 + 4)
        color_table_entries = 1 << color_table_entry_size # = 2 ** color_table_entry_size
        # Bits 3-4 Reserved
        # Bit 5 Is the local color table sorted
        self.current_image.color_table_sorted = packed & 32
        # Sorted color table isn't allowed in 87a
        if self.current_image.color_table_sorted and ENFORCE_VERSION and self.version == self.GIF87a:
            self.current_image.color_table_sorted = 0
        # Bit 6 Is the image interlaced
        self.current_image.interlaced = bool(packed & 64)
        # Bit 7 Is there a local color table
        local_color_table = bool(packed & 128)
        if local_color_table:
            self.current_image.color_table = self.parse_color_table(color_table_entries)
        else:
            # Use the global color table if there's no local one
            self.current_image.color_table = self.global_color_table
        # Find cases where the transparent index is out of bounds and force it off.
        # TODO See if there's a usual way to handle this.
        if self.current_image.transparent_color_index is not None and self.current_image.transparent_color_index > len(self.current_image.color_table):
            self.current_image.transparent_color_index = None
        self.current_image.lzw_data = self.parse_image_data()
        if self.decompress:
            self.current_image.decompress_data()
    def parse_image_data(self):
        """Collect the LZW sub-blocks for one frame into a Gif_LZW object."""
        # Make these local due to the tight loops.
        tell = self.tell
        data = self.data
        lzw_data = bytearray()
        minimum_lzw_code_size = data[tell]
        tell += 1
        while 1:
            length = data[tell]
            if not length:
                break
            # This tell usage is backwards from the norm so we can do a single assignment to self.tell.
            tell += length + 1
            lzw_data += data[tell - length:tell]
        # Re-assign to self.tell and add 1 from the length check that was just done.
        self.tell = tell + 1
        return Gif_LZW(minimum_lzw_code_size, lzw_data)
    def parse_graphics_control_block(self):
        """Parse an 89a graphics control extension into the current frame."""
        block_size = self.data[self.tell]
        if block_size != 4:
            raise GIFError(
                'Unexpected block size in graphics control extension block (expected 4, got %d)' % block_size
            )
        self.tell += 1
        self.current_image.graphics_extension_block = True
        packed_bit = self.data[self.tell]
        self.tell += 1
        # Bit 0 Is the later color index byte has data
        has_transparent_color_index = packed_bit & 1
        # Bit 1 Is user input is required to move to the next image (ignored)
        self.current_image.user_input_required = packed_bit & 2
        # Bits 2-4 Gives one of 4 methods to dispose of the previous image
        disposal_method = packed_bit >> 2 & 7 # bitshift to go to bits 0-2, then (1 + 2 + 4)
        if disposal_method > 3:
            if COERCE_DISPOSAL_METHOD:
                disposal_method = 0
            else:
                raise GIFError(
                    'Previous image disposal method is invalid (expected 0, 1, 2, or 3, got %d' % disposal_method
                )
        self.current_image.disposal_method = disposal_method
        # Bits 5-7 last 3 bits are reserved
        # Set frame delay, or use the default if it's zero.
        self.current_image.set_frame_delay(ord16(self.data[self.tell:self.tell + 2]))
        self.tell += 2
        if has_transparent_color_index:
            self.current_image.transparent_color_index = self.data[self.tell]
        else:
            self.current_image.transparent_color_index = None
        self.tell += 1
        if self.data[self.tell] != 0:
            raise GIFError('Graphics control block terminator not found')
        self.tell += 1
    # Ignored
    def parse_plain_text_block(self):
        """Skip a plain text extension block (its contents are ignored)."""
        # block is 15 bytes long, the first 2 are read in parse_blocks(), and last 1 is terminator
        self.tell += 12
        if self.data[self.tell] != 0:
            raise GIFError('Plain text block terminator not found')
        self.tell += 1
        # graphics extension block affects the next block of plain text or image type. Since we're ignoring plain text,
        # drop the related graphics extension info
        if self.current_image.graphics_extension_block:
            self.current_image.clear_graphics_extension_block()
    # Ignored
    def parse_application_block(self):
        """Skip an application extension block and its sub-blocks."""
        # Block header indicates this is 11 bytes long, so jump forward this 1 byte and those 11.
        self.tell += 12
        data_length = self.data[self.tell]
        # Process sub-blocks until a 0.
        while data_length:
            self.tell += data_length + 1
            data_length = self.data[self.tell]
        self.tell += 1
    def parse_comment_block(self):
        """Read comment sub-blocks into the current frame's comment list."""
        # Process sub-blocks until a 0.
        while 1:
            comment_length = self.data[self.tell]
            self.tell += 1
            if not comment_length:
                break
            self.current_image.comments.append(self.data[self.tell:self.tell + comment_length])
            self.tell += comment_length
class Gif_LZW(object):
    """Decoder for the GIF variant of LZW-compressed image data."""
    # If the code table has reached the 2**12 limit, the code table may not be added to
    maximum_bit_size = 12
    # Pre-computed bit masks: bit_ands[n] masks the low n bits.
    bit_ands = [2 ** i - 1 for i in range(13)]
    def __init__(self, minimum_size, data):
        """`minimum_size` is the minimum LZW code size byte; `data` the raw codes."""
        self.minimum_size = minimum_size
        self.clear_code = 1 << self.minimum_size
        self.end_of_information_code = self.clear_code + 1
        # The code_table will be reset to the inside of reset_code_table.
        self.code_table = []
        self.default_code_table = [None] * (1 << self.maximum_bit_size)
        self.default_code_table[:1 << minimum_size] = [bytes((i,)) for i in range(1 << minimum_size)]
        self.stream = bytearray()
        self.data = data
    def _get_next_code(self):
        """Generator yielding variable-width codes from the byte stream."""
        value_buffer = 0
        value_buffer_bits = 0
        bit_ands = self.bit_ands
        code_size = self.code_size
        for byte in self.data:
            if value_buffer_bits >= code_size:
                value = value_buffer & bit_ands[code_size]
                value_buffer >>= code_size
                value_buffer_bits -= code_size
                yield value
                # Re-read: the consumer may have grown the code size.
                code_size = self.code_size
            value_buffer += (byte << value_buffer_bits)
            value_buffer_bits += 8
    def parse_stream_data(self):
        """Decode the whole stream and return the decompressed bytearray."""
        self.reset_code_table()
        get_next_code = self._get_next_code()
        while 1:
            response = self._parse_stream_data(get_next_code)
            # Fake tail recursion by returning end_of_information_code.
            if response == self.end_of_information_code:
                continue
            else:
                break
        return self.stream
    def _parse_stream_data(self, get_next_code):
        """Decode codes until a clear code, end-of-info code, or exhaustion."""
        # Localize variable due to the loop.
        next_code_index = self.next_code_index
        code_table = self.code_table
        clear_code = self.clear_code
        end_of_information_code = self.end_of_information_code
        stream = self.stream
        # The code table is only allowed to grow to a specific size.
        table_immutable = False
        prev_code = clear_code
        # Some gifs don't respect the standard, so we don't ensure there's at least one clear code.
        # The while loop is because clear codes can appear at any time, even right after another one.
        while prev_code == clear_code:
            try:
                prev_code = next(get_next_code)
            except StopIteration:
                return
        if prev_code == end_of_information_code:
            return
        # The first code must be in the initial code table.
        stream += code_table[prev_code]
        for code in get_next_code:
            # If it's going to reference an existing code.
            if code < next_code_index:
                # Handle clear code and end of info code.
                if code == clear_code:
                    self.reset_code_table()
                    # No tail recursion, so here we are. Wipe out prev_code like this.
                    return self.end_of_information_code
                if code == end_of_information_code:
                    return
                K_code = code
            # If it's referencing a new code.
            else:
                K_code = prev_code
            # If the code table can still grow.
            if not table_immutable:
                # This is what the gif LZW algorithm does to add entries to the code table.
                # K_code depends on the above if/else block.
                code_table[next_code_index] = code_table[prev_code] + bytes((code_table[K_code][0],))
                next_code_index += 1
                # If the code index is crossing the next threshold (2**x).
                if next_code_index == self.next_code_table_grow:
                    if self.code_size == self.maximum_bit_size:
                        # Gifs aren't allowed to grow beyond this hard limit per code.
                        table_immutable = True
                    else:
                        self.set_code_size(self.code_size + 1)
            # Add to the stream.
            stream += code_table[code]
            # Set the previous code for the next loop.
            prev_code = code
        return 0
    def reset_code_table(self):
        """Restore the code table to its initial contents and code size."""
        self.code_table[:] = self.default_code_table
        # Track what the next index for a code in self.code_table will be.
        self.next_code_index = self.end_of_information_code + 1
        self.set_code_size(self.minimum_size + 1)
    def set_code_size(self, size):
        """Set the current code width and the index at which it next grows."""
        self.code_size = size
        self.next_code_table_grow = 1 << size
    def assure_clear_code(self, code):
        """Raise GIFError unless `code` is the clear code."""
        if code != self.clear_code:
            raise GIFError('Expected clear code, got something else (%d != %d)' % (self.clear_code, code))
class GIFError(Exception):
    """Raised when gif data is malformed or violates the GIF specification."""
    pass
def fit_to(start_dims, dims=(1920, 1080)):
    """Scale `start_dims` (width, height) to fit inside `dims`, keeping aspect.

    Returns the largest integer (width, height) that fits within `dims`
    while preserving the aspect ratio of `start_dims`.
    """
    width, height = start_dims
    # Per-axis scale factors; the smaller one is the binding constraint.
    w_scale = dims[0] / width
    h_scale = dims[1] / height
    if w_scale < h_scale:
        # Width is the limiting dimension: pin it, scale the height.
        new_width = dims[0]
        new_height = height * w_scale
    else:
        # Height is the limiting dimension: pin it, scale the width.
        new_width = width * h_scale
        new_height = dims[1]
    # Truncate to ints (the redundant int(float(...)) double conversion
    # from the original is removed; behavior is unchanged).
    return (int(new_width), int(new_height))
# Threaded decompression, doesn't speed up with the python implementation of LZW.
def decompress_gif(g):
    """Decompress all frames of Gif `g` using a thread pool (dmgen required).

    Note: per the comment above, threads do not speed up the pure-Python
    LZW implementation (the work is CPU-bound under the GIL).
    """
    from dmgen import threaded_worker, gen, cores
    with gen.timer():
        with threaded_worker.threaded_worker(threads=max(1, cores.CORES - 1)) as tw:
            for i in g.images:
                tw.put(i.decompress_data)
            for i in g.images:
                tw.get()
# Multiprocessed decompression, does speed up with the python implementation of LZW.
def decompress_gif_mp(g):
    """Decompress all frames of Gif `g` across multiple processes.

    Each worker runs atomic_decompress(); results are matched back to the
    right frame by index since completion order is not guaranteed.
    """
    from dmgen import gen, cores
    import multiprocessing as mp
    q_put = mp.Queue()
    q_get = mp.Queue()
    args = (q_put, q_get)
    processes = []
    with gen.timer():
        # Start the processes.
        for i in range(max(1, cores.CORES)):
            p = mp.Process(target=atomic_decompress, args=args)
            p.daemon = True
            p.start()
            processes.append(p)
        # Put the data.
        for index, data in enumerate(g.images):
            q_put.put((index, data))
        # Put the data to end the processes.
        for i in range(len(processes)):
            q_put.put(0)
        # Get the data and set on the objects.
        for i in g.images:
            index, data = q_get.get()
            g.images[index].set_decompressed_data(data)
# Multiprocess target function for decompression.
def atomic_decompress(q_get, q_put):
    """Worker loop: decompress queued frames until a falsy sentinel arrives.

    Receives (index, frame) tuples on `q_get`, decompresses each frame,
    and replies with (index, decompressed_bytes) on `q_put`.
    """
    while True:
        task = q_get.get()
        # A falsy value is the shutdown sentinel for this worker.
        if not task:
            return
        frame_index, frame = task
        frame.decompress_data()
        q_put.put((frame_index, frame.decompressed_data))
| [
"devnull@localhost"
] | devnull@localhost |
66358a8cd5f8c3683912d15e44b84dc84ab92762 | 0b2ffe7092e4008c73fdbf8791d107a2ce583c5d | /terraform_builder/release.py | e459a6e688e3c9d51565d16f56827ef2e2a73d4d | [
"MIT"
] | permissive | mrlesmithjr/terraform-builder | 1f960741ca5a37a862e2498b8ad81a31dffc13b2 | 08ed71333e988682ce50c6ef865fdd8ba27de395 | refs/heads/master | 2022-10-16T03:05:34.351002 | 2020-06-19T03:53:53 | 2020-06-19T03:53:53 | 248,327,103 | 8 | 0 | MIT | 2022-09-30T20:02:28 | 2020-03-18T19:43:30 | Python | UTF-8 | Python | false | false | 160 | py | """terraform_builder/release.py"""
# Version tracking for package.
# Dunder metadata for the terraform_builder package.
__author__ = 'Larry Smith Jr.'
__version__ = '0.1.0'
__package_name__ = 'terraform_builder'
| [
"[email protected]"
] | |
bcabfd47909ebc6b12e84806dd30400748c428f8 | 60aae6fe961b6fadcbefa0154976012e84d29e6a | /molotov/tests/example5.py | 4228ca09bfcc30279829df5c3a827295e5b6c979 | [
"Apache-2.0"
] | permissive | tarekziade/molotov | 33aefd27e08be35b0f010a4d125f287e798a24c2 | 27f7599d9d04d86964878f3cac78e91c8b231d61 | refs/heads/main | 2023-08-03T12:07:54.036876 | 2023-07-18T06:58:50 | 2023-07-18T06:58:50 | 35,025,578 | 45 | 4 | Apache-2.0 | 2023-07-20T16:33:12 | 2015-05-04T09:17:25 | Python | UTF-8 | Python | false | false | 534 | py | """
This Molotov script demonstrates how to hook events.
"""
import molotov
@molotov.events()
async def print_request(event, **info):
    """Molotov event hook: print a marker when a request is about to be sent."""
    if event == "sending_request":
        print("=>")
@molotov.events()
async def print_response(event, **info):
    """Molotov event hook: print a marker when a response has been received."""
    if event == "response_received":
        print("<=")
@molotov.scenario(100)
async def scenario_one(session):
    """Load-test scenario: GET the local endpoint and verify payload/status."""
    async with session.get("http://localhost:8080") as resp:
        res = await resp.json()
        assert res["result"] == "OK"
        assert resp.status == 200
| [
"[email protected]"
] | |
28f0d6e887ed8e595a30430734c4e3014c1d8068 | 641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2 | /third_party/typ/typ/runner.py | a2f5c3df76df6af61852427cadf5157e4188e34b | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | massnetwork/mass-browser | 7de0dfc541cbac00ffa7308541394bac1e945b76 | 67526da9358734698c067b7775be491423884339 | refs/heads/master | 2022-12-07T09:01:31.027715 | 2017-01-19T14:29:18 | 2017-01-19T14:29:18 | 73,799,690 | 4 | 4 | BSD-3-Clause | 2022-11-26T11:53:23 | 2016-11-15T09:49:29 | null | UTF-8 | Python | false | false | 34,092 | py | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import importlib
import inspect
import json
import os
import pdb
import sys
import unittest
import traceback
from collections import OrderedDict
# This ensures that absolute imports of typ modules will work when
# running typ/runner.py as a script even if typ is not installed.
# We need this entry in addition to the one in __main__.py to ensure
# that typ/runner.py works when invoked via subprocess on windows in
# _spawn_main().
path_to_file = os.path.realpath(__file__)
if path_to_file.endswith('.pyc'): # pragma: no cover
path_to_file = path_to_file[:-1]
dir_above_typ = os.path.dirname(os.path.dirname(path_to_file))
if dir_above_typ not in sys.path: # pragma: no cover
sys.path.append(dir_above_typ)
from typ import json_results
from typ.arg_parser import ArgumentParser
from typ.host import Host
from typ.pool import make_pool
from typ.stats import Stats
from typ.printer import Printer
from typ.test_case import TestCase as TypTestCase
from typ.version import VERSION
Result = json_results.Result
ResultSet = json_results.ResultSet
ResultType = json_results.ResultType
def main(argv=None, host=None, win_multiprocessing=None, **defaults):
    """Module-level entry point: build a Runner and delegate to Runner.main()."""
    host = host or Host()
    runner = Runner(host=host)
    # Only override the runner's default strategy when explicitly provided.
    if win_multiprocessing is not None:
        runner.win_multiprocessing = win_multiprocessing
    return runner.main(argv, **defaults)
class TestInput(object):
    """A single test to run: its name plus optional message/timeout/expectation."""

    def __init__(self, name, msg='', timeout=None, expected=None):
        # Store everything verbatim; this is a plain value object.
        (self.name, self.msg, self.timeout, self.expected) = (
            name, msg, timeout, expected)
class TestSet(object):
    """Groups of tests: parallel-safe, isolated, and skipped, as TestInputs."""

    def __init__(self, parallel_tests=None, isolated_tests=None,
                 tests_to_skip=None):
        def promote(tests):
            # Wrap bare names as TestInput; pass TestInput instances through.
            wrapped = []
            for test in (tests or []):
                if isinstance(test, TestInput):
                    wrapped.append(test)
                else:
                    wrapped.append(TestInput(test))
            return wrapped

        self.parallel_tests = promote(parallel_tests)
        self.isolated_tests = promote(isolated_tests)
        self.tests_to_skip = promote(tests_to_skip)
class WinMultiprocessing(object):
    """Valid strategies for handling multiprocessing on Windows.

    Checked by Runner._check_win_multiprocessing().
    """
    ignore = 'ignore'
    importable = 'importable'
    spawn = 'spawn'
    values = [ignore, importable, spawn]
class _AddTestsError(Exception):
    """Internal: raised when discovered tests cannot be added to a TestSet."""
    pass
class Runner(object):
    def __init__(self, host=None):
        """Initialize runner state and populate self.args with defaults."""
        self.args = None
        self.classifier = None
        self.cov = None
        self.context = None
        self.coverage_source = None
        self.host = host or Host()
        self.loader = unittest.loader.TestLoader()
        self.printer = None
        self.setup_fn = None
        self.stats = None
        self.teardown_fn = None
        self.top_level_dir = None
        self.win_multiprocessing = WinMultiprocessing.spawn
        self.final_responses = []
        # initialize self.args to the defaults.
        parser = ArgumentParser(self.host)
        self.parse_args(parser, [])
    def main(self, argv=None, **defaults):
        """Parse argv and run the tests; returns a process exit code."""
        parser = ArgumentParser(self.host)
        self.parse_args(parser, argv, **defaults)
        # A non-None exit_status means argument parsing already decided the
        # outcome (e.g. --help or a usage error).
        if parser.exit_status is not None:
            return parser.exit_status
        try:
            ret, _, _ = self.run()
            return ret
        except KeyboardInterrupt:
            self.print_("interrupted, exiting", stream=self.host.stderr)
            return 130
    def parse_args(self, parser, argv, **defaults):
        """Apply keyword defaults, then parse argv into self.args.

        Unknown default names are reported via parser.error() (without
        bailing out) and abort parsing.
        """
        for attrname in defaults:
            # Every supplied default must correspond to a known argument.
            if not hasattr(self.args, attrname):
                parser.error("Unknown default argument name '%s'" % attrname,
                             bailout=False)
                return
        parser.set_defaults(**defaults)
        self.args = parser.parse_args(args=argv)
        if parser.exit_status is not None:
            return
    def print_(self, msg='', end='\n', stream=None):
        """Delegate printing to the Host abstraction."""
        self.host.print_(msg, end, stream=stream)
    def run(self, test_set=None):
        """Discover and run tests; returns (exit_code, full_results, trace).

        If `test_set` is None, tests are discovered from self.args.
        """
        ret = 0
        h = self.host
        if self.args.version:
            self.print_(VERSION)
            return ret, None, None
        should_spawn = self._check_win_multiprocessing()
        if should_spawn:
            # Re-invoke ourselves in a subprocess; see _spawn().
            return self._spawn(test_set)
        ret = self._set_up_runner()
        if ret: # pragma: no cover
            return ret, None, None
        find_start = h.time()
        if self.cov: # pragma: no cover
            self.cov.erase()
            self.cov.start()
        full_results = None
        result_set = ResultSet()
        if not test_set:
            ret, test_set = self.find_tests(self.args)
        find_end = h.time()
        if not ret:
            ret, full_results = self._run_tests(result_set, test_set)
        if self.cov: # pragma: no cover
            self.cov.stop()
            self.cov.save()
        test_end = h.time()
        trace = self._trace_from_results(result_set)
        if full_results:
            self._summarize(full_results)
            self._write(self.args.write_full_results_to, full_results)
            upload_ret = self._upload(full_results)
            if not ret:
                ret = upload_ret
            reporting_end = h.time()
            # Record coarse timing for each phase of the run.
            self._add_trace_event(trace, 'run', find_start, reporting_end)
            self._add_trace_event(trace, 'discovery', find_start, find_end)
            self._add_trace_event(trace, 'testing', find_end, test_end)
            self._add_trace_event(trace, 'reporting', test_end, reporting_end)
            self._write(self.args.write_trace_to, trace)
            self.report_coverage()
        else:
            upload_ret = 0
        return ret, full_results, trace
    def _check_win_multiprocessing(self):
        """Validate self.win_multiprocessing; return True if we must re-spawn.

        Raises ValueError for illegal or unusable configurations.
        """
        wmp = self.win_multiprocessing
        ignore, importable, spawn = WinMultiprocessing.values
        if wmp not in WinMultiprocessing.values:
            raise ValueError('illegal value %s for win_multiprocessing' %
                             wmp)
        h = self.host
        if wmp == ignore and h.platform == 'win32': # pragma: win32
            raise ValueError('Cannot use WinMultiprocessing.ignore for '
                             'win_multiprocessing when actually running '
                             'on Windows.')
        # Single-job runs never need multiprocessing workarounds.
        if wmp == ignore or self.args.jobs == 1:
            return False
        if wmp == importable:
            if self._main_is_importable():
                return False
            raise ValueError('The __main__ module (%s) ' # pragma: no cover
                             'may not be importable' %
                             sys.modules['__main__'].__file__)
        assert wmp == spawn
        return True
    def _main_is_importable(self): # pragma: untested
        """Return True if the __main__ module can be re-imported by workers."""
        path = sys.modules['__main__'].__file__
        if not path:
            return False
        if path.endswith('.pyc'):
            path = path[:-1]
        if not path.endswith('.py'):
            return False
        if path.endswith('__main__.py'):
            # main modules are not directly importable.
            return False
        path = self.host.realpath(path)
        # Importable only if the file lives under some sys.path entry.
        for d in sys.path:
            if path.startswith(self.host.realpath(d)):
                return True
        return False # pragma: no cover
    def _spawn(self, test_set):
        """Re-run this invocation in a subprocess and collect its outputs.

        Results and trace are exchanged through (temp) files; temp files
        are deleted after reading.
        """
        # TODO: Handle picklable hooks, rather than requiring them to be None.
        assert self.classifier is None
        assert self.context is None
        assert self.setup_fn is None
        assert self.teardown_fn is None
        assert test_set is None
        h = self.host
        if self.args.write_trace_to: # pragma: untested
            should_delete_trace = False
        else:
            should_delete_trace = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_trace_to = fp.name
        if self.args.write_full_results_to: # pragma: untested
            should_delete_results = False
        else:
            should_delete_results = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_full_results_to = fp.name
        argv = ArgumentParser(h).argv_from_args(self.args)
        ret = h.call_inline([h.python_interpreter, path_to_file] + argv)
        trace = self._read_and_delete(self.args.write_trace_to,
                                      should_delete_trace)
        full_results = self._read_and_delete(self.args.write_full_results_to,
                                             should_delete_results)
        return ret, full_results, trace
    def _set_up_runner(self):
        """Prepare stats/printer/paths/coverage; returns nonzero on failure."""
        h = self.host
        args = self.args
        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(
            self.print_, args.overwrite, args.terminal_width)
        self.top_level_dir = args.top_level_dir
        if not self.top_level_dir:
            if args.tests and h.isdir(args.tests[0]):
                # TODO: figure out what to do if multiple files are
                # specified and they don't all have the same correct
                # top level dir.
                d = h.realpath(h.dirname(args.tests[0]))
                if h.exists(d, '__init__.py'):
                    top_dir = d
                else:
                    top_dir = args.tests[0]
            else:
                top_dir = h.getcwd()
            # Walk up out of any package to find the true top level.
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            self.top_level_dir = h.realpath(top_dir)
        h.add_to_path(self.top_level_dir)
        for path in args.path:
            h.add_to_path(path)
        if args.coverage: # pragma: no cover
            try:
                import coverage
            except ImportError:
                h.print_("Error: coverage is not installed")
                return 1
            source = self.args.coverage_source
            if not source:
                source = [self.top_level_dir] + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0
    def find_tests(self, args):
        """Discover tests per `args`; returns (exit_code, TestSet or None).

        With --all, unittest.skip/skipIf are temporarily monkey-patched to
        no-ops so skipped tests are discovered too; the originals are always
        restored in the finally block.
        """
        test_set = TestSet()
        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if args.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x
        try:
            names = self._name_list_from_args(args)
            classifier = self.classifier or _default_classifier(args)
            for name in names:
                try:
                    self._add_tests_to_set(test_set, args.suffixes,
                                           self.top_level_dir, classifier,
                                           name)
                except (AttributeError, ImportError, SyntaxError) as e:
                    ex_str = traceback.format_exc()
                    self.print_('Failed to load "%s" in find_tests: %s' %
                                (name, e))
                    self.print_('  %s' %
                                '\n  '.join(ex_str.splitlines()))
                    self.print_(ex_str)
                    return 1, None
                except _AddTestsError as e:
                    self.print_(str(e))
                    return 1, None
            # TODO: Add support for discovering setupProcess/teardownProcess?
            shard_index = args.shard_index
            total_shards = args.total_shards
            assert total_shards >= 1
            assert shard_index >= 0 and shard_index < total_shards, (
                'shard_index (%d) must be >= 0 and < total_shards (%d)' %
                (shard_index, total_shards))
            # Keep only this shard's slice of each (sorted) test list.
            test_set.parallel_tests = _sort_inputs(
                test_set.parallel_tests)[shard_index::total_shards]
            test_set.isolated_tests = _sort_inputs(
                test_set.isolated_tests)[shard_index::total_shards]
            test_set.tests_to_skip = _sort_inputs(
                test_set.tests_to_skip)[shard_index::total_shards]
            return 0, test_set
        finally:
            unittest.skip = orig_skip
            unittest.skipIf = orig_skip_if
    def _name_list_from_args(self, args):
        """Return the list of test names to discover, from args or a file.

        Priority: explicit test names, then --file-list ('-' reads stdin),
        then the top-level directory.
        """
        if args.tests:
            names = args.tests
        elif args.file_list:
            if args.file_list == '-':
                s = self.host.stdin.read()
            else:
                s = self.host.read_text_file(args.file_list)
            names = [line.strip() for line in s.splitlines()]
        else:
            names = [self.top_level_dir]
        return names
    def _add_tests_to_set(self, test_set, suffixes, top_level_dir, classifier,
                          name):
        """Load tests matching `name` (file, directory, or dotted name) into test_set."""
        h = self.host
        loader = self.loader
        add_tests = _test_adder(test_set, classifier)
        if h.isfile(name):
            # A file path: convert it to a dotted module name and load it.
            rpath = h.relpath(name, top_level_dir)
            if rpath.endswith('.py'):
                rpath = rpath[:-3]
            module = rpath.replace(h.sep, '.')
            add_tests(loader.loadTestsFromName(module))
        elif h.isdir(name):
            for suffix in suffixes:
                add_tests(loader.discover(name, suffix, top_level_dir))
        else:
            # A dotted name: it may denote a package directory or a test name.
            possible_dir = name.replace('.', h.sep)
            if h.isdir(top_level_dir, possible_dir):
                for suffix in suffixes:
                    path = h.join(top_level_dir, possible_dir)
                    suite = loader.discover(path, suffix, top_level_dir)
                    add_tests(suite)
            else:
                add_tests(loader.loadTestsFromName(name))
        # pylint: disable=no-member
        if hasattr(loader, 'errors') and loader.errors: # pragma: python3
            # In Python3's version of unittest, loader failures get converted
            # into failed test cases, rather than raising exceptions. However,
            # the errors also get recorded so you can err out immediately.
            raise ImportError(loader.errors)
    def _run_tests(self, result_set, test_set):
        """Run *test_set*, retrying failures up to ``--retry-limit`` times.

        Returns ``(exit_code, full_results)``; ``full_results`` is None when
        there was nothing to run or ``--list-only`` was given.
        """
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_('No tests to run.')
            return 1, None
        all_tests = [ti.name for ti in
                     _sort_inputs(test_set.parallel_tests +
                                  test_set.isolated_tests +
                                  test_set.tests_to_skip)]
        if self.args.list_only:
            self.print_(' '.join(all_tests))
            return 0, None
        self._run_one_set(self.stats, result_set, test_set)
        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit
        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                # First retry pass: drop to plain, non-overwriting output so
                # retry progress lines do not clobber the original report.
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)
            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')
            # Retries run serially (isolated) with a fresh Stats/ResultSet,
            # then their results are appended to the main result set.
            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1
        if retry_limit != self.args.retry_limit:
            self.print_('')
        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set)
        return (json_results.exit_code_from_full_results(full_results),
                full_results)
def _run_one_set(self, stats, result_set, test_set):
stats.total = (len(test_set.parallel_tests) +
len(test_set.isolated_tests) +
len(test_set.tests_to_skip))
self._skip_tests(stats, result_set, test_set.tests_to_skip)
self._run_list(stats, result_set,
test_set.parallel_tests, self.args.jobs)
self._run_list(stats, result_set,
test_set.isolated_tests, 1)
def _skip_tests(self, stats, result_set, tests_to_skip):
for test_input in tests_to_skip:
last = self.host.time()
stats.started += 1
self._print_test_started(stats, test_input)
now = self.host.time()
result = Result(test_input.name, actual=ResultType.Skip,
started=last, took=(now - last), worker=0,
expected=[ResultType.Skip],
out=test_input.msg)
result_set.add(result)
stats.finished += 1
self._print_test_finished(stats, result)
    def _run_list(self, stats, result_set, test_inputs, jobs):
        """Feed *test_inputs* through a worker pool of at most *jobs* workers.

        Dispatches inputs while capacity remains, collects one finished
        result per loop iteration, and always joins the pool (collecting the
        workers' teardown responses) even if reporting raises.
        """
        h = self.host
        running_jobs = set()
        jobs = min(len(test_inputs), jobs)
        if not jobs:
            return
        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child,
                         _setup_process, _teardown_process)
        try:
            while test_inputs or running_jobs:
                # NOTE(review): the dispatch cap uses self.args.jobs rather
                # than the local, clamped `jobs` used to size the pool —
                # presumably intentional over-queueing, but verify; for the
                # isolated (jobs=1) pass this queues more than one input.
                while test_inputs and (len(running_jobs) < self.args.jobs):
                    test_input = test_inputs.pop(0)
                    stats.started += 1
                    pool.send(test_input)
                    running_jobs.add(test_input.name)
                    self._print_test_started(stats, test_input)
                result = pool.get()
                running_jobs.remove(result.name)
                result_set.add(result)
                stats.finished += 1
                self._print_test_finished(stats, result)
            pool.close()
        finally:
            # join() also returns the per-worker teardown responses.
            self.final_responses.extend(pool.join())
    def _print_test_started(self, stats, test_input):
        """Print the 'test queued/started' status line, respecting verbosity."""
        if self.args.quiet:
            # Print nothing when --quiet was passed.
            return
        # If -vvv was passed, print when the test is queued to be run.
        # We don't actually know when the test picked up to run, because
        # that is handled by the child process (where we can't easily
        # print things). Otherwise, only print when the test is started
        # if we know we can overwrite the line, so that we do not
        # get multiple lines of output as noise (in -vvv, we actually want
        # the noise).
        test_start_msg = stats.format() + test_input.name
        if self.args.verbose > 2:
            self.update(test_start_msg + ' queued', elide=False)
        if self.args.overwrite:
            self.update(test_start_msg, elide=(not self.args.verbose))
    def _print_test_finished(self, stats, result):
        """Print the per-test completion line and any captured output.

        Failing tests (result.code truthy) always dump their stdout/stderr;
        passing tests only do so at verbosity > 1, and nothing is printed
        at all for passes under --quiet.
        """
        stats.add_time()
        assert result.actual in [ResultType.Failure, ResultType.Skip,
                                 ResultType.Pass]
        if result.actual == ResultType.Failure:
            result_str = ' failed'
        elif result.actual == ResultType.Skip:
            result_str = ' was skipped'
        elif result.actual == ResultType.Pass:
            result_str = ' passed'
        if result.unexpected:
            result_str += ' unexpectedly'
        if self.args.timing:
            timing_str = ' %.4fs' % result.took
        else:
            timing_str = ''
        suffix = '%s%s' % (result_str, timing_str)
        out = result.out
        err = result.err
        if result.code:
            # Failure path: always show the line and the captured streams.
            if out or err:
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix, elide=False)
            for l in out.splitlines():
                self.print_(' %s' % l)
            for l in err.splitlines():
                self.print_(' %s' % l)
        elif not self.args.quiet:
            if self.args.verbose > 1 and (out or err):
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix,
                        elide=(not self.args.verbose))
            if self.args.verbose > 1:
                for l in out.splitlines():
                    self.print_(' %s' % l)
                for l in err.splitlines():
                    self.print_(' %s' % l)
            if self.args.verbose:
                self.flush()
    def update(self, msg, elide):
        """Forward a (possibly overwritable) status line to the printer."""
        self.printer.update(msg, elide)
    def flush(self):
        """Flush any pending, overwritable printer output."""
        self.printer.flush()
def _summarize(self, full_results):
num_tests = self.stats.finished
num_failures = json_results.num_failures(full_results)
if self.args.quiet and num_failures == 0:
return
if self.args.timing:
timing_clause = ' in %.1fs' % (self.host.time() -
self.stats.started_time)
else:
timing_clause = ''
self.update('%d test%s run%s, %d failure%s.' %
(num_tests,
'' if num_tests == 1 else 's',
timing_clause,
num_failures,
'' if num_failures == 1 else 's'), elide=False)
self.print_()
def _read_and_delete(self, path, delete):
h = self.host
obj = None
if h.exists(path):
contents = h.read_text_file(path)
if contents:
obj = json.loads(contents)
if delete:
h.remove(path)
return obj
def _write(self, path, obj):
if path:
self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')
    def _upload(self, full_results):
        """POST *full_results* to the configured test-results server.

        Returns 0 on success or when no server is configured, 1 on any
        upload failure (the error is printed, never raised — uploading is
        best-effort).
        """
        h = self.host
        if not self.args.test_results_server:
            return 0
        url, content_type, data = json_results.make_upload_request(
            self.args.test_results_server, self.args.builder_name,
            self.args.master_name, self.args.test_type,
            full_results)
        try:
            h.fetch(url, data, {'Content-Type': content_type})
            return 0
        except Exception as e:
            # Deliberately broad: an upload problem must not fail the run.
            h.print_('Uploading the JSON results raised "%s"' % str(e))
            return 1
    def report_coverage(self):
        """Combine and report coverage data if --coverage was requested.

        Imports the third-party ``coverage`` package lazily so the runner
        works without it when coverage is off.
        """
        if self.args.coverage: # pragma: no cover
            self.host.print_()
            import coverage
            # data_suffix=True merges the per-process .coverage.* files
            # written by the worker children.
            cov = coverage.coverage(data_suffix=True)
            cov.combine()
            cov.report(show_missing=self.args.coverage_show_missing,
                       omit=self.args.coverage_omit)
            if self.args.coverage_annotate:
                cov.annotate(omit=self.args.coverage_omit)
def _add_trace_event(self, trace, name, start, end):
event = {
'name': name,
'ts': int((start - self.stats.started_time) * 1000000),
'dur': int((end - start) * 1000000),
'ph': 'X',
'pid': self.host.getpid(),
'tid': 0,
}
trace['traceEvents'].append(event)
    def _trace_from_results(self, result_set):
        """Convert *result_set* into a trace-viewer JSON object.

        Each test becomes one complete ('X') event whose pid/tid reflect
        the worker process that ran it; --metadata key=value pairs land in
        'otherData'.
        """
        trace = OrderedDict()
        trace['traceEvents'] = []
        trace['otherData'] = {}
        for m in self.args.metadata:
            k, v = m.split('=')
            trace['otherData'][k] = v
        for result in result_set.results:
            # Microseconds relative to the run's start time.
            started = int((result.started - self.stats.started_time) * 1000000)
            took = int(result.took * 1000000)
            event = OrderedDict()
            event['name'] = result.name
            event['dur'] = took
            event['ts'] = started
            event['ph'] = 'X' # "Complete" events
            event['pid'] = result.pid
            event['tid'] = result.worker
            args = OrderedDict()
            args['expected'] = sorted(str(r) for r in result.expected)
            args['actual'] = str(result.actual)
            args['out'] = result.out
            args['err'] = result.err
            args['code'] = result.code
            args['unexpected'] = result.unexpected
            args['flaky'] = result.flaky
            event['args'] = args
            trace['traceEvents'].append(event)
        return trace
def _matches(name, globs):
    """Return True if *name* matches at least one shell-style glob."""
    for pattern in globs:
        if fnmatch.fnmatch(name, pattern):
            return True
    return False
def _default_classifier(args):
    """Build the default classifier closure used when the embedder does
    not supply one.

    The returned callable files each test into exactly one bucket:
    skipped (matches --skip, unless --all), isolated (matches --isolate),
    or parallel (everything else).
    """
    def default_classifier(test_set, test):
        name = test.id()
        if not args.all and _matches(name, args.skip):
            test_set.tests_to_skip.append(
                TestInput(name, 'skipped by request'))
            return
        if _matches(name, args.isolate):
            test_set.isolated_tests.append(TestInput(name))
            return
        test_set.parallel_tests.append(TestInput(name))
    return default_classifier
def _test_adder(test_set, classifier):
    """Return a closure that walks a suite/test tree and classifies tests.

    unittest load failures appear as synthetic LoadTestsFailure /
    ModuleImportFailure cases; those are invoked to extract their error
    and re-raised as _AddTestsError so the run fails fast.
    """
    def add_tests(obj):
        if isinstance(obj, unittest.suite.TestSuite):
            # Recurse: suites may nest arbitrarily deeply.
            for el in obj:
                add_tests(el)
        elif (obj.id().startswith('unittest.loader.LoadTestsFailure') or
              obj.id().startswith('unittest.loader.ModuleImportFailure')):
            # Access to protected member pylint: disable=W0212
            module_name = obj._testMethodName
            try:
                # Running the synthetic "test" re-raises the original error.
                method = getattr(obj, obj._testMethodName)
                method()
            except Exception as e:
                if 'LoadTests' in obj.id():
                    raise _AddTestsError('%s.load_tests() failed: %s'
                                         % (module_name, str(e)))
                else:
                    raise _AddTestsError(str(e))
        else:
            assert isinstance(obj, unittest.TestCase)
            classifier(test_set, obj)
    return add_tests
class _Child(object):
    """Picklable snapshot of the runner state each worker process needs.

    Only plain data and callables are copied from the parent Runner so the
    object can cross the multiprocessing boundary; host/worker_num/cov are
    filled in later by _setup_process() inside the worker.
    """
    def __init__(self, parent):
        # Assigned in the worker by _setup_process(), not here.
        self.host = None
        self.worker_num = None
        self.all = parent.args.all
        self.debugger = parent.args.debugger
        # Per-process coverage only makes sense with more than one job;
        # the single-job case is covered by the parent process.
        self.coverage = parent.args.coverage and parent.args.jobs > 1
        self.coverage_source = parent.coverage_source
        self.dry_run = parent.args.dry_run
        self.loader = parent.loader
        self.passthrough = parent.args.passthrough
        self.context = parent.context
        self.setup_fn = parent.setup_fn
        self.teardown_fn = parent.teardown_fn
        self.context_after_setup = None
        self.top_level_dir = parent.top_level_dir
        # Cache of module name -> loaded suite for _load_via_load_tests().
        self.loaded_suites = {}
        self.cov = None
def _setup_process(host, worker_num, child):
    """Per-worker initialization: bind host/worker id, start coverage,
    and run the embedder's setup hook.

    Returns the (mutated) child so the pool can hand it to each test run.
    """
    child.host = host
    child.worker_num = worker_num
    # pylint: disable=protected-access
    if child.coverage: # pragma: no cover
        import coverage
        child.cov = coverage.coverage(source=child.coverage_source,
                                      data_suffix=True)
        # Suppress the "no data collected" warning for idle workers.
        child.cov._warn_no_data = False
        child.cov.start()
    if child.setup_fn:
        child.context_after_setup = child.setup_fn(child, child.context)
    else:
        child.context_after_setup = child.context
    return child
def _teardown_process(child):
    """Per-worker shutdown: run the embedder's teardown hook and stop
    coverage collection.

    Returns a ``(worker_num, teardown_result, exception)`` tuple; the
    exception slot is None unless the teardown hook raised.
    """
    res = None
    e = None
    if child.teardown_fn:
        try:
            res = child.teardown_fn(child, child.context_after_setup)
        except Exception as exc:
            # Capture into a separate name: Python 3 unbinds the
            # ``except ... as`` target when the handler exits, so the
            # original ``except Exception as e: pass`` left ``e`` unbound
            # and the return below raised NameError whenever teardown
            # failed.
            e = exc
    if child.cov: # pragma: no cover
        child.cov.stop()
        child.cov.save()
    return (child.worker_num, res, e)
def _run_one_test(child, test_input):
    """Load and run a single named test inside a worker process.

    Returns a Result. Output capture is started before loading and always
    restored; unittest.skip/skipIf are temporarily neutered when --all was
    passed so skipped tests run anyway.
    """
    h = child.host
    pid = h.getpid()
    test_name = test_input.name
    start = h.time()
    # It is important to capture the output before loading the test
    # to ensure that
    # 1) the loader doesn't logs something we don't captured
    # 2) neither the loader nor the test case grab a reference to the
    # uncaptured stdout or stderr that later is used when the test is run.
    # This comes up when using the FakeTestLoader and testing typ itself,
    # but could come up when testing non-typ code as well.
    h.capture_output(divert=not child.passthrough)
    ex_str = ''
    try:
        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if child.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x
        try:
            suite = child.loader.loadTestsFromName(test_name)
        except Exception as e:
            # Direct loading failed; fall back to the load_tests protocol
            # and accumulate both errors for the failure message.
            ex_str = ('loadTestsFromName("%s") failed: %s\n%s\n' %
                      (test_name, e, traceback.format_exc()))
            try:
                suite = _load_via_load_tests(child, test_name)
                ex_str += ('\nload_via_load_tests(\"%s\") returned %d tests\n' %
                           (test_name, len(list(suite))))
            except Exception as e: # pragma: untested
                suite = []
                ex_str += ('\nload_via_load_tests("%s") failed: %s\n%s\n' %
                           (test_name, e, traceback.format_exc()))
    finally:
        # Always restore the real skip decorators for later tests.
        unittest.skip = orig_skip
        unittest.skipIf = orig_skip_if
    tests = list(suite)
    if len(tests) != 1:
        # Exactly one test must resolve from the name; anything else is a
        # load failure reported as a failing Result.
        err = 'Failed to load "%s" in run_one_test' % test_name
        if ex_str: # pragma: untested
            err += '\n ' + '\n '.join(ex_str.splitlines())
        h.restore_output()
        return Result(test_name, ResultType.Failure, start, 0,
                      child.worker_num, unexpected=True, code=1,
                      err=err, pid=pid)
    test_case = tests[0]
    if isinstance(test_case, TypTestCase):
        # Give typ-aware tests access to the worker and embedder context.
        test_case.child = child
        test_case.context = child.context_after_setup
    test_result = unittest.TestResult()
    out = ''
    err = ''
    try:
        if child.dry_run:
            pass
        elif child.debugger: # pragma: no cover
            _run_under_debugger(h, test_case, suite, test_result)
        else:
            suite.run(test_result)
    finally:
        out, err = h.restore_output()
    took = h.time() - start
    return _result_from_test_result(test_result, test_name, start, took, out,
                                    err, child.worker_num, pid)
def _run_under_debugger(host, test_case, suite,
                        test_result): # pragma: no cover
    """Run *suite* under pdb with a breakpoint at the test method's body."""
    # Access to protected member pylint: disable=W0212
    test_func = getattr(test_case, test_case._testMethodName)
    fname = inspect.getsourcefile(test_func)
    # +1: break on the first statement inside the method, not the def line.
    lineno = inspect.getsourcelines(test_func)[1] + 1
    dbg = pdb.Pdb(stdout=host.stdout.stream)
    dbg.set_break(fname, lineno)
    dbg.runcall(suite.run, test_result)
def _result_from_test_result(test_result, test_name, start, took, out, err,
                             worker_num, pid):
    """Translate a unittest.TestResult into a typ Result.

    Branch order is significant: failures take precedence over errors,
    then skips, expected failures, unexpected successes, and finally a
    clean pass. The relevant traceback text is appended to *err*.
    """
    flaky = False
    if test_result.failures:
        expected = [ResultType.Pass]
        actual = ResultType.Failure
        code = 1
        unexpected = True
        err = err + test_result.failures[0][1]
    elif test_result.errors:
        expected = [ResultType.Pass]
        actual = ResultType.Failure
        code = 1
        unexpected = True
        err = err + test_result.errors[0][1]
    elif test_result.skipped:
        expected = [ResultType.Skip]
        actual = ResultType.Skip
        err = err + test_result.skipped[0][1]
        code = 0
        unexpected = False
    elif test_result.expectedFailures:
        expected = [ResultType.Failure]
        actual = ResultType.Failure
        code = 1
        err = err + test_result.expectedFailures[0][1]
        unexpected = False
    elif test_result.unexpectedSuccesses:
        # Passing when failure was expected still exits 0 but is flagged.
        expected = [ResultType.Failure]
        actual = ResultType.Pass
        code = 0
        unexpected = True
    else:
        expected = [ResultType.Pass]
        actual = ResultType.Pass
        code = 0
        unexpected = False
    return Result(test_name, actual, start, took, worker_num,
                  expected, unexpected, flaky, code, out, err, pid)
def _load_via_load_tests(child, test_name):
    """Fallback loader for tests only reachable via the load_tests protocol.

    Walks the dotted *test_name* from most- to least-specific module name,
    importing each candidate and scanning its suite for an exact id match.
    Loaded suites are cached on the child so repeated lookups are cheap.
    Returns a (possibly empty) TestSuite.
    """
    # If we couldn't import a test directly, the test may be only loadable
    # via unittest's load_tests protocol. See if we can find a load_tests
    # entry point that will work for this test.
    loader = child.loader
    comps = test_name.split('.')
    new_suite = unittest.TestSuite()
    while comps:
        name = '.'.join(comps)
        module = None
        suite = None
        if name not in child.loaded_suites:
            try:
                module = importlib.import_module(name)
            except ImportError:
                # Not a module at this prefix; cache the miss and continue.
                pass
            if module:
                suite = loader.loadTestsFromModule(module)
            child.loaded_suites[name] = suite
        suite = child.loaded_suites[name]
        if suite:
            for test_case in suite:
                assert isinstance(test_case, unittest.TestCase)
                if test_case.id() == test_name: # pragma: untested
                    new_suite.addTest(test_case)
                    break
        comps.pop()
    return new_suite
def _sort_inputs(inps):
    """Return *inps* as a new list ordered by each input's ``name``."""
    def by_name(test_input):
        return test_input.name
    return sorted(inps, key=by_name)
if __name__ == '__main__': # pragma: no cover
    # Point __main__.__file__ at the real source path — presumably so the
    # Windows multiprocessing re-import path can locate this file; confirm
    # against the WinMultiprocessing handling in main().
    sys.modules['__main__'].__file__ = path_to_file
    sys.exit(main(win_multiprocessing=WinMultiprocessing.importable))
| [
"[email protected]"
] | |
1d65e5ff19c250a211d18eccd6cf2e6535690ff3 | 17c0eeede746d8dc164d27ef1f1eea3167aa0484 | /array/215.kth_largest.py | 0fce094deb14b5fdfde0e6bb5179c82d2c0c7d95 | [] | no_license | mirzasaad/leetcode-solutions | a778c70e6ea5a94f9874fb90ec24d16d877ca5f2 | 53a3eb91411d5b732c91cbe7dafe44ed0ea7335f | refs/heads/master | 2022-12-11T11:45:57.830633 | 2020-09-03T16:23:01 | 2020-09-03T16:23:01 | 264,158,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | def findKthLargest(self, nums: List[int], k: int) -> int:
def partition(A, lo, hi):
pivot = A[hi]
left = lo
for i in range(lo, hi):
if A[i] < pivot:
A[left], A[i] = A[i], A[left]
left += 1
A[left], A[hi] = A[hi], A[left]
return left
lo, hi = 0, len(nums) - 1
while lo <= hi:
index = partition(nums, lo, hi)
if index == len(nums) - k:
return nums[index]
elif index < len(nums) - k:
lo = index + 1
else:
hi = index - 1
return -1
#random pivot
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def partition(l, r):
ri = randint(l, r)
nums[r], nums[ri] = nums[ri], nums[r]
for i, v in enumerate(nums[l: r+1], l):
if v >= nums[r]:
nums[l], nums[i] = nums[i], nums[l]
l += 1
return l - 1
l, r, k = 0, len(nums) - 1, k - 1
while True:
pos = partition(l, r)
if pos < k:
l = pos + 1
elif pos > k:
r = pos - 1
else:
return nums[pos] | [
"[email protected]"
] | |
94426764e95cea8186ebe4c61ad187969f144777 | 010c5fbc97731286be00028ff33fc981d943bca3 | /primal/src/code/impute/tests/color/TestHapColorGrouping.py | 88894fe62015aa4406f73eff414b5c2598929575 | [] | no_license | orenlivne/ober | 6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30 | 810b16b2611f32c191182042240851152784edea | refs/heads/master | 2021-01-23T13:48:49.172653 | 2014-04-03T13:57:44 | 2014-04-03T13:57:44 | 6,902,212 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,304 | py | '''
============================================================
Test haplotype coloring - older Group algorithm.
Created on January 11, 2012
@author: Oren Livne <[email protected]>
============================================================
'''
import unittest, numpy as np, impute as im
from numpy.ma.testutils import assert_equal
class TestHapColorGrouping(unittest.TestCase):
#---------------------------------------------
# Constants
#---------------------------------------------
#---------------------------------------------
# Setup Methods
#---------------------------------------------
    def setUp(self):
        # No shared fixtures: each test builds its own segment set.
        pass
#---------------------------------------------
# Test Methods
#---------------------------------------------
def test_grouping(self):
'''Check segment group functions.'''
s = im.segment.segment_data_to_segment_set(family_segments())
d2 = im.segment.to_dict(s)
assert_equal(s.length, 17, 'Wrong segment set size')
s.group_to_disjoint()
assert_equal(s.length, 75, 'Wrong segment set size')
d = s.to_group_by_snp_range()
assert_equal(d.length, 19, 'Wrong segment set size')
assert_equal(d, d2, 'Mutative and Non-mutative transformations should yield the same result')
def test_coloring(self):
'''Check segment coloring functions.'''
s = im.segment.segment_data_to_segment_set(family_segments())
sub_segments, groups = im.color.hap_color_grouping.to_group_to_color(s, segment_gap=0)
assert_equal(len(sub_segments), 19, 'Wrong # disjoint sub-segments')
assert_equal(len(groups), 31, 'Wrong # distinct haplotype colors')
assert_equal([len(x) for x in groups if len(x) > 1], np.array([76, 53, 65, 45]),
'Wrong large color group sizes')
assert_equal(sub_segments, [(0, 1), (1, 2), (2, 4), (4, 301), (301, 334), (334, 576), (576, 600), (600, 795), (795, 805), (805, 1411), (1411, 1413), (1413, 1978), (1978, 2019), (2019, 2551), (2551, 2569), (2569, 2650), (2650, 2657), (2657, 3215), (3215, 3218)],
'Wrong sub-segments')
assert_equal(groups, [[(14, (3, 1)), (11, (6, 1)), (3, (5, 1)), (5, (6, 1)), (8, (6, 1)), (17, (5, 1)), (9, (3, 1)), (7, (4, 1)), (3, (1, 0)), (9, (5, 1)), (0, (1, 0)), (12, (1, 0)), (8, (3, 1)), (11, (4, 1)), (9, (6, 1)), (5, (5, 1)), (17, (2, 1)), (8, (5, 1)), (2, (4, 1)), (15, (5, 1)), (1, (1, 0)), (13, (1, 0)), (18, (1, 0)), (16, (5, 1)), (14, (1, 0)), (4, (5, 1)), (10, (1, 0)), (15, (1, 0)), (14, (5, 1)), (18, (2, 1)), (10, (5, 1)), (3, (4, 1)), (11, (5, 1)), (18, (6, 1)), (7, (5, 1)), (11, (1, 0)), (16, (1, 0)), (12, (6, 1)), (6, (1, 0)), (18, (3, 1)), (8, (1, 0)), (13, (3, 1)), (4, (4, 1)), (1, (5, 1)), (7, (1, 0)), (13, (5, 1)), (4, (1, 0)), (6, (5, 1)), (12, (3, 1)), (8, (4, 1)), (17, (1, 0)), (7, (3, 1)), (5, (4, 1)), (13, (6, 1)), (9, (1, 0)), (6, (6, 1)), (16, (6, 1)), (10, (6, 1)), (0, (5, 1)), (17, (3, 1)), (12, (5, 1)), (9, (4, 1)), (15, (3, 1)), (5, (1, 0)), (7, (6, 1)), (2, (1, 0)), (16, (3, 1)), (14, (6, 1)), (10, (3, 1)), (11, (3, 1)), (18, (5, 1)), (17, (6, 1)), (6, (4, 1)), (15, (6, 1)), (2, (5, 1)), (10, (4, 1))], [(6, (5, 0)), (3, (6, 0)), (15, (0, 0)), (9, (0, 0)), (4, (2, 0)), (15, (4, 0)), (16, (5, 0)), (2, (0, 0)), (9, (2, 0)), (7, (2, 0)), (5, (5, 0)), (5, (0, 0)), (13, (5, 0)), (0, (5, 0)), (8, (0, 0)), (18, (0, 0)), (7, (6, 0)), (9, (5, 0)), (11, (0, 0)), (17, (5, 0)), (4, (0, 0)), (8, (5, 0)), (8, (2, 0)), (6, (2, 0)), (3, (2, 0)), (5, (6, 0)), (7, (0, 0)), (1, (0, 0)), (1, (5, 0)), (6, (6, 0)), (3, (5, 0)), (15, (5, 0)), (11, (5, 0)), (17, (4, 0)), (14, (0, 0)), (17, (0, 0)), (0, (0, 0)), (10, (5, 0)), (10, (0, 0)), (7, (5, 0)), (18, (5, 0)), (3, (0, 0)), (16, (4, 0)), (13, (0, 0)), (5, (2, 0)), (16, (0, 0)), (6, (0, 0)), (2, (5, 0)), (4, (5, 0)), (4, (6, 0)), (12, (5, 0)), (12, (0, 0)), (14, (5, 0))], [(10, (4, 0)), (9, (0, 1)), (8, (0, 1)), (7, (0, 1)), (4, (3, 0)), (2, (3, 0)), (17, (0, 1)), (17, (3, 0)), (2, (0, 1)), (12, (4, 0)), (5, (4, 0)), (12, (2, 0)), (9, (6, 0)), (13, (3, 0)), (10, (0, 1)), (18, (3, 0)), (4, (4, 0)), (17, 
(6, 0)), (17, (2, 0)), (5, (3, 0)), (10, (3, 0)), (14, (2, 0)), (3, (0, 1)), (11, (4, 0)), (13, (4, 0)), (11, (0, 1)), (3, (4, 0)), (8, (3, 0)), (14, (0, 1)), (11, (6, 0)), (16, (6, 0)), (9, (3, 0)), (15, (6, 0)), (4, (0, 1)), (16, (2, 0)), (11, (2, 0)), (13, (6, 0)), (12, (0, 1)), (10, (6, 0)), (15, (0, 1)), (14, (6, 0)), (7, (4, 0)), (5, (0, 1)), (9, (4, 0)), (0, (3, 0)), (13, (0, 1)), (6, (3, 0)), (0, (0, 1)), (18, (0, 1)), (11, (3, 0)), (16, (3, 0)), (6, (4, 0)), (6, (0, 1)), (8, (4, 0)), (3, (3, 0)), (13, (2, 0)), (1, (3, 0)), (16, (0, 1)), (12, (6, 0)), (15, (3, 0)), (1, (0, 1)), (12, (3, 0)), (7, (3, 0)), (14, (3, 0)), (15, (2, 0))], [(6, (1, 1)), (2, (3, 1)), (11, (1, 1)), (11, (2, 1)), (10, (1, 1)), (2, (6, 1)), (5, (1, 1)), (3, (2, 1)), (6, (2, 1)), (8, (2, 1)), (17, (4, 1)), (3, (3, 1)), (9, (1, 1)), (15, (1, 1)), (18, (4, 1)), (14, (2, 1)), (1, (3, 1)), (14, (1, 1)), (4, (1, 1)), (7, (2, 1)), (9, (2, 1)), (12, (2, 1)), (14, (4, 1)), (16, (1, 1)), (3, (6, 1)), (4, (2, 1)), (15, (4, 1)), (18, (1, 1)), (8, (1, 1)), (13, (1, 1)), (3, (1, 1)), (15, (2, 1)), (2, (2, 1)), (2, (1, 1)), (5, (3, 1)), (13, (2, 1)), (17, (1, 1)), (16, (4, 1)), (5, (2, 1)), (13, (4, 1)), (10, (2, 1)), (12, (1, 1)), (1, (1, 1)), (7, (1, 1)), (4, (3, 1))], [(18, (6, 0))], [(1, (6, 0))], [(1, (4, 1))], [(16, (2, 1))], [(0, (1, 1))], [(14, (4, 0))], [(1, (2, 1))], [(1, (2, 0))], [(0, (6, 1))], [(0, (2, 1))], [(18, (4, 0))], [(8, (6, 0))], [(2, (4, 0))], [(0, (2, 0))], [(0, (3, 1))], [(1, (6, 1))], [(12, (4, 1))], [(1, (4, 0))], [(4, (6, 1))], [(10, (2, 0))], [(6, (3, 1))], [(18, (2, 0))], [(2, (6, 0))], [(0, (6, 0))], [(0, (4, 0))], [(2, (2, 0))], [(0, (4, 1))]],
'Wrong color groups')
def test_coloring_of_overlap(self):
'''Check segment coloring functions when segments slightly overlap.'''
s = im.segment.segment_data_to_segment_set(overlapping_segments())
sub_segments, groups = im.color.hap_color_grouping.to_group_to_color(s, segment_gap=25)
assert_equal(len(sub_segments), 19, 'Wrong # disjoint sub-segments')
assert_equal(len(groups), 30, 'Wrong # distinct haplotype colors')
assert_equal([len(x) for x in groups if len(x) > 1], np.array([76, 54, 66, 45]),
'Wrong large color group sizes')
#---------------------------------------------
# Private Methods
#---------------------------------------------
def family_segments():
'''Segment set test case. Taken from nuclear family phasing; cf. TestIbdDistantFamily.'''
return [((4 , 1411), (17087656, 32965049, 15.877, 0), ((2, 0), (0, 0))),
((1413, 3215), (33013062, 51089213, 18.076, 1), ((0, 1), (2, 0))),
((0 , 3218), (16484792, 51156933, 34.672, 1), ((0, 1), (3, 0))),
((4 , 2551), (17087656, 44974493, 27.887, 1), ((0, 1), (4, 0))),
((2569, 3215), (45198494, 51089213, 5.891, 0), ((0, 0), (4, 0))),
((0 , 3218), (16484792, 51156933, 34.672, 1), ((0, 0), (5, 0))),
((4 , 795), (17087656, 27006698, 9.919, 0), ((0, 0), (6, 0))),
((805 , 3215), (27119061, 51089213, 23.970, 1), ((0, 1), (6, 0))),
((2 , 2650), (17075353, 45881315, 28.806, 0), ((1, 1), (2, 1))),
((2657, 3218), (45940934, 51156933, 5.216, 2), ((1, 0), (2, 1))),
((1 , 576), (17065079, 25228089, 8.163, 0), ((3, 1), (1, 1))),
((600 , 3218), (25444874, 51156933, 25.712, 2), ((1, 0), (3, 1))),
((2 , 1978), (17075353, 37206341, 20.131, 2), ((1, 0), (4, 1))),
((2019, 3218), (37509844, 51156933, 13.647, 0), ((4, 1), (1, 1))),
((0 , 3218), (16484792, 51156933, 34.672, 3), ((5, 1), (1, 0))),
((2 , 301), (17075353, 20993519, 3.918, 0), ((6, 1), (1, 1))),
((334 , 3218), (21363960, 51156933, 29.793, 2), ((1, 0), (6, 1)))]
def overlapping_segments():
    '''The same as family_segments(), but tweaked so that two large segments are slightly overlapping.'''
    segments = family_segments()
    # Stretch the first segment's SNP end from 1411 to 1420 so that it
    # overlaps the segment that starts at SNP 1413.
    segments[0] = ((4, 1420), (17087656, 32965049, 15.877, 0), ((2, 0), (0, 0)))
    return segments
| [
"[email protected]"
] | |
7ab2d68fe32381cc7e34646cb4a849d9d429ff60 | 9981e61fd113fac5af9825b78b57617e001160e0 | /test/test/spiders/dmoz_spider.py | a88147643fae839c825207112f340fcb96388b17 | [] | no_license | yfjelley/scrapy | 9191ee94e4ed3732287bd040b5d2d2c16476ec12 | dc9f1dc1e76603ea623e0ab9608084f0aedba802 | refs/heads/master | 2020-03-29T12:02:38.981328 | 2014-09-30T08:29:28 | 2014-09-30T08:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from scrapy.spider import Spider
class DmozSpider(Spider):
    """Crawl two DMOZ Python directory pages and save each page's raw HTML.

    Scrapy fetches every URL in ``start_urls`` and hands the response to
    :meth:`parse`, which writes the body to a file named after the last
    directory component of the URL.
    """
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]

    def parse(self, response):
        """Persist the raw response body to a file named after the URL.

        Fix: use a context manager so the file handle is closed
        deterministically instead of relying on garbage collection.
        """
        filename = response.url.split("/")[-2]
        with open(filename, 'wb') as page_file:
            page_file.write(response.body)
| [
"[email protected]"
] | |
38b7548ce83a179d7b6f7597294f350513664810 | d7516481db51c31ae1690892a4bb19246c831ac4 | /examples/reinforcement_learning_examples/dueling_ddqn_cartpole.py | 4ebc65acb4014e727a5930cfdab6c1a1c501ad0b | [
"MIT"
] | permissive | gyunt/polyaxon | 0f7c3b026635ad62d28316bf68fc806c51fc4ccb | 0c99cca9ae9a2a4e957febe1970bf6508225f292 | refs/heads/master | 2021-09-01T07:02:51.453682 | 2017-12-20T23:02:37 | 2017-12-20T23:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from polyaxon_schemas.losses import HuberLossConfig
from polyaxon_schemas.optimizers import SGDConfig
from polyaxon_schemas.rl.explorations import DecayExplorationConfig
import polyaxon as plx
import tensorflow as tf
def main(*args):
    """Create and train a dueling double-DQN agent on the OpenAI Gym
    CartPole-v0 environment."""
    env = plx.envs.GymEnvironment('CartPole-v0')
    def graph_fn(mode, features):
        # Single dense layer mapping the observed state to 512 units.
        return plx.layers.Dense(units=512)(features['state'])
    def model_fn(features, labels, mode):
        # DDQN with Huber loss, SGD, decaying exploration, and a target
        # network refreshed every 10 updates.
        model = plx.models.DDQNModel(
            mode,
            graph_fn=graph_fn,
            loss=HuberLossConfig(),
            num_states=env.num_states,
            num_actions=env.num_actions,
            optimizer=SGDConfig(learning_rate=0.01),
            exploration_config=DecayExplorationConfig(),
            target_update_frequency=10,
            summaries='all')
        return model(features, labels)
    # Replay memory shared by the agent across training steps.
    memory = plx.rl.memories.Memory()
    estimator = plx.estimators.Agent(
        model_fn=model_fn, memory=memory, model_dir="/tmp/polyaxon_logs/ddqn_cartpole")
    estimator.train(env)
if __name__ == '__main__':
    # Configure TF logging, then let tf.app.run() parse flags and call main().
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
| [
"[email protected]"
] | |
29ad398e603eb50a181b953682529ba792218ca0 | a8750439f200e4efc11715df797489f30e9828c6 | /HackerEarth/lcs_3.py | dbc637ba24752327cc743e3495ecfbebd8558e33 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | def longest_increasing_subsequence(d):
'Return one of the L.I.S. of list d'
l = []
for i in range(len(d)):
l.append(max([l[j] for j in range(i) if l[j][-1] < d[i]] or [[]], key=len)
+ [d[i]])
return max(l, key=len)
if __name__ == '__main__':
    # Demo: print one longest increasing subsequence of a sample list.
    for d in [[4,2,6,3,8]]:
        print('a L.I.S. of %s is %s' % (d, longest_increasing_subsequence(d)))
| [
"[email protected]"
] | |
8b80f1b6b8ed8568ac76e0489c295bc5f828cb2f | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/codewar/_CodeWars-Python-master/solutions/All_that_is_open_must_be_closed.py | d1806dce261468de3df26a597cc6e422df9c3fe6 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,259 | py | """
All that is open must be closed...
http://www.codewars.com/kata/55679d644c58e2df2a00009c/train/python
"""
___ is_balanced(source, caps
count # dict
stack # list
___ c __ source:
__ c __ caps:
i caps.i.. c)
__ i % 2 __ 0:
__ caps[i] __ caps[i + 1]:
__ caps[i] __ count:
count[caps[i]] += 1
____
count[caps[i]] 1
____
stack.a..(c)
____
__ caps[i - 1] __ caps[i]:
__ caps[i] __ count:
count[caps[i]] += 1
____
count[caps[i]] 1
____
__ l..(stack) __ 0 o. stack.p.. ) !_ caps[i - 1]:
r.. F..
r.. (l..(stack) __ 0) a.. ((s..([v ___ k, v __ count.i..] % 2 __ 0)
print(is_balanced("(Sensei says yes!)", "()") __ T..)
print(is_balanced("(Sensei says no!", "()") __ F..)
print(is_balanced("(Sensei [says] yes!)", "()[]") __ T..)
print(is_balanced("(Sensei [says) no!]", "()[]") __ F..)
print(is_balanced("Sensei says -yes-!", "--") __ T..)
print(is_balanced("Sensei -says no!", "--") __ F..)
| [
"[email protected]"
] | |
7bb259878309e08221c4eed095a0919d1ca02770 | 314cf05e7acdfb2b83bf4a56de4ee65310bd28f2 | /tests/outcomes/plot/bar/universal_tests/data_simple/matplotlib_x_column_string_plt.py | c460d965e0851ce752035f391d3b6331c3e2f2a1 | [] | no_license | hyperskill/hs-test-python | 9f0201904cb68f3eb35275bb0c3b9bb70164a1e7 | 260313395d0534d148738e031753eb8f60de2e13 | refs/heads/master | 2023-05-10T17:49:26.400853 | 2023-04-26T11:49:52 | 2023-04-26T11:49:52 | 214,279,373 | 20 | 7 | null | 2023-04-26T11:49:53 | 2019-10-10T20:28:03 | Python | UTF-8 | Python | false | false | 343 | py | def plot():
try:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
except ModuleNotFoundError:
return
df = pd.DataFrame(np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]),
columns=['one', 'two'])
plt.bar(df['one'], df['two'])
plt.show()
# Module-level demo: render the sample bar chart on execution/import.
plot()
| [
"[email protected]"
] | |
73af4638f94ae74093fb40dec034e500c1ac23d7 | 5b3bd326998606188b45a7870852643eda024a97 | /utils/test_utils_test.py | 9ad7254bc0a5ca873d88901f4d4b83e24ee085cc | [] | no_license | KuznetsovIllya/clearml_od_toy | 31556d0726d15a054c1c18317c361d97801381a4 | 92f15f04a023d4e0e165a250fddc3129144913d0 | refs/heads/main | 2023-04-11T05:55:56.248478 | 2021-04-14T15:59:40 | 2021-04-14T15:59:40 | 357,827,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:27f15ba16a39d8a04be71ec29510f423d102dac19cdfa5d0e7f09e8e55f55b4b
size 3328
| [
"[email protected]"
] | |
883988c4f6204ea5858a3976c048f2751e24b4f0 | 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | /diventi/feedbacks/migrations/0006_auto_20181007_2139.py | b9a3a92a2f8c2db58839655d1c35c38662b0de2a | [
"Apache-2.0"
] | permissive | flavoi/diventi | 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | c0b1efe2baa3ff816d6ee9a8e86623f297973ded | refs/heads/master | 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 | Apache-2.0 | 2023-02-08T01:03:17 | 2017-09-09T14:10:51 | Python | UTF-8 | Python | false | false | 556 | py | # Generated by Django 2.0.8 on 2018-10-07 19:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces Question.survey (a single FK/field) with a many-to-many
    # relation of the same name: the old field is dropped, then re-added
    # as a ManyToManyField back to feedbacks.Survey.
    dependencies = [
        ('feedbacks', '0005_auto_20181007_1951'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='question',
            name='survey',
        ),
        migrations.AddField(
            model_name='question',
            name='survey',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django warns about this) — presumably harmless; confirm.
            field=models.ManyToManyField(blank=True, null=True, related_name='questions', to='feedbacks.Survey'),
        ),
    ]
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.