'use strict';
/** Highest positive signed 32-bit integer value */
const maxInt = 2147483647; // aka. 0x7FFFFFFF or 2^31-1
/** Bootstring parameters */
const base = 36;
const tMin = 1;
const tMax = 26;
const skew = 38;
const damp = 700;
const initialBias = 72;
const initialN = 128; // 0x80
const delimiter = '-'; // '\x2D'
/** Regular expressions */
const regexPunycode = /^xn--/;
const regexNonASCII = /[^\0-\x7E]/; // non-ASCII chars
const regexSeparators = /[\x2E\u3002\uFF0E\uFF61]/g; // RFC 3490 separators
/** Error messages */
const errors = {
'overflow': 'Overflow: input needs wider integers to process',
'not-basic': 'Illegal input >= 0x80 (not a basic code point)',
'invalid-input': 'Invalid input'
};
/** Convenience shortcuts */
const baseMinusTMin = base - tMin;
const floor = Math.floor;
const stringFromCharCode = String.fromCharCode;
/*--------------------------------------------------------------------------*/
/**
* A generic error utility function.
* @private
* @param {String} type The error type.
* @throws {RangeError} A `RangeError` with the applicable error message.
*/
function error(type) {
throw new RangeError(errors[type]);
}
/**
* A generic `Array#map` utility function.
* @private
* @param {Array} array The array to iterate over.
* @param {Function} fn The function that gets called for every array
* item.
* @returns {Array} A new array of values returned by the callback function.
*/
function map(array, fn) {
const result = [];
let length = array.length;
while (length--) {
result[length] = fn(array[length]);
}
return result;
}
/**
* A simple `Array#map`-like wrapper to work with domain name strings or email
* addresses.
* @private
* @param {String} string The domain name or email address.
* @param {Function} fn The function that gets called for every
* domain label.
* @returns {String} A new string of characters returned by the callback
* function.
*/
function mapDomain(string, fn) {
const parts = string.split('@');
let result = '';
if (parts.length > 1) {
// In email addresses, only the domain name should be punycoded. Leave
// the local part (i.e. everything up to `@`) intact.
result = parts[0] + '@';
string = parts[1];
}
// Avoid `split(regex)` for IE8 compatibility. See #17.
string = string.replace(regexSeparators, '\x2E');
const labels = string.split('.');
const encoded = map(labels, fn).join('.');
return result + encoded;
}
/**
* Creates an array containing the numeric code points of each Unicode
* character in the string. While JavaScript uses UCS-2 internally,
* this function will convert a pair of surrogate halves (each of which
* UCS-2 exposes as separate characters) into a single code point,
* matching UTF-16.
* @see `punycode.ucs2.encode`
* @see <https://mathiasbynens.be/notes/javascript-encoding>
* @memberOf punycode.ucs2
* @name decode
* @param {String} string The Unicode input string (UCS-2).
* @returns {Array} The new array of code points.
*/
function ucs2decode(string) {
const output = [];
let counter = 0;
const length = string.length;
while (counter < length) {
const value = string.charCodeAt(counter++);
if (value >= 0xD800 && value <= 0xDBFF && counter < length) {
// It's a high surrogate, and there is a next character.
const extra = string.charCodeAt(counter++);
if ((extra & 0xFC00) == 0xDC00) { // Low surrogate.
output.push(((value & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000);
} else {
// It's an unmatched surrogate; only append this code unit, in case the
// next code unit is the high surrogate of a surrogate pair.
output.push(value);
counter--;
}
} else {
output.push(value);
}
}
return output;
}
/**
* Creates a string based on an array of numeric code points.
* @see `punycode.ucs2.decode`
* @memberOf punycode.ucs2
* @name encode
* @param {Array} codePoints The array of numeric code points.
* @returns {String} The new Unicode string (UCS-2).
*/
const ucs2encode = array => String.fromCodePoint(...array);
/**
* Converts a basic code point into a digit/integer.
* @see `digitToBasic()`
* @private
* @param {Number} codePoint The basic numeric code point value.
* @returns {Number} The numeric value of a basic code point (for use in
* representing integers) in the range `0` to `base - 1`, or `base` if
* the code point does not represent a value.
*/
const basicToDigit = function(codePoint) {
if (codePoint - 0x30 < 0x0A) {
return codePoint - 0x16;
}
if (codePoint - 0x41 < 0x1A) {
return codePoint - 0x41;
}
if (codePoint - 0x61 < 0x1A) {
return codePoint - 0x61;
}
return base;
};
/**
* Converts a digit/integer into a basic code point.
* @see `basicToDigit()`
* @private
* @param {Number} digit The numeric value of a basic code point.
* @returns {Number} The basic code point whose value (when used for
* representing integers) is `digit`, which needs to be in the range
* `0` to `base - 1`. If `flag` is non-zero, the uppercase form is
* used; else, the lowercase form is used. The behavior is undefined
* if `flag` is non-zero and `digit` has no uppercase form.
*/
const digitToBasic = function(digit, flag) {
// 0..25 map to ASCII a..z or A..Z
// 26..35 map to ASCII 0..9
return digit + 22 + 75 * (digit < 26) - ((flag != 0) << 5);
};
/**
* Bias adaptation function as per section 3.4 of RFC 3492.
* https://tools.ietf.org/html/rfc3492#section-3.4
* @private
*/
const adapt = function(delta, numPoints, firstTime) {
let k = 0;
delta = firstTime ? floor(delta / damp) : delta >> 1;
delta += floor(delta / numPoints);
for (/* no initialization */; delta > baseMinusTMin * tMax >> 1; k += base) {
delta = floor(delta / baseMinusTMin);
}
return floor(k + (baseMinusTMin + 1) * delta / (delta + skew));
};
/**
* Converts a Punycode string of ASCII-only symbols to a string of Unicode
* symbols.
* @memberOf punycode
* @param {String} input The Punycode string of ASCII-only symbols.
* @returns {String} The resulting string of Unicode symbols.
*/
const decode = function(input) {
// Don't use UCS-2.
const output = [];
const inputLength = input.length;
let i = 0;
let n = initialN;
let bias = initialBias;
// Handle the basic code points: let `basic` be the number of input code
// points before the last delimiter, or `0` if there is none, then copy
// the first basic code points to the output.
let basic = input.lastIndexOf(delimiter);
if (basic < 0) {
basic = 0;
}
for (let j = 0; j < basic; ++j) {
// if it's not a basic code point
if (input.charCodeAt(j) >= 0x80) {
error('not-basic');
}
output.push(input.charCodeAt(j));
}
// Main decoding loop: start just after the last delimiter if any basic code
// points were copied; start at the beginning otherwise.
for (let index = basic > 0 ? basic + 1 : 0; index < inputLength; /* no final expression */) {
// `index` is the index of the next character to be consumed.
// Decode a generalized variable-length integer into `delta`,
// which gets added to `i`. The overflow checking is easier
// if we increase `i` as we go, then subtract off its starting
// value at the end to obtain `delta`.
let oldi = i;
for (let w = 1, k = base; /* no condition */; k += base) {
if (index >= inputLength) {
error('invalid-input');
}
const digit = basicToDigit(input.charCodeAt(index++));
if (digit >= base || digit > floor((maxInt - i) / w)) {
error('overflow');
}
i += digit * w;
const t = k <= bias ? tMin : (k >= bias + tMax ? tMax : k - bias);
if (digit < t) {
break;
}
const baseMinusT = base - t;
if (w > floor(maxInt / baseMinusT)) {
error('overflow');
}
w *= baseMinusT;
}
const out = output.length + 1;
bias = adapt(i - oldi, out, oldi == 0);
// `i` was supposed to wrap around from `out` to `0`,
// incrementing `n` each time, so we'll fix that now:
if (floor(i / out) > maxInt - n) {
error('overflow');
}
n += floor(i / out);
i %= out;
// Insert `n` at position `i` of the output.
output.splice(i++, 0, n);
}
return String.fromCodePoint(...output);
};
/**
* Converts a string of Unicode symbols (e.g. a domain name label) to a
* Punycode string of ASCII-only symbols.
* @memberOf punycode
* @param {String} input The string of Unicode symbols.
* @returns {String} The resulting Punycode string of ASCII-only symbols.
*/
const encode = function(input) {
const output = [];
// Convert the input in UCS-2 to an array of Unicode code points.
input = ucs2decode(input);
// Cache the length.
let inputLength = input.length;
// Initialize the state.
let n = initialN;
let delta = 0;
let bias = initialBias;
// Handle the basic code points.
for (const currentValue of input) {
if (currentValue < 0x80) {
output.push(stringFromCharCode(currentValue));
}
}
let basicLength = output.length;
let handledCPCount = basicLength;
// `handledCPCount` is the number of code points that have been handled;
// `basicLength` is the number of basic code points.
// Finish the basic string with a delimiter unless it's empty.
if (basicLength) {
output.push(delimiter);
}
// Main encoding loop:
while (handledCPCount < inputLength) {
// All non-basic code points < n have been handled already. Find the next
// larger one:
let m = maxInt;
for (const currentValue of input) {
if (currentValue >= n && currentValue < m) {
m = currentValue;
}
}
// Increase `delta` enough to advance the decoder's <n,i> state to <m,0>,
// but guard against overflow.
const handledCPCountPlusOne = handledCPCount + 1;
if (m - n > floor((maxInt - delta) / handledCPCountPlusOne)) {
error('overflow');
}
delta += (m - n) * handledCPCountPlusOne;
n = m;
for (const currentValue of input) {
if (currentValue < n && ++delta > maxInt) {
error('overflow');
}
if (currentValue == n) {
// Represent delta as a generalized variable-length integer.
let q = delta;
for (let k = base; /* no condition */; k += base) {
const t = k <= bias ? tMin : (k >= bias + tMax ? tMax : k - bias);
if (q < t) {
break;
}
const qMinusT = q - t;
const baseMinusT = base - t;
output.push(
stringFromCharCode(digitToBasic(t + qMinusT % baseMinusT, 0))
);
q = floor(qMinusT / baseMinusT);
}
output.push(stringFromCharCode(digitToBasic(q, 0)));
bias = adapt(delta, handledCPCountPlusOne, handledCPCount == basicLength);
delta = 0;
++handledCPCount;
}
}
++delta;
++n;
}
return output.join('');
};
/**
* Converts a Punycode string representing a domain name or an email address
* to Unicode. Only the Punycoded parts of the input will be converted, i.e.
* it doesn't matter if you call it on a string that has already been
* converted to Unicode.
* @memberOf punycode
* @param {String} input The Punycoded domain name or email address to
* convert to Unicode.
* @returns {String} The Unicode representation of the given Punycode
* string.
*/
const toUnicode = function(input) {
return mapDomain(input, function(string) {
return regexPunycode.test(string)
? decode(string.slice(4).toLowerCase())
: string;
});
};
/**
* Converts a Unicode string representing a domain name or an email address to
* Punycode. Only the non-ASCII parts of the domain name will be converted,
* i.e. it doesn't matter if you call it with a domain that's already in
* ASCII.
* @memberOf punycode
* @param {String} input The domain name or email address to convert, as a
* Unicode string.
* @returns {String} The Punycode representation of the given domain name or
* email address.
*/
const toASCII = function(input) {
return mapDomain(input, function(string) {
return regexNonASCII.test(string)
? 'xn--' + encode(string)
: string;
});
};
/*--------------------------------------------------------------------------*/
/** Define the public API */
const punycode = {
/**
* A string representing the current Punycode.js version number.
* @memberOf punycode
* @type String
*/
'version': '2.1.0',
/**
* An object of methods to convert from JavaScript's internal character
* representation (UCS-2) to Unicode code points, and back.
* @see <https://mathiasbynens.be/notes/javascript-encoding>
* @memberOf punycode
* @type Object
*/
'ucs2': {
'decode': ucs2decode,
'encode': ucs2encode
},
'decode': decode,
'encode': encode,
'toASCII': toASCII,
'toUnicode': toUnicode
};
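/*
 * Usage sketch (illustrative):
 *
 *   punycode.toASCII('mañana.com');          // 'xn--maana-pta.com'
 *   punycode.toUnicode('xn--maana-pta.com'); // 'mañana.com'
 *   punycode.encode('mañana');               // 'maana-pta'
 *   punycode.decode('maana-pta');            // 'mañana'
 */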
module.exports = punycode;
module.exports = ForeverAgent
ForeverAgent.SSL = ForeverAgentSSL
var util = require('util')
, Agent = require('http').Agent
, net = require('net')
, tls = require('tls')
, AgentSSL = require('https').Agent
function getConnectionName(host, port) {
var name = ''
if (typeof host === 'string') {
name = host + ':' + port
} else {
// For node.js v0.12.0 and iojs-v1.5.1, host is an object. And any existing localAddress is part of the connection name.
name = host.host + ':' + host.port + ':' + (host.localAddress ? (host.localAddress + ':') : ':')
}
return name
}
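// ForeverAgent keeps finished sockets alive (up to `minSockets` per connection
// name) so that later requests to the same host can reuse them instead of
// opening new connections.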
function ForeverAgent(options) {
var self = this
self.options = options || {}
self.requests = {}
self.sockets = {}
self.freeSockets = {}
self.maxSockets = self.options.maxSockets || Agent.defaultMaxSockets
self.minSockets = self.options.minSockets || ForeverAgent.defaultMinSockets
self.on('free', function(socket, host, port) {
var name = getConnectionName(host, port)
if (self.requests[name] && self.requests[name].length) {
self.requests[name].shift().onSocket(socket)
} else if (self.sockets[name].length < self.minSockets) {
if (!self.freeSockets[name]) self.freeSockets[name] = []
self.freeSockets[name].push(socket)
// if an error happens while we don't use the socket anyway, meh, throw the socket away
var onIdleError = function() {
socket.destroy()
}
socket._onIdleError = onIdleError
socket.on('error', onIdleError)
} else {
// If there are no pending requests just destroy the
// socket and it will get removed from the pool. This
// gets us out of timeout issues and allows us to
// default to Connection:keep-alive.
socket.destroy()
}
})
}
util.inherits(ForeverAgent, Agent)
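/*
 * Usage sketch (a minimal example; the host and path below are made up):
 *
 *   var http = require('http')
 *   var ForeverAgent = require('forever-agent')
 *
 *   var agent = new ForeverAgent({ maxSockets: 10, minSockets: 5 })
 *   http.get({ host: 'example.com', port: 80, path: '/', agent: agent }, function (res) {
 *     res.resume()
 *   })
 */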
ForeverAgent.defaultMinSockets = 5
ForeverAgent.prototype.createConnection = net.createConnection
ForeverAgent.prototype.addRequestNoreuse = Agent.prototype.addRequest
ForeverAgent.prototype.addRequest = function(req, host, port) {
var name = getConnectionName(host, port)
if (typeof host !== 'string') {
var options = host
port = options.port
host = options.host
}
if (this.freeSockets[name] && this.freeSockets[name].length > 0 && !req.useChunkedEncodingByDefault) {
var idleSocket = this.freeSockets[name].pop()
idleSocket.removeListener('error', idleSocket._onIdleError)
delete idleSocket._onIdleError
req._reusedSocket = true
req.onSocket(idleSocket)
} else {
this.addRequestNoreuse(req, host, port)
}
}
ForeverAgent.prototype.removeSocket = function(s, name, host, port) {
if (this.sockets[name]) {
var index = this.sockets[name].indexOf(s)
if (index !== -1) {
this.sockets[name].splice(index, 1)
}
if (this.sockets[name].length === 0) {
// don't leak empty socket lists (and drop the request queue once it is empty too)
delete this.sockets[name]
if (this.requests[name] && this.requests[name].length === 0) {
delete this.requests[name]
}
}
}
if (this.freeSockets[name]) {
var index = this.freeSockets[name].indexOf(s)
if (index !== -1) {
this.freeSockets[name].splice(index, 1)
if (this.freeSockets[name].length === 0) {
delete this.freeSockets[name]
}
}
}
if (this.requests[name] && this.requests[name].length) {
// If we have pending requests and a socket gets closed a new one
// needs to be created to take over in the pool for the one that closed.
this.createSocket(name, host, port).emit('free')
}
}
function ForeverAgentSSL (options) {
ForeverAgent.call(this, options)
}
util.inherits(ForeverAgentSSL, ForeverAgent)
ForeverAgentSSL.prototype.createConnection = createConnectionSSL
ForeverAgentSSL.prototype.addRequestNoreuse = AgentSSL.prototype.addRequest
function createConnectionSSL (port, host, options) {
if (typeof port === 'object') {
options = port;
} else if (typeof host === 'object') {
options = host;
} else if (typeof options === 'object') {
options = options;
} else {
options = {};
}
if (typeof port === 'number') {
options.port = port;
}
if (typeof host === 'string') {
options.host = host;
}
return tls.connect(options);
}
# safer-buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![javascript style guide][standard-image]][standard-url] [![Security Responsible Disclosure][security-image]][security-url]
[travis-image]: https://travis-ci.org/ChALkeR/safer-buffer.svg?branch=master
[travis-url]: https://travis-ci.org/ChALkeR/safer-buffer
[npm-image]: https://img.shields.io/npm/v/safer-buffer.svg
[npm-url]: https://npmjs.org/package/safer-buffer
[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg
[standard-url]: https://standardjs.com
[security-image]: https://img.shields.io/badge/Security-Responsible%20Disclosure-green.svg
[security-url]: https://github.com/nodejs/security-wg/blob/master/processes/responsible_disclosure_template.md
Modern Buffer API polyfill without footguns, working on Node.js from 0.8 to current.
## How to use?
First, port all `Buffer()` and `new Buffer()` calls to `Buffer.alloc()` and `Buffer.from()` API.
Then, to achieve compatibility with outdated Node.js versions (`<4.5.0` and 5.x `<5.9.0`), use
`const Buffer = require('safer-buffer').Buffer` in all files where you make calls to the new
Buffer API. _Use `var` instead of `const` if you need that for your Node.js version range support._
Also, see the
[porting Buffer](https://github.com/ChALkeR/safer-buffer/blob/master/Porting-Buffer.md) guide.
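For example, a ported file might look like this (a minimal sketch; the variable names are illustrative):

```js
var Buffer = require('safer-buffer').Buffer // use `const` if your Node.js range allows it

var fromString = Buffer.from('abc', 'utf8') // <Buffer 61 62 63>
var zeroFilled = Buffer.alloc(16)           // 16 zero-filled bytes
```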
## Do I need it?
Hopefully, not — dropping support for outdated Node.js versions should be fine nowadays, and that
is the recommended path forward. You _do_ need to port to the `Buffer.alloc()` and `Buffer.from()`
though.
See the [porting guide](https://github.com/ChALkeR/safer-buffer/blob/master/Porting-Buffer.md)
for a better description.
## Why not [safe-buffer](https://npmjs.com/safe-buffer)?
_In short: while `safe-buffer` serves as a polyfill for the new API, it allows old API usage and
itself contains footguns._
`safe-buffer` could be used safely to get the new API while still keeping support for older
Node.js versions (like this module), but while analyzing ecosystem usage of the old Buffer API
I found out that `safe-buffer` is itself causing problems in some cases.
For example, consider the following snippet:
```console
$ cat example.unsafe.js
console.log(Buffer(20))
$ ./node-v6.13.0-linux-x64/bin/node example.unsafe.js
<Buffer 0a 00 00 00 00 00 00 00 28 13 de 02 00 00 00 00 05 00 00 00>
$ standard example.unsafe.js
standard: Use JavaScript Standard Style (https://standardjs.com)
/home/chalker/repo/safer-buffer/example.unsafe.js:2:13: 'Buffer()' was deprecated since v6. Use 'Buffer.alloc()' or 'Buffer.from()' (use 'https://www.npmjs.com/package/safe-buffer' for '<4.5.0') instead.
```
This allocates an uninitialized chunk of memory and prints it to the console.
The [standard](https://www.npmjs.com/package/standard) linter (among others) catches that and warns people
to avoid using the unsafe API.
Let's now throw in `safe-buffer`!
```console
$ cat example.safe-buffer.js
const Buffer = require('safe-buffer').Buffer
console.log(Buffer(20))
$ standard example.safe-buffer.js
$ ./node-v6.13.0-linux-x64/bin/node example.safe-buffer.js
<Buffer 08 00 00 00 00 00 00 00 28 58 01 82 fe 7f 00 00 00 00 00 00>
```
See the problem? Adding in `safe-buffer` _magically removes the lint warning_, but the behavior
remains identical to what we had before, and when launched on Node.js 6.x LTS — this dumps out
chunks of uninitialized memory.
_And this code will still emit runtime warnings on Node.js 10.x and above._
That was done by design. I first considered changing `safe-buffer`, prohibiting old API usage or
emitting warnings on it, but that significantly diverges from `safe-buffer` design. After some
discussion, it was decided to move my approach into a separate package, and _this is that separate
package_.
This footgun is not imaginary — I observed top-downloaded packages doing that kind of thing,
«fixing» the lint warning by blindly including `safe-buffer` without any actual changes.
Also in some cases, even if the API _was_ migrated to use of safe Buffer API — a random pull request
can bring unsafe Buffer API usage back to the codebase by adding new calls — and that could go
unnoticed even if you have a linter prohibiting that (because of the reason stated above), and even
pass CI. _I also observed that being done in popular packages._
Some examples:
* [webdriverio](https://github.com/webdriverio/webdriverio/commit/05cbd3167c12e4930f09ef7cf93b127ba4effae4#diff-124380949022817b90b622871837d56cR31)
(a module with 548 759 downloads/month),
* [websocket-stream](https://github.com/maxogden/websocket-stream/commit/c9312bd24d08271687d76da0fe3c83493871cf61)
(218 288 d/m, fix in [maxogden/websocket-stream#142](https://github.com/maxogden/websocket-stream/pull/142)),
* [node-serialport](https://github.com/node-serialport/node-serialport/commit/e8d9d2b16c664224920ce1c895199b1ce2def48c)
(113 138 d/m, fix in [node-serialport/node-serialport#1510](https://github.com/node-serialport/node-serialport/pull/1510)),
* [karma](https://github.com/karma-runner/karma/commit/3d94b8cf18c695104ca195334dc75ff054c74eec)
(3 973 193 d/m, fix in [karma-runner/karma#2947](https://github.com/karma-runner/karma/pull/2947)),
* [spdy-transport](https://github.com/spdy-http2/spdy-transport/commit/5375ac33f4a62a4f65bcfc2827447d42a5dbe8b1)
(5 970 727 d/m, fix in [spdy-http2/spdy-transport#53](https://github.com/spdy-http2/spdy-transport/pull/53)).
* And there are a lot more over the ecosystem.
I filed a PR at
[mysticatea/eslint-plugin-node#110](https://github.com/mysticatea/eslint-plugin-node/pull/110) to
partially fix that (for cases when that lint rule is used), but it is a semver-major change for
linter rules and presets, so it would take significant time for that to reach actual setups.
_It also hasn't been released yet (2018-03-20)._
Also, `safer-buffer` discourages the usage of `.allocUnsafe()`, which is often used by mistake.
It still supports it behind an explicit concern barrier, by placing it under
`require('safer-buffer/dangerous')`.
## But isn't throwing bad?
Not really. It's an error that could be noticed and fixed early, instead of causing havoc later like
unguarded `new Buffer()` calls that end up receiving user input can do.
This package affects only the files where `var Buffer = require('safer-buffer').Buffer` was done, so
it is really simple to keep track of things and make sure that you don't mix old API usage with that.
Also, CI should catch anything that you might have missed.
New commits, if tested, won't land new usage of unsafe Buffer API this way.
_Node.js 10.x also deals with that by printing a runtime deprecation warning._
### Would it affect third-party modules?
No, unless you explicitly do an awful thing like monkey-patching or overriding the built-in `Buffer`.
Don't do that.
### But I don't want throwing…
That is also fine!
Also, it could be better in some cases when you don't have comprehensive enough test coverage.
In that case — just don't override `Buffer` and use
`var SaferBuffer = require('safer-buffer').Buffer` instead.
That way, everything using `Buffer` natively would still work, but there would be two drawbacks:
* `Buffer.from`/`Buffer.alloc` won't be polyfilled — use `SaferBuffer.from` and
`SaferBuffer.alloc` instead.
* You are still open to accidentally using the insecure deprecated API — use a linter to catch that.
Note that using a linter to catch accidental `Buffer` constructor usage in this case is strongly
recommended. `Buffer` is not overridden in this use case, so linters won't get confused.
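A minimal sketch of that setup:

```js
var SaferBuffer = require('safer-buffer').Buffer

var safe = SaferBuffer.alloc(16)     // zero-filled, even on old Node.js versions
var parsed = SaferBuffer.from('abc') // safe replacement for `new Buffer('abc')`
// The built-in `Buffer` stays untouched, so the rest of your code keeps working —
// rely on a linter to catch any remaining `new Buffer(...)` calls.
```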
## «Without footguns»?
Well, it is still possible to do _some_ things with `Buffer` API, e.g. accessing `.buffer` property
on older versions and duping things from there. You shouldn't do that in your code, probably.
The intention is to remove the most significant footguns that affect lots of packages in the
ecosystem, and to do it in the proper way.
Also, this package doesn't protect against security issues affecting some Node.js versions, so for
usage in your own production code, it is still recommended to update to a Node.js version
[supported by upstream](https://github.com/nodejs/release#release-schedule).
'use strict'
var buffer = require('buffer')
var Buffer = buffer.Buffer
var safer = {}
var key
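// Re-export everything from the `buffer` module except `SlowBuffer` and the
// unsafe `Buffer` constructor itself.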
for (key in buffer) {
if (!buffer.hasOwnProperty(key)) continue
if (key === 'SlowBuffer' || key === 'Buffer') continue
safer[key] = buffer[key]
}
var Safer = safer.Buffer = {}
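// Copy Buffer's static members onto the replacement, leaving out the unsafe
// `allocUnsafe`/`allocUnsafeSlow`.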
for (key in Buffer) {
if (!Buffer.hasOwnProperty(key)) continue
if (key === 'allocUnsafe' || key === 'allocUnsafeSlow') continue
Safer[key] = Buffer[key]
}
safer.Buffer.prototype = Buffer.prototype
if (!Safer.from || Safer.from === Uint8Array.from) {
Safer.from = function (value, encodingOrOffset, length) {
if (typeof value === 'number') {
throw new TypeError('The "value" argument must not be of type number. Received type ' + typeof value)
}
if (value && typeof value.length === 'undefined') {
throw new TypeError('The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type ' + typeof value)
}
return Buffer(value, encodingOrOffset, length)
}
}
if (!Safer.alloc) {
Safer.alloc = function (size, fill, encoding) {
if (typeof size !== 'number') {
throw new TypeError('The "size" argument must be of type number. Received type ' + typeof size)
}
if (size < 0 || size >= 2 * (1 << 30)) {
throw new RangeError('The value "' + size + '" is invalid for option "size"')
}
var buf = Buffer(size)
if (!fill || fill.length === 0) {
buf.fill(0)
} else if (typeof encoding === 'string') {
buf.fill(fill, encoding)
} else {
buf.fill(fill)
}
return buf
}
}
if (!safer.kStringMaxLength) {
try {
safer.kStringMaxLength = process.binding('buffer').kStringMaxLength
} catch (e) {
// we can't determine kStringMaxLength in environments where process.binding
// is unsupported, so let's not set it
}
}
if (!safer.constants) {
safer.constants = {
MAX_LENGTH: safer.kMaxLength
}
if (safer.kStringMaxLength) {
safer.constants.MAX_STRING_LENGTH = safer.kStringMaxLength
}
}
module.exports = safer
# Porting to the Buffer.from/Buffer.alloc API
<a id="overview"></a>
## Overview
- [Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x.](#variant-1) (*recommended*)
- [Variant 2: Use a polyfill](#variant-2)
- [Variant 3: manual detection, with safeguards](#variant-3)
### Finding problematic bits of code using grep
Just run `grep -nrE '[^a-zA-Z](Slow)?Buffer\s*\(' --exclude-dir node_modules`.
It will find all the potentially unsafe places in your own code (with some considerably unlikely
exceptions).
### Finding problematic bits of code using Node.js 8
If you’re using Node.js ≥ 8.0.0 (which is recommended), Node.js exposes multiple options that help with finding the relevant pieces of code:
- `--trace-warnings` will make Node.js show a stack trace for this warning and other warnings that are printed by Node.js.
- `--trace-deprecation` does the same thing, but only for deprecation warnings.
- `--pending-deprecation` will show more types of deprecation warnings. In particular, it will show the `Buffer()` deprecation warning, even on Node.js 8.
You can set these flags using an environment variable:
```console
$ export NODE_OPTIONS='--trace-warnings --pending-deprecation'
$ cat example.js
'use strict';
const foo = new Buffer('foo');
$ node example.js
(node:7147) [DEP0005] DeprecationWarning: The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead.
at showFlaggedDeprecation (buffer.js:127:13)
at new Buffer (buffer.js:148:3)
at Object.<anonymous> (/path/to/example.js:2:13)
[... more stack trace lines ...]
```
### Finding problematic bits of code using linters
Eslint rules [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor)
or
[node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md)
also find calls to deprecated `Buffer()` API. Those rules are included in some pre-sets.
There is a drawback, though, that it doesn't always
[work correctly](https://github.com/chalker/safer-buffer#why-not-safe-buffer) when `Buffer` is
overridden e.g. with a polyfill, so a combination of this and some other method
described above is recommended.
<a id="variant-1"></a>
## Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x.
This is the recommended solution nowadays that would imply only minimal overhead.
The Node.js 5.x release line has been unsupported since July 2016, and the Node.js 4.x release line reaches its End of Life in April 2018 (→ [Schedule](https://github.com/nodejs/Release#release-schedule)). This means that these versions of Node.js will *not* receive any updates, even in case of security issues, so using these release lines should be avoided, if at all possible.
What you would do in this case is to convert all `new Buffer()` or `Buffer()` calls to use `Buffer.alloc()` or `Buffer.from()`, in the following way:
- For `new Buffer(number)`, replace it with `Buffer.alloc(number)`.
- For `new Buffer(string)` (or `new Buffer(string, encoding)`), replace it with `Buffer.from(string)` (or `Buffer.from(string, encoding)`).
- For all other combinations of arguments (these are much rarer), also replace `new Buffer(...arguments)` with `Buffer.from(...arguments)`.
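For example, a before/after sketch of these replacements (the variable names and `userInput` are made up):

```js
// Before
var header = new Buffer(16);
var payload = new Buffer(userInput, 'utf8'); // `userInput` is assumed to be a string

// After
var header = Buffer.alloc(16);
var payload = Buffer.from(userInput, 'utf8');
```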
Note that `Buffer.alloc()` is also _faster_ on the current Node.js versions than
`new Buffer(size).fill(0)`, which is what you would otherwise need to ensure zero-filling.
Enabling eslint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor)
or
[node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md)
is recommended to avoid accidental unsafe Buffer API usage.
There is also a [JSCodeshift codemod](https://github.com/joyeecheung/node-dep-codemod#dep005)
for automatically migrating Buffer constructors to `Buffer.alloc()` or `Buffer.from()`.
Note that it currently only works with cases where the arguments are literals or where the
constructor is invoked with two arguments.
_If you currently support those older Node.js versions and dropping them would be a semver-major change
for you, or if you support older branches of your packages, consider using [Variant 2](#variant-2)
or [Variant 3](#variant-3) on older branches, so people using those older branches will also receive
the fix. That way, you will eradicate potential issues caused by unguarded Buffer API usage and
your users will not observe a runtime deprecation warning when running your code on Node.js 10._
<a id="variant-2"></a>
## Variant 2: Use a polyfill
Utilize [safer-buffer](https://www.npmjs.com/package/safer-buffer) as a polyfill to support older
Node.js versions.
You would take exactly the same steps as in [Variant 1](#variant-1), but with a polyfill
`const Buffer = require('safer-buffer').Buffer` in all files where you use the new `Buffer` API.
Make sure that you do not use old `new Buffer` API — in any files where the line above is added,
using old `new Buffer()` API will _throw_. It will be easy to notice that in CI, though.
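A quick sketch of what an opted-in file looks like (illustrative only):

```js
const Buffer = require('safer-buffer').Buffer

Buffer.from('abc')  // fine
Buffer.alloc(16)    // fine, zero-filled
new Buffer('abc')   // throws — the old API is not available through the polyfill
```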
Alternatively, you could use [buffer-from](https://www.npmjs.com/package/buffer-from) and/or
[buffer-alloc](https://www.npmjs.com/package/buffer-alloc) [ponyfills](https://ponyfill.com/) —
those are great, the only downsides being 4 deps in the tree and slightly more code changes to
migrate off them (as you would be using e.g. `Buffer.from` under a different name). If you need only
`Buffer.from` polyfilled — `buffer-from` alone is enough, and it comes with no extra dependencies.
_Alternatively, you could use [safe-buffer](https://www.npmjs.com/package/safe-buffer) — it also
provides a polyfill, but takes a different approach which has
[its drawbacks](https://github.com/chalker/safer-buffer#why-not-safe-buffer). It will allow you
to also use the older `new Buffer()` API in your code, though — but that's arguably a benefit, as
it is problematic, can cause issues in your code, and will start emitting runtime deprecation
warnings starting with Node.js 10._
Note that in either case, it is important that you also remove all calls to the old Buffer
API manually — just throwing in `safe-buffer` doesn't fix the problem by itself, it just provides
a polyfill for the new API. I have seen people doing that mistake.
Enabling eslint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor)
or
[node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md)
is recommended.
_Don't forget to drop the polyfill usage once you drop support for Node.js < 4.5.0._
<a id="variant-3"></a>
## Variant 3 — manual detection, with safeguards
This is useful if you create Buffer instances in only a few places (e.g. one), or you have your own
wrapper around them.
### Buffer(0)
This special case for creating empty buffers can be safely replaced with `Buffer.concat([])`, which
returns the same result all the way down to Node.js 0.8.x.
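A one-line sketch:

```js
var emptyBuf = Buffer.concat([]); // zero-length buffer, works down to Node.js 0.8.x
```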
### Buffer(notNumber)
Before:
```js
var buf = new Buffer(notNumber, encoding);
```
After:
```js
var buf;
if (Buffer.from && Buffer.from !== Uint8Array.from) {
buf = Buffer.from(notNumber, encoding);
} else {
if (typeof notNumber === 'number')
throw new Error('The "size" argument must be of type number.');
buf = new Buffer(notNumber, encoding);
}
```
`encoding` is optional.
Note that the `typeof notNumber` before `new Buffer` is required (for cases when `notNumber` argument is not
hard-coded) and _is not caused by the deprecation of Buffer constructor_ — it's exactly _why_ the
Buffer constructor is deprecated. Ecosystem packages lacking this type-check caused numerous
security issues — situations where unsanitized user input could end up in a `Buffer(arg)` call, creating
problems ranging from DoS to leaking sensitive information to the attacker from the process memory.
When `notNumber` argument is hardcoded (e.g. literal `"abc"` or `[0,1,2]`), the `typeof` check can
be omitted.
Also note that using TypeScript does not fix this problem for you — when libs written in
`TypeScript` are used from JS, or when user input ends up there — it behaves exactly as pure JS, as
all type checks are translation-time only and are not present in the actual JS code which TS
compiles to.
### Buffer(number)
For Node.js 0.10.x (and below) support:
```js
var buf;
if (Buffer.alloc) {
buf = Buffer.alloc(number);
} else {
buf = new Buffer(number);
buf.fill(0);
}
```
Otherwise (Node.js ≥ 0.12.x):
```js
const buf = Buffer.alloc ? Buffer.alloc(number) : new Buffer(number).fill(0);
```
## Regarding Buffer.allocUnsafe
Be extra cautious when using `Buffer.allocUnsafe`:
* Don't use it if you don't have a good reason to
* e.g. you probably won't ever see a performance difference for small buffers, in fact, those
might be even faster with `Buffer.alloc()`,
* if your code is not in the hot code path — you also probably won't notice a difference,
* keep in mind that zero-filling minimizes the potential risks.
* If you use it, make sure that you never return the buffer in a partially-filled state,
* if you are writing to it sequentially — always truncate it to the actual written length (see the sketch below)
Errors in handling buffers allocated with `Buffer.allocUnsafe` could result in various issues,
ranging from undefined behaviour of your code to sensitive data (user input, passwords, certs)
leaking to the remote attacker.
_Note that the same applies to `new Buffer` usage without zero-filling, depending on the Node.js
version (and lacking type checks also adds DoS to the list of potential problems)._
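For the sequential-write case above, a hedged sketch of the truncation step (`input` is a made-up string variable):

```js
const scratch = Buffer.allocUnsafe(1024);
const written = scratch.write(input);     // number of bytes actually written
const result = scratch.slice(0, written); // expose only the written part, never the rest
```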
<a id="faq"></a>
## FAQ
<a id="design-flaws"></a>
### What is wrong with the `Buffer` constructor?
The `Buffer` constructor could be used to create a buffer in many different ways:
- `new Buffer(42)` creates a `Buffer` of 42 bytes. Before Node.js 8, this buffer contained
*arbitrary memory* for performance reasons, which could include anything ranging from
program source code to passwords and encryption keys.
- `new Buffer('abc')` creates a `Buffer` that contains the UTF-8-encoded version of
the string `'abc'`. A second argument could specify another encoding: For example,
`new Buffer(string, 'base64')` could be used to convert a Base64 string into the original
sequence of bytes that it represents.
- There are several other combinations of arguments.
This meant that, in code like `var buffer = new Buffer(foo);`, *it is not possible to tell
what exactly the contents of the generated buffer are* without knowing the type of `foo`.
Sometimes, the value of `foo` comes from an external source. For example, this function
could be exposed as a service on a web server, converting a UTF-8 string into its Base64 form:
```
function stringToBase64(req, res) {
// The request body should have the format of `{ string: 'foobar' }`
const rawBytes = new Buffer(req.body.string)
const encoded = rawBytes.toString('base64')
res.end({ encoded: encoded })
}
```
Note that this code does *not* validate the type of `req.body.string`:
- `req.body.string` is expected to be a string. If this is the case, all goes well.
- `req.body.string` is controlled by the client that sends the request.
- If `req.body.string` is the *number* `50`, the `rawBytes` would be 50 bytes:
- Before Node.js 8, the content would be uninitialized
- After Node.js 8, the content would be `50` bytes with the value `0`
Because of the missing type check, an attacker could intentionally send a number
as part of the request. Using this, they can either:
- Read uninitialized memory. This **will** leak passwords, encryption keys and other
kinds of sensitive information. (Information leak)
- Force the program to allocate a large amount of memory. For example, when specifying
`500000000` as the input value, each request will allocate 500MB of memory.
This can be used to either exhaust the memory available of a program completely
and make it crash, or slow it down significantly. (Denial of Service)
Both of these scenarios are considered serious security issues in a real-world
web server context.
When using `Buffer.from(req.body.string)` instead, passing a number will always
throw an exception, giving a controlled behaviour that can always be
handled by the program.
<a id="ecosystem-usage"></a>
### The `Buffer()` constructor has been deprecated for a while. Is this really an issue?
Surveys of code in the `npm` ecosystem have shown that the `Buffer()` constructor is still
widely used. This includes new code, and overall usage of such code has actually been
*increasing*.
<a href="http://promisesaplus.com/">
<img src="https://promises-aplus.github.io/promises-spec/assets/logo-small.png" align="right" alt="Promises/A+ logo" />
</a>
# request-promise-core
[](https://gitter.im/request/request-promise?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://travis-ci.org/request/promise-core)
[](https://coveralls.io/r/request/promise-core)
[](https://david-dm.org/request/promise-core)
[](https://snyk.io/test/npm/promise-core)
This package is the core for the following packages:
- [`request-promise`](https://github.com/request/request-promise)
- [`request-promise-any`](https://github.com/request/request-promise-any)
- [`request-promise-bluebird`](https://github.com/request/request-promise-bluebird)
- [`request-promise-native`](https://github.com/request/request-promise-native)
`request-promise-core` contains the core logic to add Promise support to [`request`](https://github.com/request/request).
Please use one of the libraries above. It is only recommended to use this library directly, if you have very specific requirements.
## Installation for `request@^2.34`
This module is installed via npm:
```
npm install --save request
npm install --save request-promise-core
```
`request` is defined as a peer-dependency and thus has to be installed separately.
## Usage for `request@^2.34`
``` js
// 1. Load the request library
// Only use a direct require if you are 100% sure that:
// - Your project does not use request directly. That is without the Promise capabilities by calling require('request').
// - None of the installed libraries use request.
// ...because Request's prototype will be patched in step 2.
/* var request = require('request'); */
// Instead use:
var stealthyRequire = require('stealthy-require');
var request = stealthyRequire(require.cache, function () {
return require('request');
});
// 2. Add Promise support to request
var configure = require('request-promise-core/configure/request2');
configure({
request: request,
// Pass your favorite ES6-compatible promise implementation
PromiseImpl: Promise,
// Expose all methods of the promise instance you want to call on the request(...) call
expose: [
'then', // Allows to use request(...).then(...)
'catch', // Allows to use request(...).catch(...)
'promise' // Allows to use request(...).promise() which returns the promise instance
],
// Optional: Pass a callback that is called within the Promise constructor
constructorMixin: function (resolve, reject) {
// `this` is the request object
// Additional arguments may be passed depending on the PromiseImpl used
}
});
// 3. Use request with its promise capabilities
// E.g. crawl a web page:
request('http://www.google.com')
.then(function (htmlString) {
// Process html...
})
.catch(function (err) {
// Crawling failed...
});
```
## Installation and Usage for `request@next`
[Request Next](https://github.com/request/request/issues/1982) is still in alpha. However, `request-promise-core` is already designed to be compatible and ships with a configuration helper – `require('request-promise-core/configure/request-next')` – that is [used by `request-promise`](https://github.com/request/request-promise/blob/next/lib/rp.js) in its "next" branch.
## Contributing
To set up your development environment:
1. clone the repo to your desktop,
2. in the shell `cd` to the main folder,
3. hit `npm install`,
4. hit `npm install gulp -g` if you haven't installed gulp globally yet, and
5. run `gulp dev`. (Or run `node ./node_modules/.bin/gulp dev` if you don't want to install gulp globally.)
`gulp dev` watches all source files and if you save some changes it will lint the code and execute all tests. The test coverage report can be viewed from `./coverage/lcov-report/index.html`.
If you want to debug a test you should use `gulp test-without-coverage` to run all tests without obscuring the code by the test coverage instrumentation.
## Change History
- 1.1.4 (2020-07-21)
- Security fix: bumped `lodash` to `^4.17.19` following [this advisory](https://www.npmjs.com/advisories/1523).
- 1.1.3 (2019-11-03)
- Security fix: bumped `lodash` to `^4.17.15`. See [vulnerabilty reports](https://snyk.io/vuln/search?q=lodash&type=npm).
*(Thanks to @daniel-nagy for pull request [#20](https://github.com/request/promise-core/pull/20) and thanks to @quetzaluz for reporting this in issue [#21](https://github.com/request/promise-core/issues/21).)*
- 1.1.2 (2019-02-14)
- Security fix: bumped `lodash` to `^4.17.11`. See [vulnerabilty reports](https://snyk.io/vuln/search?q=lodash&type=npm).
*(Thanks to @lucaswillering and @sam-warren-finnair for reporting this in issues [#12](https://github.com/request/promise-core/issues/12) and [#13](https://github.com/request/promise-core/issues/13) and thanks to @Alec321 for pull request [#14](https://github.com/request/promise-core/pull/14).)*
- 1.1.1 (2016-08-08)
- Renamed package to `request-promise-core` because there were [too](https://github.com/request/request-promise/issues/137) [many](https://github.com/request/request-promise/issues/141) issues with the scoped package name `@request/promise-core`
- 1.1.0 (2016-07-30)
- Added `constructorMixin` option to enable [request/request-promise#123](https://github.com/request/request-promise/pull/123)
- 1.0.0 (2016-07-15)
- All tests green, ready for prime time
- 1.0.0-rc.1 (2016-07-10)
- Reimplementation of core logic based on `[email protected]`
- Plus `transform2xxOnly` option (fixes [request/request-promise#131](https://github.com/request/request-promise/issues/131))
## License (ISC)
In case you never heard about the [ISC license](http://en.wikipedia.org/wiki/ISC_license) it is functionally equivalent to the MIT license.
See the [LICENSE file](LICENSE) for details.
'use strict';
var errors = require('./errors.js'),
isFunction = require('lodash/isFunction'),
isObjectLike = require('lodash/isObjectLike'),
isString = require('lodash/isString'),
isUndefined = require('lodash/isUndefined');
module.exports = function (options) {
var errorText = 'Please verify options'; // For better minification because this string is repeating
if (!isObjectLike(options)) {
throw new TypeError(errorText);
}
if (!isFunction(options.PromiseImpl)) {
throw new TypeError(errorText + '.PromiseImpl');
}
if (!isUndefined(options.constructorMixin) && !isFunction(options.constructorMixin)) {
throw new TypeError(errorText + '.constructorMixin');
}
var PromiseImpl = options.PromiseImpl;
var constructorMixin = options.constructorMixin;
var plumbing = {};
plumbing.init = function (requestOptions) {
var self = this;
self._rp_promise = new PromiseImpl(function (resolve, reject) {
self._rp_resolve = resolve;
self._rp_reject = reject;
if (constructorMixin) {
constructorMixin.apply(self, arguments); // Using arguments since specific Promise libraries may pass additional parameters
}
});
self._rp_callbackOrig = requestOptions.callback;
requestOptions.callback = self.callback = function RP$callback(err, response, body) {
plumbing.callback.call(self, err, response, body);
};
if (isString(requestOptions.method)) {
requestOptions.method = requestOptions.method.toUpperCase();
}
requestOptions.transform = requestOptions.transform || plumbing.defaultTransformations[requestOptions.method];
self._rp_options = requestOptions;
self._rp_options.simple = requestOptions.simple !== false;
self._rp_options.resolveWithFullResponse = requestOptions.resolveWithFullResponse === true;
self._rp_options.transform2xxOnly = requestOptions.transform2xxOnly === true;
};
plumbing.defaultTransformations = {
HEAD: function (body, response, resolveWithFullResponse) {
return resolveWithFullResponse ? response : response.headers;
}
};
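// Settles the promise for a finished request: transport errors reject with a
// RequestError, non-2xx responses reject with a StatusCodeError when the
// `simple` option is on, and everything else resolves with the (optionally
// transformed) body or the full response.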
plumbing.callback = function (err, response, body) {
var self = this;
var origCallbackThrewException = false, thrownException = null;
if (isFunction(self._rp_callbackOrig)) {
try {
self._rp_callbackOrig.apply(self, arguments); // TODO: Apply to self mimics behavior of request@2. Is that also right for request@next?
} catch (e) {
origCallbackThrewException = true;
thrownException = e;
}
}
var is2xx = !err && /^2/.test('' + response.statusCode);
if (err) {
self._rp_reject(new errors.RequestError(err, self._rp_options, response));
} else if (self._rp_options.simple && !is2xx) {
if (isFunction(self._rp_options.transform) && self._rp_options.transform2xxOnly === false) {
(new PromiseImpl(function (resolve) {
resolve(self._rp_options.transform(body, response, self._rp_options.resolveWithFullResponse)); // transform may return a Promise
}))
.then(function (transformedResponse) {
self._rp_reject(new errors.StatusCodeError(response.statusCode, body, self._rp_options, transformedResponse));
})
.catch(function (transformErr) {
self._rp_reject(new errors.TransformError(transformErr, self._rp_options, response));
});
} else {
self._rp_reject(new errors.StatusCodeError(response.statusCode, body, self._rp_options, response));
}
} else {
if (isFunction(self._rp_options.transform) && (is2xx || self._rp_options.transform2xxOnly === false)) {
(new PromiseImpl(function (resolve) {
resolve(self._rp_options.transform(body, response, self._rp_options.resolveWithFullResponse)); // transform may return a Promise
}))
.then(function (transformedResponse) {
self._rp_resolve(transformedResponse);
})
.catch(function (transformErr) {
self._rp_reject(new errors.TransformError(transformErr, self._rp_options, response));
});
} else if (self._rp_options.resolveWithFullResponse) {
self._rp_resolve(response);
} else {
self._rp_resolve(body);
}
}
if (origCallbackThrewException) {
throw thrownException;
}
};
plumbing.exposePromiseMethod = function (exposeTo, bindTo, promisePropertyKey, methodToExpose, exposeAs) {
exposeAs = exposeAs || methodToExpose;
if (exposeAs in exposeTo) {
throw new Error('Unable to expose method "' + exposeAs + '"');
}
exposeTo[exposeAs] = function RP$exposed() {
var self = bindTo || this;
return self[promisePropertyKey][methodToExpose].apply(self[promisePropertyKey], arguments);
};
};
plumbing.exposePromise = function (exposeTo, bindTo, promisePropertyKey, exposeAs) {
exposeAs = exposeAs || 'promise';
if (exposeAs in exposeTo) {
throw new Error('Unable to expose method "' + exposeAs + '"');
}
exposeTo[exposeAs] = function RP$promise() {
var self = bindTo || this;
return self[promisePropertyKey];
};
};
return plumbing;
};
"use strict";
/**
* Character classes for XML.
*
* @deprecated since 1.3.0. Import from the ``xml`` and ``xmlns`` hierarchies
* instead.
*
* @author Louis-Dominique Dubeau
* @license MIT
* @copyright Louis-Dominique Dubeau
*/
Object.defineProperty(exports, "__esModule", { value: true });
var ed4 = require("xmlchars/xml/1.0/ed4");
var ed5 = require("xmlchars/xml/1.0/ed5");
var nsed3 = require("xmlchars/xmlns/1.0/ed3");
// tslint:disable-next-line:no-console
console.warn("DEPRECATION WARNING: the xmlchar *module* is deprecated: please \
replace e.g. require('xmlchars') with require('xmlchars/xml/...')");
/**
* Character class utilities for XML 1.0.
*/
// tslint:disable-next-line:no-namespace
var XML_1_0;
(function (XML_1_0) {
/**
* Fifth edition.
*/
var ED5;
(function (ED5) {
/**
* Regular expression fragments. These fragments are designed to be included
* inside square brackets in a regular expression.
*/
var fragments;
(function (fragments) {
fragments.CHAR = ed5.CHAR;
fragments.S = ed5.S;
fragments.NAME_START_CHAR = ed5.NAME_START_CHAR;
fragments.NAME_CHAR = ed5.NAME_CHAR;
})(fragments = ED5.fragments || (ED5.fragments = {}));
/**
* Regular expression. These correspond to the productions of the same name
* in the specification.
*/
var regexes;
(function (regexes) {
regexes.CHAR = ed5.CHAR_RE;
regexes.S = ed5.S_RE;
regexes.NAME_START_CHAR = ed5.NAME_START_CHAR_RE;
regexes.NAME_CHAR = ed5.NAME_CHAR_RE;
regexes.NAME = ed5.NAME_RE;
regexes.NMTOKEN = ed5.NMTOKEN_RE;
})(regexes = ED5.regexes || (ED5.regexes = {}));
/**
* Lists of characters.
*
* The names defined in this namespace are arrays of codepoints which
* contain the set of codepoints that an XML production encompasses. Note
* that many productions are too large to be reasonably represented as sets.
*/
var lists;
(function (lists) {
lists.S = ed5.S_LIST;
})(lists = ED5.lists || (ED5.lists = {}));
/**
* Determines whether a codepoint matches the ``CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``CHAR``.
*/
ED5.isChar = ed5.isChar;
/**
* Determines whether a codepoint matches the ``S`` (space) production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``S``.
*/
ED5.isS = ed5.isS;
/**
* Determines whether a codepoint matches the ``NAME_START_CHAR``
* production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_START_CHAR``.
*/
ED5.isNameStartChar = ed5.isNameStartChar;
/**
* Determines whether a codepoint matches the ``NAME_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_CHAR``.
*/
ED5.isNameChar = ed5.isNameChar;
})(ED5 = XML_1_0.ED5 || (XML_1_0.ED5 = {}));
/**
* Fourth edition. These are deprecated in the 5th edition but some of the
* standards related to XML 1.0 (e.g. XML Schema 1.0) refer to these. So they
* are still generally useful.
*/
var ED4;
(function (ED4) {
/**
* Regular expression fragments. These fragments are designed to be included
* inside square brackets in a regular expression.
*/
var fragments;
(function (fragments) {
fragments.CHAR = ed4.CHAR;
fragments.S = ed4.S;
fragments.BASE_CHAR = ed4.BASE_CHAR;
fragments.IDEOGRAPHIC = ed4.IDEOGRAPHIC;
fragments.COMBINING_CHAR = ed4.COMBINING_CHAR;
fragments.DIGIT = ed4.DIGIT;
fragments.EXTENDER = ed4.EXTENDER;
fragments.LETTER = ed4.LETTER;
fragments.NAME_CHAR = ed4.NAME_CHAR;
})(fragments = ED4.fragments || (ED4.fragments = {}));
/**
* Regular expression. These correspond to the productions of the same
* name in the specification.
*/
var regexes;
(function (regexes) {
regexes.CHAR = ed4.CHAR_RE;
regexes.S = ed4.S_RE;
regexes.BASE_CHAR = ed4.BASE_CHAR_RE;
regexes.IDEOGRAPHIC = ed4.IDEOGRAPHIC_RE;
regexes.COMBINING_CHAR = ed4.COMBINING_CHAR_RE;
regexes.DIGIT = ed4.DIGIT_RE;
regexes.EXTENDER = ed4.EXTENDER_RE;
regexes.LETTER = ed4.LETTER_RE;
regexes.NAME_CHAR = ed4.NAME_CHAR_RE;
regexes.NAME = ed4.NAME_RE;
regexes.NMTOKEN = ed4.NMTOKEN_RE;
})(regexes = ED4.regexes || (ED4.regexes = {}));
})(ED4 = XML_1_0.ED4 || (XML_1_0.ED4 = {}));
})(XML_1_0 = exports.XML_1_0 || (exports.XML_1_0 = {}));
/**
* Character class utilities for XML NS 1.0.
*/
// tslint:disable-next-line:no-namespace
var XMLNS_1_0;
(function (XMLNS_1_0) {
/**
* Third edition.
*/
var ED3;
(function (ED3) {
/**
* Regular expression fragments. These fragments are designed to be included
* inside square brackets in a regular expression.
*/
var fragments;
(function (fragments) {
fragments.NC_NAME_START_CHAR = nsed3.NC_NAME_START_CHAR;
fragments.NC_NAME_CHAR = nsed3.NC_NAME_CHAR;
})(fragments = ED3.fragments || (ED3.fragments = {}));
/**
* Regular expression. These correspond to the productions of the same name
* in the specification.
*/
var regexes;
(function (regexes) {
regexes.NC_NAME_START_CHAR = nsed3.NC_NAME_START_CHAR_RE;
regexes.NC_NAME_CHAR = nsed3.NC_NAME_CHAR_RE;
regexes.NC_NAME = nsed3.NC_NAME_RE;
})(regexes = ED3.regexes || (ED3.regexes = {}));
/**
* Determines whether a codepoint matches
* [[regexes.NC_NAME_START_CHAR]].
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches.
*/
ED3.isNCNameStartChar = nsed3.isNCNameStartChar;
/**
* Determines whether a codepoint matches [[regexes.NC_NAME_CHAR]].
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches.
*/
ED3.isNCNameChar = nsed3.isNCNameChar;
})(ED3 = XMLNS_1_0.ED3 || (XMLNS_1_0.ED3 = {}));
})(XMLNS_1_0 = exports.XMLNS_1_0 || (exports.XMLNS_1_0 = {}));
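// Usage sketch (illustrative):
//   const { XML_1_0 } = require("xmlchars");
//   XML_1_0.ED5.isNameStartChar("A".codePointAt(0)); // true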
//# sourceMappingURL=xmlchars.js.map
import * as ed5 from "xmlchars/xml/1.0/ed5";
import * as nsed3 from "xmlchars/xmlns/1.0/ed3";
/**
* Character class utilities for XML 1.0.
*/
export declare namespace XML_1_0 {
/**
* Fifth edition.
*/
namespace ED5 {
/**
* Regular expression fragments. These fragments are designed to be included
* inside square brackets in a regular expression.
*/
namespace fragments {
const CHAR = "\t\n\r -\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF";
const S = " \t\r\n";
const NAME_START_CHAR = ":A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\uD800\uDC00-\uDB7F\uDFFF";
const NAME_CHAR: string;
}
/**
* Regular expression. These correspond to the productions of the same name
* in the specification.
*/
namespace regexes {
const CHAR: RegExp;
const S: RegExp;
const NAME_START_CHAR: RegExp;
const NAME_CHAR: RegExp;
const NAME: RegExp;
const NMTOKEN: RegExp;
}
/**
* Lists of characters.
*
* The names defined in this namespace are arrays of codepoints which
* contain the set of codepoints that an XML production encompasses. Note
* that many productions are too large to be reasonably represented as sets.
*/
namespace lists {
const S: number[];
}
/**
* Determines whether a codepoint matches the ``CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``CHAR``.
*/
const isChar: typeof ed5.isChar;
/**
* Determines whether a codepoint matches the ``S`` (space) production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``S``.
*/
const isS: typeof ed5.isS;
/**
* Determines whether a codepoint matches the ``NAME_START_CHAR``
* production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_START_CHAR``.
*/
const isNameStartChar: typeof ed5.isNameStartChar;
/**
* Determines whether a codepoint matches the ``NAME_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_CHAR``.
*/
const isNameChar: typeof ed5.isNameChar;
}
/**
* Fourth edition. These are deprecated in the 5th edition but some of the
* standards related to XML 1.0 (e.g. XML Schema 1.0) refer to these. So they
* are still generally useful.
*/
namespace ED4 {
/**
* Regular expression fragments. These fragments are designed to be included
* inside square brackets in a regular expression.
*/
namespace fragments {
const CHAR = "\t\n\r -\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF";
const S = " \t\r\n";
const BASE_CHAR = "A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF\u0100-\u0131\u0134-\u013E\u0141-\u0148\u014A-\u017E\u0180-\u01C3\u01CD-\u01F0\u01F4-\u01F5\u01FA-\u0217\u0250-\u02A8\u02BB-\u02C1\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03D6\u03DA\u03DC\u03DE\u03E0\u03E2-\u03F3\u0401-\u040C\u040E-\u044F\u0451-\u045C\u045E-\u0481\u0490-\u04C4\u04C7-\u04C8\u04CB-\u04CC\u04D0-\u04EB\u04EE-\u04F5\u04F8-\u04F9\u0531-\u0556\u0559\u0561-\u0586\u05D0-\u05EA\u05F0-\u05F2\u0621-\u063A\u0641-\u064A\u0671-\u06B7\u06BA-\u06BE\u06C0-\u06CE\u06D0-\u06D3\u06D5\u06E5-\u06E6\u0905-\u0939\u093D\u0958-\u0961\u0985-\u098C\u098F-\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09DC-\u09DD\u09DF-\u09E1\u09F0-\u09F1\u0A05-\u0A0A\u0A0F-\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32-\u0A33\u0A35-\u0A36\u0A38-\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8B\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2-\u0AB3\u0AB5-\u0AB9\u0ABD\u0AE0\u0B05-\u0B0C\u0B0F-\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32-\u0B33\u0B36-\u0B39\u0B3D\u0B5C-\u0B5D\u0B5F-\u0B61\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99-\u0B9A\u0B9C\u0B9E-\u0B9F\u0BA3-\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB5\u0BB7-\u0BB9\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C60-\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CDE\u0CE0-\u0CE1\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D60-\u0D61\u0E01-\u0E2E\u0E30\u0E32-\u0E33\u0E40-\u0E45\u0E81-\u0E82\u0E84\u0E87-\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA-\u0EAB\u0EAD-\u0EAE\u0EB0\u0EB2-\u0EB3\u0EBD\u0EC0-\u0EC4\u0F40-\u0F47\u0F49-\u0F69\u10A0-\u10C5\u10D0-\u10F6\u1100\u1102-\u1103\u1105-\u1107\u1109\u110B-\u110C\u110E-\u1112\u113C\u113E\u1140\u114C\u114E\u1150\u1154-\u1155\u1159\u115F-\u1161\u1163\u1165\u1167\u1169\u116D-\u116E\u1172-\u1173\u1175\u119E\u11A8\u11AB\u11AE-\u11AF\u11B7-\u11B8\u11BA\u11BC-\u11C2\u11EB\u11F0\u11F9\u1E00-\u1E9B\u1EA0-\u1EF9\u1F00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2126\u212A-\u212B\u212E\u2180-\u2182\u3041-\u3094\u30A1-\u30FA\u3105-\u312C\uAC00-\uD7A3";
const IDEOGRAPHIC = "\u4E00-\u9FA5\u3007\u3021-\u3029";
const COMBINING_CHAR = "\u0300-\u0345\u0360-\u0361\u0483-\u0486\u0591-\u05A1\u05A3-\u05B9\u05BB-\u05BD\u05BF\u05C1-\u05C2\u05C4\u064B-\u0652\u0670\u06D6-\u06DC\u06DD-\u06DF\u06E0-\u06E4\u06E7-\u06E8\u06EA-\u06ED\u0901-\u0903\u093C\u093E-\u094C\u094D\u0951-\u0954\u0962-\u0963\u0981-\u0983\u09BC\u09BE\u09BF\u09C0-\u09C4\u09C7-\u09C8\u09CB-\u09CD\u09D7\u09E2-\u09E3\u0A02\u0A3C\u0A3E\u0A3F\u0A40-\u0A42\u0A47-\u0A48\u0A4B-\u0A4D\u0A70-\u0A71\u0A81-\u0A83\u0ABC\u0ABE-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0B01-\u0B03\u0B3C\u0B3E-\u0B43\u0B47-\u0B48\u0B4B-\u0B4D\u0B56-\u0B57\u0B82-\u0B83\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD7\u0C01-\u0C03\u0C3E-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55-\u0C56\u0C82-\u0C83\u0CBE-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5-\u0CD6\u0D02-\u0D03\u0D3E-\u0D43\u0D46-\u0D48\u0D4A-\u0D4D\u0D57\u0E31\u0E34-\u0E3A\u0E47-\u0E4E\u0EB1\u0EB4-\u0EB9\u0EBB-\u0EBC\u0EC8-\u0ECD\u0F18-\u0F19\u0F35\u0F37\u0F39\u0F3E\u0F3F\u0F71-\u0F84\u0F86-\u0F8B\u0F90-\u0F95\u0F97\u0F99-\u0FAD\u0FB1-\u0FB7\u0FB9\u20D0-\u20DC\u20E1\u302A-\u302F\u3099\u309A";
const DIGIT = "0-9\u0660-\u0669\u06F0-\u06F9\u0966-\u096F\u09E6-\u09EF\u0A66-\u0A6F\u0AE6-\u0AEF\u0B66-\u0B6F\u0BE7-\u0BEF\u0C66-\u0C6F\u0CE6-\u0CEF\u0D66-\u0D6F\u0E50-\u0E59\u0ED0-\u0ED9\u0F20-\u0F29";
const EXTENDER = "\u00B7\u02D0\u02D1\u0387\u0640\u0E46\u0EC6\u3005\u3031-\u3035\u309D-\u309E\u30FC-\u30FE";
const LETTER: string;
const NAME_CHAR: string;
}
/**
 * Regular expressions. These correspond to the productions of the same
* name in the specification.
*/
namespace regexes {
const CHAR: RegExp;
const S: RegExp;
const BASE_CHAR: RegExp;
const IDEOGRAPHIC: RegExp;
const COMBINING_CHAR: RegExp;
const DIGIT: RegExp;
const EXTENDER: RegExp;
const LETTER: RegExp;
const NAME_CHAR: RegExp;
const NAME: RegExp;
const NMTOKEN: RegExp;
}
}
}
/**
* Character class utilities for XML NS 1.0.
*/
export declare namespace XMLNS_1_0 {
/**
* Third edition.
*/
namespace ED3 {
/**
* Regular expression fragments. These fragments are designed to be included
* inside square brackets in a regular expression.
*/
namespace fragments {
const NC_NAME_START_CHAR = "A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\uD800\uDC00-\uDB7F\uDFFF";
const NC_NAME_CHAR: string;
}
/**
 * Regular expressions. These correspond to the productions of the same name
* in the specification.
*/
namespace regexes {
const NC_NAME_START_CHAR: RegExp;
const NC_NAME_CHAR: RegExp;
const NC_NAME: RegExp;
}
/**
* Determines whether a codepoint matches
* [[regexes.NC_NAME_START_CHAR]].
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches.
*/
const isNCNameStartChar: typeof nsed3.isNCNameStartChar;
/**
* Determines whether a codepoint matches [[regexes.NC_NAME_CHAR]].
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches.
*/
const isNCNameChar: typeof nsed3.isNCNameChar;
}
}
"use strict";
/**
* Character class utilities for XML NS 1.0 edition 3.
*
* @author Louis-Dominique Dubeau
* @license MIT
* @copyright Louis-Dominique Dubeau
*/
Object.defineProperty(exports, "__esModule", { value: true });
//
// Fragments.
//
// tslint:disable-next-line:max-line-length
exports.NC_NAME_START_CHAR = "A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\uD800\uDC00-\uDB7F\uDFFF";
exports.NC_NAME_CHAR = "-" + exports.NC_NAME_START_CHAR + ".0-9\u00B7\u0300-\u036F\u203F-\u2040";
//
// Regular expressions.
//
exports.NC_NAME_START_CHAR_RE = new RegExp("^[" + exports.NC_NAME_START_CHAR + "]$", "u");
exports.NC_NAME_CHAR_RE = new RegExp("^[" + exports.NC_NAME_CHAR + "]$", "u");
exports.NC_NAME_RE = new RegExp("^[" + exports.NC_NAME_START_CHAR + "][" + exports.NC_NAME_CHAR + "]*$", "u");
/**
* Determines whether a codepoint matches [[NC_NAME_START_CHAR]].
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches.
*/
// tslint:disable-next-line:cyclomatic-complexity
function isNCNameStartChar(c) {
return ((c >= 0x41 && c <= 0x5A) ||
c === 0x5F ||
(c >= 0x61 && c <= 0x7A) ||
(c >= 0xC0 && c <= 0xD6) ||
(c >= 0xD8 && c <= 0xF6) ||
(c >= 0x00F8 && c <= 0x02FF) ||
(c >= 0x0370 && c <= 0x037D) ||
(c >= 0x037F && c <= 0x1FFF) ||
(c >= 0x200C && c <= 0x200D) ||
(c >= 0x2070 && c <= 0x218F) ||
(c >= 0x2C00 && c <= 0x2FEF) ||
(c >= 0x3001 && c <= 0xD7FF) ||
(c >= 0xF900 && c <= 0xFDCF) ||
(c >= 0xFDF0 && c <= 0xFFFD) ||
(c >= 0x10000 && c <= 0xEFFFF));
}
exports.isNCNameStartChar = isNCNameStartChar;
/**
* Determines whether a codepoint matches [[NC_NAME_CHAR]].
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches.
*/
function isNCNameChar(c) {
return isNCNameStartChar(c) ||
(c === 0x2D ||
c === 0x2E ||
(c >= 0x30 && c <= 0x39) ||
c === 0x00B7 ||
(c >= 0x0300 && c <= 0x036F) ||
(c >= 0x203F && c <= 0x2040));
}
exports.isNCNameChar = isNCNameChar;
//# sourceMappingURL=ed3.js.map
"use strict";
/**
* Character classes and associated utilities for the 2nd edition of XML 1.1.
*
* @author Louis-Dominique Dubeau
* @license MIT
* @copyright Louis-Dominique Dubeau
*/
Object.defineProperty(exports, "__esModule", { value: true });
//
// Fragments.
//
exports.CHAR = "\u0001-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF";
exports.RESTRICTED_CHAR = "\u0001-\u0008\u000B\u000C\u000E-\u001F\u007F-\u0084\u0086-\u009F";
exports.S = " \t\r\n";
// tslint:disable-next-line:max-line-length
exports.NAME_START_CHAR = ":A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\uD800\uDC00-\uDB7F\uDFFF";
exports.NAME_CHAR = "-" + exports.NAME_START_CHAR + ".0-9\u00B7\u0300-\u036F\u203F-\u2040";
//
// Regular expressions.
//
exports.CHAR_RE = new RegExp("^[" + exports.CHAR + "]$", "u");
exports.RESTRICTED_CHAR_RE = new RegExp("^[" + exports.RESTRICTED_CHAR + "]$", "u");
exports.S_RE = new RegExp("^[" + exports.S + "]+$", "u");
exports.NAME_START_CHAR_RE = new RegExp("^[" + exports.NAME_START_CHAR + "]$", "u");
exports.NAME_CHAR_RE = new RegExp("^[" + exports.NAME_CHAR + "]$", "u");
exports.NAME_RE = new RegExp("^[" + exports.NAME_START_CHAR + "][" + exports.NAME_CHAR + "]*$", "u");
exports.NMTOKEN_RE = new RegExp("^[" + exports.NAME_CHAR + "]+$", "u");
var TAB = 9;
var NL = 0xA;
var CR = 0xD;
var SPACE = 0x20;
//
// Lists.
//
/** All characters in the ``S`` production. */
exports.S_LIST = [SPACE, NL, CR, TAB];
/**
* Determines whether a codepoint matches the ``CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``CHAR``.
*/
function isChar(c) {
return (c >= 0x0001 && c <= 0xD7FF) ||
(c >= 0xE000 && c <= 0xFFFD) ||
(c >= 0x10000 && c <= 0x10FFFF);
}
exports.isChar = isChar;
/**
* Determines whether a codepoint matches the ``RESTRICTED_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``RESTRICTED_CHAR``.
*/
function isRestrictedChar(c) {
return (c >= 0x1 && c <= 0x8) ||
c === 0xB ||
c === 0xC ||
(c >= 0xE && c <= 0x1F) ||
(c >= 0x7F && c <= 0x84) ||
(c >= 0x86 && c <= 0x9F);
}
exports.isRestrictedChar = isRestrictedChar;
/**
* Determines whether a codepoint matches the ``CHAR`` production and does not
* match the ``RESTRICTED_CHAR`` production. ``isCharAndNotRestricted(x)`` is
* equivalent to ``isChar(x) && !isRestrictedChar(x)``. This function is faster
* than running the two-call equivalent.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``CHAR`` and does not match
* ``RESTRICTED_CHAR``.
*/
function isCharAndNotRestricted(c) {
return (c === 0x9) ||
(c === 0xA) ||
(c === 0xD) ||
(c > 0x1F && c < 0x7F) ||
(c === 0x85) ||
(c > 0x9F && c <= 0xD7FF) ||
(c >= 0xE000 && c <= 0xFFFD) ||
(c >= 0x10000 && c <= 0x10FFFF);
}
exports.isCharAndNotRestricted = isCharAndNotRestricted;
/**
* Determines whether a codepoint matches the ``S`` (space) production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``S``.
*/
function isS(c) {
return c === SPACE || c === NL || c === CR || c === TAB;
}
exports.isS = isS;
/**
* Determines whether a codepoint matches the ``NAME_START_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_START_CHAR``.
*/
// tslint:disable-next-line:cyclomatic-complexity
function isNameStartChar(c) {
return ((c >= 0x41 && c <= 0x5A) ||
(c >= 0x61 && c <= 0x7A) ||
c === 0x3A ||
c === 0x5F ||
c === 0x200C ||
c === 0x200D ||
(c >= 0xC0 && c <= 0xD6) ||
(c >= 0xD8 && c <= 0xF6) ||
(c >= 0x00F8 && c <= 0x02FF) ||
(c >= 0x0370 && c <= 0x037D) ||
(c >= 0x037F && c <= 0x1FFF) ||
(c >= 0x2070 && c <= 0x218F) ||
(c >= 0x2C00 && c <= 0x2FEF) ||
(c >= 0x3001 && c <= 0xD7FF) ||
(c >= 0xF900 && c <= 0xFDCF) ||
(c >= 0xFDF0 && c <= 0xFFFD) ||
(c >= 0x10000 && c <= 0xEFFFF));
}
exports.isNameStartChar = isNameStartChar;
/**
* Determines whether a codepoint matches the ``NAME_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_CHAR``.
*/
function isNameChar(c) {
return isNameStartChar(c) ||
(c >= 0x30 && c <= 0x39) ||
c === 0x2D ||
c === 0x2E ||
c === 0xB7 ||
(c >= 0x0300 && c <= 0x036F) ||
(c >= 0x203F && c <= 0x2040);
}
exports.isNameChar = isNameChar;
//# sourceMappingURL=ed2.js.map
export declare const CHAR = "\u0001-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF";
export declare const RESTRICTED_CHAR = "\u0001-\b\v\f\u000E-\u001F\u007F-\u0084\u0086-\u009F";
export declare const S = " \t\r\n";
export declare const NAME_START_CHAR = ":A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\uD800\uDC00-\uDB7F\uDFFF";
export declare const NAME_CHAR: string;
export declare const CHAR_RE: RegExp;
export declare const RESTRICTED_CHAR_RE: RegExp;
export declare const S_RE: RegExp;
export declare const NAME_START_CHAR_RE: RegExp;
export declare const NAME_CHAR_RE: RegExp;
export declare const NAME_RE: RegExp;
export declare const NMTOKEN_RE: RegExp;
/** All characters in the ``S`` production. */
export declare const S_LIST: number[];
/**
* Determines whether a codepoint matches the ``CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``CHAR``.
*/
export declare function isChar(c: number): boolean;
/**
* Determines whether a codepoint matches the ``RESTRICTED_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``RESTRICTED_CHAR``.
*/
export declare function isRestrictedChar(c: number): boolean;
/**
* Determines whether a codepoint matches the ``CHAR`` production and does not
* match the ``RESTRICTED_CHAR`` production. ``isCharAndNotRestricted(x)`` is
* equivalent to ``isChar(x) && !isRestrictedChar(x)``. This function is faster
* than running the two-call equivalent.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``CHAR`` and does not match
* ``RESTRICTED_CHAR``.
*/
export declare function isCharAndNotRestricted(c: number): boolean;
/**
* Determines whether a codepoint matches the ``S`` (space) production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``S``.
*/
export declare function isS(c: number): boolean;
/**
* Determines whether a codepoint matches the ``NAME_START_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_START_CHAR``.
*/
export declare function isNameStartChar(c: number): boolean;
/**
* Determines whether a codepoint matches the ``NAME_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_CHAR``.
*/
export declare function isNameChar(c: number): boolean;
export declare const CHAR = "\t\n\r -\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF";
export declare const S = " \t\r\n";
export declare const BASE_CHAR = "A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF\u0100-\u0131\u0134-\u013E\u0141-\u0148\u014A-\u017E\u0180-\u01C3\u01CD-\u01F0\u01F4-\u01F5\u01FA-\u0217\u0250-\u02A8\u02BB-\u02C1\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03D6\u03DA\u03DC\u03DE\u03E0\u03E2-\u03F3\u0401-\u040C\u040E-\u044F\u0451-\u045C\u045E-\u0481\u0490-\u04C4\u04C7-\u04C8\u04CB-\u04CC\u04D0-\u04EB\u04EE-\u04F5\u04F8-\u04F9\u0531-\u0556\u0559\u0561-\u0586\u05D0-\u05EA\u05F0-\u05F2\u0621-\u063A\u0641-\u064A\u0671-\u06B7\u06BA-\u06BE\u06C0-\u06CE\u06D0-\u06D3\u06D5\u06E5-\u06E6\u0905-\u0939\u093D\u0958-\u0961\u0985-\u098C\u098F-\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09DC-\u09DD\u09DF-\u09E1\u09F0-\u09F1\u0A05-\u0A0A\u0A0F-\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32-\u0A33\u0A35-\u0A36\u0A38-\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8B\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2-\u0AB3\u0AB5-\u0AB9\u0ABD\u0AE0\u0B05-\u0B0C\u0B0F-\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32-\u0B33\u0B36-\u0B39\u0B3D\u0B5C-\u0B5D\u0B5F-\u0B61\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99-\u0B9A\u0B9C\u0B9E-\u0B9F\u0BA3-\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB5\u0BB7-\u0BB9\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C60-\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CDE\u0CE0-\u0CE1\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D60-\u0D61\u0E01-\u0E2E\u0E30\u0E32-\u0E33\u0E40-\u0E45\u0E81-\u0E82\u0E84\u0E87-\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA-\u0EAB\u0EAD-\u0EAE\u0EB0\u0EB2-\u0EB3\u0EBD\u0EC0-\u0EC4\u0F40-\u0F47\u0F49-\u0F69\u10A0-\u10C5\u10D0-\u10F6\u1100\u1102-\u1103\u1105-\u1107\u1109\u110B-\u110C\u110E-\u1112\u113C\u113E\u1140\u114C\u114E\u1150\u1154-\u1155\u1159\u115F-\u1161\u1163\u1165\u1167\u1169\u116D-\u116E\u1172-\u1173\u1175\u119E\u11A8\u11AB\u11AE-\u11AF\u11B7-\u11B8\u11BA\u11BC-\u11C2\u11EB\u11F0\u11F9\u1E00-\u1E9B\u1EA0-\u1EF9\u1F00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2126\u212A-\u212B\u212E\u2180-\u2182\u3041-\u3094\u30A1-\u30FA\u3105-\u312C\uAC00-\uD7A3";
export declare const IDEOGRAPHIC = "\u4E00-\u9FA5\u3007\u3021-\u3029";
export declare const COMBINING_CHAR = "\u0300-\u0345\u0360-\u0361\u0483-\u0486\u0591-\u05A1\u05A3-\u05B9\u05BB-\u05BD\u05BF\u05C1-\u05C2\u05C4\u064B-\u0652\u0670\u06D6-\u06DC\u06DD-\u06DF\u06E0-\u06E4\u06E7-\u06E8\u06EA-\u06ED\u0901-\u0903\u093C\u093E-\u094C\u094D\u0951-\u0954\u0962-\u0963\u0981-\u0983\u09BC\u09BE\u09BF\u09C0-\u09C4\u09C7-\u09C8\u09CB-\u09CD\u09D7\u09E2-\u09E3\u0A02\u0A3C\u0A3E\u0A3F\u0A40-\u0A42\u0A47-\u0A48\u0A4B-\u0A4D\u0A70-\u0A71\u0A81-\u0A83\u0ABC\u0ABE-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0B01-\u0B03\u0B3C\u0B3E-\u0B43\u0B47-\u0B48\u0B4B-\u0B4D\u0B56-\u0B57\u0B82-\u0B83\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD7\u0C01-\u0C03\u0C3E-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55-\u0C56\u0C82-\u0C83\u0CBE-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5-\u0CD6\u0D02-\u0D03\u0D3E-\u0D43\u0D46-\u0D48\u0D4A-\u0D4D\u0D57\u0E31\u0E34-\u0E3A\u0E47-\u0E4E\u0EB1\u0EB4-\u0EB9\u0EBB-\u0EBC\u0EC8-\u0ECD\u0F18-\u0F19\u0F35\u0F37\u0F39\u0F3E\u0F3F\u0F71-\u0F84\u0F86-\u0F8B\u0F90-\u0F95\u0F97\u0F99-\u0FAD\u0FB1-\u0FB7\u0FB9\u20D0-\u20DC\u20E1\u302A-\u302F\u3099\u309A";
export declare const DIGIT = "0-9\u0660-\u0669\u06F0-\u06F9\u0966-\u096F\u09E6-\u09EF\u0A66-\u0A6F\u0AE6-\u0AEF\u0B66-\u0B6F\u0BE7-\u0BEF\u0C66-\u0C6F\u0CE6-\u0CEF\u0D66-\u0D6F\u0E50-\u0E59\u0ED0-\u0ED9\u0F20-\u0F29";
export declare const EXTENDER = "\u00B7\u02D0\u02D1\u0387\u0640\u0E46\u0EC6\u3005\u3031-\u3035\u309D-\u309E\u30FC-\u30FE";
export declare const LETTER: string;
export declare const NAME_CHAR: string;
export declare const CHAR_RE: RegExp;
export declare const S_RE: RegExp;
export declare const BASE_CHAR_RE: RegExp;
export declare const IDEOGRAPHIC_RE: RegExp;
export declare const COMBINING_CHAR_RE: RegExp;
export declare const DIGIT_RE: RegExp;
export declare const EXTENDER_RE: RegExp;
export declare const LETTER_RE: RegExp;
export declare const NAME_CHAR_RE: RegExp;
export declare const NAME_RE: RegExp;
export declare const NMTOKEN_RE: RegExp;
"use strict";
/**
* Character classes and associated utilities for the 4th edition of XML 1.0.
*
* These are deprecated in the 5th edition but some of the standards related to
* XML 1.0 (e.g. XML Schema 1.0) refer to these. So they are still generally
* useful.
*
* @author Louis-Dominique Dubeau
* @license MIT
* @copyright Louis-Dominique Dubeau
*/
Object.defineProperty(exports, "__esModule", { value: true });
//
// Fragments.
//
exports.CHAR = "\t\n\r -\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF";
exports.S = " \t\r\n";
// tslint:disable-next-line:missing-jsdoc max-line-length
exports.BASE_CHAR = "A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF\u0100-\u0131\u0134-\u013E\u0141-\u0148\u014A-\u017E\u0180-\u01C3\u01CD-\u01F0\u01F4-\u01F5\u01FA-\u0217\u0250-\u02A8\u02BB-\u02C1\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03D6\u03DA\u03DC\u03DE\u03E0\u03E2-\u03F3\u0401-\u040C\u040E-\u044F\u0451-\u045C\u045E-\u0481\u0490-\u04C4\u04C7-\u04C8\u04CB-\u04CC\u04D0-\u04EB\u04EE-\u04F5\u04F8-\u04F9\u0531-\u0556\u0559\u0561-\u0586\u05D0-\u05EA\u05F0-\u05F2\u0621-\u063A\u0641-\u064A\u0671-\u06B7\u06BA-\u06BE\u06C0-\u06CE\u06D0-\u06D3\u06D5\u06E5-\u06E6\u0905-\u0939\u093D\u0958-\u0961\u0985-\u098C\u098F-\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09DC-\u09DD\u09DF-\u09E1\u09F0-\u09F1\u0A05-\u0A0A\u0A0F-\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32-\u0A33\u0A35-\u0A36\u0A38-\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8B\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2-\u0AB3\u0AB5-\u0AB9\u0ABD\u0AE0\u0B05-\u0B0C\u0B0F-\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32-\u0B33\u0B36-\u0B39\u0B3D\u0B5C-\u0B5D\u0B5F-\u0B61\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99-\u0B9A\u0B9C\u0B9E-\u0B9F\u0BA3-\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB5\u0BB7-\u0BB9\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C60-\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CDE\u0CE0-\u0CE1\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D60-\u0D61\u0E01-\u0E2E\u0E30\u0E32-\u0E33\u0E40-\u0E45\u0E81-\u0E82\u0E84\u0E87-\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA-\u0EAB\u0EAD-\u0EAE\u0EB0\u0EB2-\u0EB3\u0EBD\u0EC0-\u0EC4\u0F40-\u0F47\u0F49-\u0F69\u10A0-\u10C5\u10D0-\u10F6\u1100\u1102-\u1103\u1105-\u1107\u1109\u110B-\u110C\u110E-\u1112\u113C\u113E\u1140\u114C\u114E\u1150\u1154-\u1155\u1159\u115F-\u1161\u1163\u1165\u1167\u1169\u116D-\u116E\u1172-\u1173\u1175\u119E\u11A8\u11AB\u11AE-\u11AF\u11B7-\u11B8\u11BA\u11BC-\u11C2\u11EB\u11F0\u11F9\u1E00-\u1E9B\u1EA0-\u1EF9\u1F00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2126\u212A-\u212B\u212E\u2180-\u2182\u3041-\u3094\u30A1-\u30FA\u3105-\u312C\uAC00-\uD7A3";
exports.IDEOGRAPHIC = "\u4E00-\u9FA5\u3007\u3021-\u3029";
// tslint:disable-next-line:missing-jsdoc max-line-length
exports.COMBINING_CHAR = "\u0300-\u0345\u0360-\u0361\u0483-\u0486\u0591-\u05A1\u05A3-\u05B9\u05BB-\u05BD\u05BF\u05C1-\u05C2\u05C4\u064B-\u0652\u0670\u06D6-\u06DC\u06DD-\u06DF\u06E0-\u06E4\u06E7-\u06E8\u06EA-\u06ED\u0901-\u0903\u093C\u093E-\u094C\u094D\u0951-\u0954\u0962-\u0963\u0981-\u0983\u09BC\u09BE\u09BF\u09C0-\u09C4\u09C7-\u09C8\u09CB-\u09CD\u09D7\u09E2-\u09E3\u0A02\u0A3C\u0A3E\u0A3F\u0A40-\u0A42\u0A47-\u0A48\u0A4B-\u0A4D\u0A70-\u0A71\u0A81-\u0A83\u0ABC\u0ABE-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0B01-\u0B03\u0B3C\u0B3E-\u0B43\u0B47-\u0B48\u0B4B-\u0B4D\u0B56-\u0B57\u0B82-\u0B83\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD7\u0C01-\u0C03\u0C3E-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55-\u0C56\u0C82-\u0C83\u0CBE-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5-\u0CD6\u0D02-\u0D03\u0D3E-\u0D43\u0D46-\u0D48\u0D4A-\u0D4D\u0D57\u0E31\u0E34-\u0E3A\u0E47-\u0E4E\u0EB1\u0EB4-\u0EB9\u0EBB-\u0EBC\u0EC8-\u0ECD\u0F18-\u0F19\u0F35\u0F37\u0F39\u0F3E\u0F3F\u0F71-\u0F84\u0F86-\u0F8B\u0F90-\u0F95\u0F97\u0F99-\u0FAD\u0FB1-\u0FB7\u0FB9\u20D0-\u20DC\u20E1\u302A-\u302F\u3099\u309A";
// tslint:disable-next-line:missing-jsdoc max-line-length
exports.DIGIT = "0-9\u0660-\u0669\u06F0-\u06F9\u0966-\u096F\u09E6-\u09EF\u0A66-\u0A6F\u0AE6-\u0AEF\u0B66-\u0B6F\u0BE7-\u0BEF\u0C66-\u0C6F\u0CE6-\u0CEF\u0D66-\u0D6F\u0E50-\u0E59\u0ED0-\u0ED9\u0F20-\u0F29";
// tslint:disable-next-line:missing-jsdoc max-line-length
exports.EXTENDER = "\u00B7\u02D0\u02D1\u0387\u0640\u0E46\u0EC6\u3005\u3031-\u3035\u309D-\u309E\u30FC-\u30FE";
exports.LETTER = exports.BASE_CHAR + exports.IDEOGRAPHIC;
exports.NAME_CHAR = "-" + exports.LETTER + exports.DIGIT + "._:" + exports.COMBINING_CHAR + exports.EXTENDER;
//
// Regular expressions.
//
exports.CHAR_RE = new RegExp("^[" + exports.CHAR + "]$", "u");
exports.S_RE = new RegExp("^[" + exports.S + "]+$", "u");
exports.BASE_CHAR_RE = new RegExp("^[" + exports.BASE_CHAR + "]$", "u");
exports.IDEOGRAPHIC_RE = new RegExp("^[" + exports.IDEOGRAPHIC + "]$", "u");
exports.COMBINING_CHAR_RE = new RegExp("^[" + exports.COMBINING_CHAR + "]$", "u");
exports.DIGIT_RE = new RegExp("^[" + exports.DIGIT + "]$", "u");
exports.EXTENDER_RE = new RegExp("^[" + exports.EXTENDER + "]$", "u");
exports.LETTER_RE = new RegExp("^[" + exports.LETTER + "]$", "u");
exports.NAME_CHAR_RE = new RegExp("^[" + exports.NAME_CHAR + "]$", "u");
exports.NAME_RE = new RegExp("^[" + exports.LETTER + "_:][" + exports.NAME_CHAR + "]*$", "u");
exports.NMTOKEN_RE = new RegExp("^[" + exports.NAME_CHAR + "]+$", "u");
//# sourceMappingURL=ed4.js.map
"use strict";
/**
* Character classes and associated utilities for the 5th edition of XML 1.0.
*
* @author Louis-Dominique Dubeau
* @license MIT
* @copyright Louis-Dominique Dubeau
*/
Object.defineProperty(exports, "__esModule", { value: true });
//
// Fragments.
//
exports.CHAR = "\t\n\r -\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF";
exports.S = " \t\r\n";
// tslint:disable-next-line:max-line-length
exports.NAME_START_CHAR = ":A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\uD800\uDC00-\uDB7F\uDFFF";
exports.NAME_CHAR = "-" + exports.NAME_START_CHAR + ".0-9\u00B7\u0300-\u036F\u203F-\u2040";
//
// Regular expressions.
//
exports.CHAR_RE = new RegExp("^[" + exports.CHAR + "]$", "u");
exports.S_RE = new RegExp("^[" + exports.S + "]+$", "u");
exports.NAME_START_CHAR_RE = new RegExp("^[" + exports.NAME_START_CHAR + "]$", "u");
exports.NAME_CHAR_RE = new RegExp("^[" + exports.NAME_CHAR + "]$", "u");
exports.NAME_RE = new RegExp("^[" + exports.NAME_START_CHAR + "][" + exports.NAME_CHAR + "]*$", "u");
exports.NMTOKEN_RE = new RegExp("^[" + exports.NAME_CHAR + "]+$", "u");
var TAB = 9;
var NL = 0xA;
var CR = 0xD;
var SPACE = 0x20;
//
// Lists.
//
/** All characters in the ``S`` production. */
exports.S_LIST = [SPACE, NL, CR, TAB];
/**
* Determines whether a codepoint matches the ``CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``CHAR``.
*/
function isChar(c) {
return (c >= SPACE && c <= 0xD7FF) ||
c === NL || c === CR || c === TAB ||
(c >= 0xE000 && c <= 0xFFFD) ||
(c >= 0x10000 && c <= 0x10FFFF);
}
exports.isChar = isChar;
/**
* Determines whether a codepoint matches the ``S`` (space) production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``S``.
*/
function isS(c) {
return c === SPACE || c === NL || c === CR || c === TAB;
}
exports.isS = isS;
/**
* Determines whether a codepoint matches the ``NAME_START_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_START_CHAR``.
*/
function isNameStartChar(c) {
return ((c >= 0x41 && c <= 0x5A) ||
(c >= 0x61 && c <= 0x7A) ||
c === 0x3A ||
c === 0x5F ||
c === 0x200C ||
c === 0x200D ||
(c >= 0xC0 && c <= 0xD6) ||
(c >= 0xD8 && c <= 0xF6) ||
(c >= 0x00F8 && c <= 0x02FF) ||
(c >= 0x0370 && c <= 0x037D) ||
(c >= 0x037F && c <= 0x1FFF) ||
(c >= 0x2070 && c <= 0x218F) ||
(c >= 0x2C00 && c <= 0x2FEF) ||
(c >= 0x3001 && c <= 0xD7FF) ||
(c >= 0xF900 && c <= 0xFDCF) ||
(c >= 0xFDF0 && c <= 0xFFFD) ||
(c >= 0x10000 && c <= 0xEFFFF));
}
exports.isNameStartChar = isNameStartChar;
/**
* Determines whether a codepoint matches the ``NAME_CHAR`` production.
*
* @param c The code point.
*
* @returns ``true`` if the codepoint matches ``NAME_CHAR``.
*/
function isNameChar(c) {
return isNameStartChar(c) ||
(c >= 0x30 && c <= 0x39) ||
c === 0x2D ||
c === 0x2E ||
c === 0xB7 ||
(c >= 0x0300 && c <= 0x036F) ||
(c >= 0x203F && c <= 0x2040);
}
exports.isNameChar = isNameChar;
//# sourceMappingURL=ed5.js.map
# delayed-stream
Buffers events from a stream until you are ready to handle them.
## Installation
``` bash
npm install delayed-stream
```
## Usage
The following example shows how to write an HTTP echo server that delays its
response by 1000 ms.
``` javascript
var DelayedStream = require('delayed-stream');
var http = require('http');
http.createServer(function(req, res) {
var delayed = DelayedStream.create(req);
setTimeout(function() {
res.writeHead(200);
delayed.pipe(res);
}, 1000);
});
```
If you are not using `Stream#pipe`, you can also manually release the buffered
events by calling `delayedStream.resume()`:
``` javascript
var delayed = DelayedStream.create(req);
setTimeout(function() {
  // Emit all buffered events and resume the underlying source
delayed.resume();
}, 1000);
```
## Implementation
In order to use this meta stream properly, here are a few things you should
know about the implementation.
### Event Buffering / Proxying
All events of the `source` stream are hijacked by overwriting the `source.emit`
method. Until node implements a catch-all event listener, this is the only way.
However, delayed-stream still continues to emit all events it captures on the
`source`, regardless of whether you have released the delayed stream yet or
not.
Upon creation, delayed-stream captures all `source` events and stores them in
an internal event buffer. Once `delayedStream.release()` is called, all
buffered events are emitted on the `delayedStream`, and the event buffer is
cleared. After that, delayed-stream merely acts as a proxy for the underlying
source.
### Error handling
Error events on `source` are buffered / proxied just like any other events.
However, `delayedStream.create` attaches a no-op `'error'` listener to the
`source`. This way you only have to handle errors on the `delayedStream`
object, rather than in two places.
### Buffer limits
delayed-stream provides a `maxDataSize` property that can be used to limit
the amount of data being buffered. In order to protect you from bad `source`
streams that don't react to `source.pause()`, this feature is enabled by
default.
## API
### DelayedStream.create(source, [options])
Returns a new `delayedStream`. Available options are:
* `pauseStream`
* `maxDataSize`
The description for those properties can be found below.
### delayedStream.source
The `source` stream managed by this object. This is useful if you are
passing your `delayedStream` around, and you still want to access properties
on the `source` object.
### delayedStream.pauseStream = true
Whether to pause the underlying `source` when calling
`DelayedStream.create()`. Modifying this property afterwards has no effect.
### delayedStream.maxDataSize = 1024 * 1024
The amount of data to buffer before emitting an `error`.
If the underlying source is emitting `Buffer` objects, the `maxDataSize`
refers to bytes.
If the underlying source is emitting JavaScript strings, the size refers to
characters.
If you know what you are doing, you can set this property to `Infinity` to
disable this feature. You can also modify this property during runtime.
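For illustration, a minimal sketch of using the limit (the 10 KiB cap and the `req` source stream are arbitrary placeholder choices):

``` javascript
var DelayedStream = require('delayed-stream');

var delayed = DelayedStream.create(req, {maxDataSize: 10 * 1024});

delayed.on('error', function(err) {
  // Emitted once more than maxDataSize has been buffered before release().
  console.error(err.message);
});
```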
### delayedStream.dataSize = 0
The amount of data buffered so far.
### delayedStream.readable
An ECMA5 getter that returns the value of `source.readable`.
### delayedStream.resume()
If the `delayedStream` has not been released so far, `delayedStream.release()`
is called.
In either case, `source.resume()` is called.
### delayedStream.pause()
Calls `source.pause()`.
### delayedStream.pipe(dest)
Calls `delayedStream.resume()` and then proxies the arguments to `source.pipe`.
### delayedStream.release()
Emits and clears all events that have been buffered up so far. This does not
resume the underlying source, use `delayedStream.resume()` instead.
## License
delayed-stream is licensed under the MIT license.
var Stream = require('stream').Stream;
var util = require('util');
module.exports = DelayedStream;
function DelayedStream() {
this.source = null;
this.dataSize = 0;
this.maxDataSize = 1024 * 1024;
this.pauseStream = true;
this._maxDataSizeExceeded = false;
this._released = false;
this._bufferedEvents = [];
}
util.inherits(DelayedStream, Stream);
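// Wraps `source`: copies any options onto the new instance, hijacks `source.emit`
// so every event passes through `_handleEmit`, and pauses the source by default.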
DelayedStream.create = function(source, options) {
var delayedStream = new this();
options = options || {};
for (var option in options) {
delayedStream[option] = options[option];
}
delayedStream.source = source;
var realEmit = source.emit;
source.emit = function() {
delayedStream._handleEmit(arguments);
return realEmit.apply(source, arguments);
};
source.on('error', function() {});
if (delayedStream.pauseStream) {
source.pause();
}
return delayedStream;
};
Object.defineProperty(DelayedStream.prototype, 'readable', {
configurable: true,
enumerable: true,
get: function() {
return this.source.readable;
}
});
DelayedStream.prototype.setEncoding = function() {
return this.source.setEncoding.apply(this.source, arguments);
};
DelayedStream.prototype.resume = function() {
if (!this._released) {
this.release();
}
this.source.resume();
};
DelayedStream.prototype.pause = function() {
this.source.pause();
};
DelayedStream.prototype.release = function() {
this._released = true;
this._bufferedEvents.forEach(function(args) {
this.emit.apply(this, args);
}.bind(this));
this._bufferedEvents = [];
};
DelayedStream.prototype.pipe = function() {
var r = Stream.prototype.pipe.apply(this, arguments);
this.resume();
return r;
};
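// Invoked for every event the source emits: once released, events are re-emitted
// immediately; otherwise 'data' payload sizes are tracked and the event is buffered.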
DelayedStream.prototype._handleEmit = function(args) {
if (this._released) {
this.emit.apply(this, args);
return;
}
if (args[0] === 'data') {
this.dataSize += args[1].length;
this._checkIfMaxDataSizeExceeded();
}
this._bufferedEvents.push(args);
};
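// Emits a single 'error' on this stream once the buffered data exceeds maxDataSize.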
DelayedStream.prototype._checkIfMaxDataSizeExceeded = function() {
if (this._maxDataSizeExceeded) {
return;
}
if (this.dataSize <= this.maxDataSize) {
return;
}
this._maxDataSizeExceeded = true;
var message =
'DelayedStream#maxDataSize of ' + this.maxDataSize + ' bytes exceeded.'
this.emit('error', new Error(message));
};
This software is released under the MIT license:
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# is-typedarray [](http://github.com/badges/stability-badges)
Detect whether or not an object is a
[Typed Array](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Typed_arrays).
## Usage
[](https://nodei.co/npm/is-typedarray/)
### isTypedArray(array)
Returns `true` when array is a Typed Array, and `false` when it is not.
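For example:

```js
var isTypedArray = require('is-typedarray')

isTypedArray(new Uint8Array(8))   // true
isTypedArray(new Float32Array(4)) // true
isTypedArray([1, 2, 3])           // false
isTypedArray('nope')              // false
```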
## License
MIT. See [LICENSE.md](http://github.com/hughsk/is-typedarray/blob/master/LICENSE.md) for details.
# extsprintf: extended POSIX-style sprintf
Stripped down version of s[n]printf(3c). We make a best effort to throw an
exception when given a format string we don't understand, rather than ignoring
it, so that we won't break existing programs if/when we go implement the rest
of this.
This implementation currently supports specifying
* field alignment ('-' flag),
* zero-pad ('0' flag)
* always show numeric sign ('+' flag),
* field width
* conversions for strings, decimal integers, and floats (numbers).
* argument size specifiers. These are all accepted but ignored, since
Javascript has no notion of the physical size of an argument.
Everything else is currently unsupported, most notably: precision, unsigned
numbers, non-decimal numbers, and characters.
Besides the usual POSIX conversions, this implementation supports:
* `%j`: pretty-print a JSON object (using node's "inspect")
* `%r`: pretty-print an Error object
# Example
First, install it:

    # npm install extsprintf

Now, use it:

    var mod_extsprintf = require('extsprintf');
    console.log(mod_extsprintf.sprintf('hello %25s', 'world'));

outputs:

    hello                     world
# Also supported
**printf**: same args as sprintf, but prints the result to stdout
**fprintf**: same args as sprintf, preceded by a Node stream. Prints the result
to the given stream.
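For example (the format strings and values here are arbitrary):

    var mod_extsprintf = require('extsprintf');

    // printf writes to stdout; fprintf writes to the stream passed first.
    mod_extsprintf.printf('%d items\n', 42);
    mod_extsprintf.fprintf(process.stderr, 'warning: %s\n', 'low disk space');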
var mod_assert = require('assert');
var mod_util = require('util');
/*
* Public interface
*/
exports.sprintf = jsSprintf;
exports.printf = jsPrintf;
exports.fprintf = jsFprintf;
/*
* Stripped down version of s[n]printf(3c). We make a best effort to throw an
* exception when given a format string we don't understand, rather than
* ignoring it, so that we won't break existing programs if/when we go implement
* the rest of this.
*
* This implementation currently supports specifying
* - field alignment ('-' flag),
* - zero-pad ('0' flag)
* - always show numeric sign ('+' flag),
* - field width
* - conversions for strings, decimal integers, and floats (numbers).
* - argument size specifiers. These are all accepted but ignored, since
* Javascript has no notion of the physical size of an argument.
*
* Everything else is currently unsupported, most notably precision, unsigned
* numbers, non-decimal numbers, and characters.
*/
function jsSprintf(fmt)
{
var regex = [
'([^%]*)', /* normal text */
'%', /* start of format */
'([\'\\-+ #0]*?)', /* flags (optional) */
'([1-9]\\d*)?', /* width (optional) */
'(\\.([1-9]\\d*))?', /* precision (optional) */
'[lhjztL]*?', /* length mods (ignored) */
'([diouxXfFeEgGaAcCsSp%jr])' /* conversion */
].join('');
var re = new RegExp(regex);
var args = Array.prototype.slice.call(arguments, 1);
var flags, width, precision, conversion;
var left, pad, sign, arg, match;
var ret = '';
var argn = 1;
mod_assert.equal('string', typeof (fmt));
while ((match = re.exec(fmt)) !== null) {
ret += match[1];
fmt = fmt.substring(match[0].length);
flags = match[2] || '';
width = match[3] || 0;
precision = match[4] || '';
conversion = match[6];
left = false;
sign = false;
pad = ' ';
if (conversion == '%') {
ret += '%';
continue;
}
if (args.length === 0)
throw (new Error('too few args to sprintf'));
arg = args.shift();
argn++;
if (flags.match(/[\' #]/))
throw (new Error(
'unsupported flags: ' + flags));
if (precision.length > 0)
throw (new Error(
'non-zero precision not supported'));
if (flags.match(/-/))
left = true;
if (flags.match(/0/))
pad = '0';
if (flags.match(/\+/))
sign = true;
switch (conversion) {
case 's':
if (arg === undefined || arg === null)
throw (new Error('argument ' + argn +
': attempted to print undefined or null ' +
'as a string'));
ret += doPad(pad, width, left, arg.toString());
break;
case 'd':
arg = Math.floor(arg);
/*jsl:fallthru*/
case 'f':
sign = sign && arg > 0 ? '+' : '';
ret += sign + doPad(pad, width, left,
arg.toString());
break;
case 'x':
ret += doPad(pad, width, left, arg.toString(16));
break;
case 'j': /* non-standard */
if (width === 0)
width = 10;
ret += mod_util.inspect(arg, false, width);
break;
case 'r': /* non-standard */
ret += dumpException(arg);
break;
default:
throw (new Error('unsupported conversion: ' +
conversion));
}
}
ret += fmt;
return (ret);
}
function jsPrintf() {
var args = Array.prototype.slice.call(arguments);
args.unshift(process.stdout);
jsFprintf.apply(null, args);
}
function jsFprintf(stream) {
var args = Array.prototype.slice.call(arguments, 1);
return (stream.write(jsSprintf.apply(this, args)));
}
function doPad(chr, width, left, str)
{
var ret = str;
while (ret.length < width) {
if (left)
ret += chr;
else
ret = chr + ret;
}
return (ret);
}
/*
* This function dumps long stack traces for exceptions having a cause() method.
* See node-verror for an example.
*/
function dumpException(ex)
{
var ret;
if (!(ex instanceof Error))
throw (new Error(jsSprintf('invalid type for %%r: %j', ex)));
/* Note that V8 prepends "ex.stack" with ex.toString(). */
ret = 'EXCEPTION: ' + ex.constructor.name + ': ' + ex.stack;
if (ex.cause && typeof (ex.cause) === 'function') {
var cex = ex.cause();
if (cex) {
ret += '\nCaused by: ' + dumpException(cex);
}
}
return (ret);
}
# HAR Schema [![version][npm-version]][npm-url] [![License][npm-license]][license-url]
> JSON Schema for HTTP Archive ([HAR][spec]).
[![Build Status][travis-image]][travis-url]
[![Downloads][npm-downloads]][npm-url]
[![Code Climate][codeclimate-quality]][codeclimate-url]
[![Coverage Status][codeclimate-coverage]][codeclimate-url]
[![Dependency Status][dependencyci-image]][dependencyci-url]
[![Dependencies][david-image]][david-url]
## Install
```bash
npm install --only=production --save har-schema
```
## Usage
Compatible with any [JSON Schema validation tool][validator].
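For example, with [ajv](https://www.npmjs.com/package/ajv). This is a sketch, not part of this package: it assumes `ajv` is installed and that each exported schema carries an id such as `har.json`, in the way `har-validator` consumes them.

```js
const Ajv = require('ajv')
const schemas = require('har-schema')

const ajv = new Ajv({ allErrors: true })
ajv.addSchema(Object.keys(schemas).map(name => schemas[name]))

// `har` is the object to validate, e.g. the parsed contents of an exported .har file.
const valid = ajv.validate('har.json', har)
if (!valid) console.log(ajv.errors)
```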
----
> :copyright: [ahmadnassri.com](https://www.ahmadnassri.com/) ·
> License: [ISC][license-url] ·
> Github: [@ahmadnassri](https://github.com/ahmadnassri) ·
> Twitter: [@ahmadnassri](https://twitter.com/ahmadnassri)
[license-url]: http://choosealicense.com/licenses/isc/
[travis-url]: https://travis-ci.org/ahmadnassri/har-schema
[travis-image]: https://img.shields.io/travis/ahmadnassri/har-schema.svg?style=flat-square
[npm-url]: https://www.npmjs.com/package/har-schema
[npm-license]: https://img.shields.io/npm/l/har-schema.svg?style=flat-square
[npm-version]: https://img.shields.io/npm/v/har-schema.svg?style=flat-square
[npm-downloads]: https://img.shields.io/npm/dm/har-schema.svg?style=flat-square
[codeclimate-url]: https://codeclimate.com/github/ahmadnassri/har-schema
[codeclimate-quality]: https://img.shields.io/codeclimate/github/ahmadnassri/har-schema.svg?style=flat-square
[codeclimate-coverage]: https://img.shields.io/codeclimate/coverage/github/ahmadnassri/har-schema.svg?style=flat-square
[david-url]: https://david-dm.org/ahmadnassri/har-schema
[david-image]: https://img.shields.io/david/ahmadnassri/har-schema.svg?style=flat-square
[dependencyci-url]: https://dependencyci.com/github/ahmadnassri/har-schema
[dependencyci-image]: https://dependencyci.com/github/ahmadnassri/har-schema/badge?style=flat-square
[spec]: https://github.com/ahmadnassri/har-spec/blob/master/versions/1.2.md
[validator]: https://github.com/ahmadnassri/har-validator
# Decode According to the WHATWG Encoding Standard
This package provides a thin layer on top of [iconv-lite](https://github.com/ashtuchkin/iconv-lite) which makes it expose some of the same primitives as the [Encoding Standard](https://encoding.spec.whatwg.org/).
```js
const whatwgEncoding = require("whatwg-encoding");
console.assert(whatwgEncoding.labelToName("latin1") === "windows-1252");
console.assert(whatwgEncoding.labelToName(" CYRILLic ") === "ISO-8859-5");
console.assert(whatwgEncoding.isSupported("IBM866") === true);
// Not supported by the Encoding Standard
console.assert(whatwgEncoding.isSupported("UTF-32") === false);
// In the Encoding Standard, but this package can't decode it
console.assert(whatwgEncoding.isSupported("x-mac-cyrillic") === false);
console.assert(whatwgEncoding.getBOMEncoding(new Buffer([0xFE, 0xFF])) === "UTF-16BE");
console.assert(whatwgEncoding.getBOMEncoding(new Buffer([0x48, 0x69])) === null);
console.assert(whatwgEncoding.decode(new Buffer([0x48, 0x69]), "UTF-8") === "Hi");
```
## API
- `decode(buffer, fallbackEncodingName)`: performs the [decode](https://encoding.spec.whatwg.org/#decode) algorithm (in which any BOM will override the passed fallback encoding), and returns the resulting string; see the sketch after this list
- `labelToName(label)`: performs the [get an encoding](https://encoding.spec.whatwg.org/#concept-encoding-get) algorithm and returns the resulting encoding's name, or `null` for failure
- `isSupported(name)`: returns whether the encoding is one of [the encodings](https://encoding.spec.whatwg.org/#names-and-labels) of the Encoding Standard, _and_ is an encoding that this package can decode (via iconv-lite)
- `getBOMEncoding(buffer)`: sniffs the first 2–3 bytes of the supplied `Buffer`, returning one of the encoding names `"UTF-8"`, `"UTF-16LE"`, or `"UTF-16BE"` if the appropriate BOM is present, or `null` if no BOM is present
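To make the BOM behavior of `decode` concrete, here is a small sketch (the byte values are just an example):

```js
const whatwgEncoding = require("whatwg-encoding");

// 0xEF 0xBB 0xBF is a UTF-8 BOM: it overrides the windows-1252 fallback,
// and per the decode algorithm it does not appear in the result.
const bytes = Buffer.from([0xEF, 0xBB, 0xBF, 0x48, 0x69]);
console.assert(whatwgEncoding.getBOMEncoding(bytes) === "UTF-8");
console.assert(whatwgEncoding.decode(bytes, "windows-1252") === "Hi");
```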
## Unsupported encodings
Since we rely on iconv-lite, we are limited to supporting only the encodings that it supports. Currently we are missing support for:
- ISO-2022-JP
- ISO-8859-8-I
- replacement
- x-mac-cyrillic
- x-user-defined
Passing these encoding names will return `false` when calling `isSupported`, and passing any of the possible labels for these encodings to `labelToName` will return `null`.
## Credits
This package was originally based on the excellent work of [@nicolashenry](https://github.com/nicolashenry), [in jsdom](https://github.com/tmpvar/jsdom/blob/7ce11776ce161e8d5921a7a183585327400f786b/lib/jsdom/living/helpers/encoding.js). It has since been pulled out into this separate package.
## Alternatives
If you are looking for a JavaScript implementation of the Encoding Standard's `TextEncoder` and `TextDecoder` APIs, you'll want [@inexorabletash](https://github.com/inexorabletash)'s [text-encoding](https://github.com/inexorabletash/text-encoding) package.
# type-check [](https://travis-ci.org/gkz/type-check)
<a name="type-check" />
`type-check` is a library which allows you to check the types of JavaScript values at runtime with a Haskell like type syntax. It is great for checking external input, for testing, or even for adding a bit of safety to your internal code. It is a major component of [levn](https://github.com/gkz/levn). MIT license. Version 0.3.2. Check out the [demo](http://gkz.github.io/type-check/).
For updates on `type-check`, [follow me on twitter](https://twitter.com/gkzahariev).

    npm install type-check
## Quick Examples
```js
// Basic types:
var typeCheck = require('type-check').typeCheck;
typeCheck('Number', 1); // true
typeCheck('Number', 'str'); // false
typeCheck('Error', new Error); // true
typeCheck('Undefined', undefined); // true
// Comment
typeCheck('count::Number', 1); // true
// One type OR another type:
typeCheck('Number | String', 2); // true
typeCheck('Number | String', 'str'); // true
// Wildcard, matches all types:
typeCheck('*', 2) // true
// Array, all elements of a single type:
typeCheck('[Number]', [1, 2, 3]); // true
typeCheck('[Number]', [1, 'str', 3]); // false
// Tuples, or fixed length arrays with elements of different types:
typeCheck('(String, Number)', ['str', 2]); // true
typeCheck('(String, Number)', ['str']); // false
typeCheck('(String, Number)', ['str', 2, 5]); // false
// Object properties:
typeCheck('{x: Number, y: Boolean}', {x: 2, y: false}); // true
typeCheck('{x: Number, y: Boolean}', {x: 2}); // false
typeCheck('{x: Number, y: Maybe Boolean}', {x: 2}); // true
typeCheck('{x: Number, y: Boolean}', {x: 2, y: false, z: 3}); // false
typeCheck('{x: Number, y: Boolean, ...}', {x: 2, y: false, z: 3}); // true
// A particular type AND object properties:
typeCheck('RegExp{source: String, ...}', /re/i); // true
typeCheck('RegExp{source: String, ...}', {source: 're'}); // false
// Custom types:
var opt = {customTypes:
{Even: { typeOf: 'Number', validate: function(x) { return x % 2 === 0; }}}};
typeCheck('Even', 2, opt); // true
// Nested:
var type = '{a: (String, [Number], {y: Array, ...}), b: Error{message: String, ...}}'
typeCheck(type, {a: ['hi', [1, 2, 3], {y: [1, 'ms']}], b: new Error('oh no')}); // true
```
Check out the [type syntax format](#syntax) and [guide](#guide).
## Usage
`require('type-check');` returns an object that exposes four properties. `VERSION` is the current version of the library as a string. `typeCheck`, `parseType`, and `parsedTypeCheck` are functions.
```js
// typeCheck(type, input, options);
typeCheck('Number', 2); // true
// parseType(type);
var parsedType = parseType('Number'); // object
// parsedTypeCheck(parsedType, input, options);
parsedTypeCheck(parsedType, 2); // true
```
### typeCheck(type, input, options)
`typeCheck` checks a JavaScript value `input` against `type` written in the [type format](#type-format) (and taking account the optional `options`) and returns whether the `input` matches the `type`.
##### arguments
* type - `String` - the type written in the [type format](#type-format) which to check against
* input - `*` - any JavaScript value, which is to be checked against the type
* options - `Maybe Object` - an optional parameter specifying additional options, currently the only available option is specifying [custom types](#custom-types)
##### returns
`Boolean` - whether the input matches the type
##### example
```js
typeCheck('Number', 2); // true
```
### parseType(type)
`parseType` parses string `type` written in the [type format](#type-format) into an object representing the parsed type.
##### arguments
* type - `String` - the type written in the [type format](#type-format) which to parse
##### returns
`Object` - an object in the parsed type format representing the parsed type
##### example
```js
parseType('Number'); // [{type: 'Number'}]
```
### parsedTypeCheck(parsedType, input, options)
`parsedTypeCheck` checks a JavaScript value `input` against parsed `type` in the parsed type format (and taking account the optional `options`) and returns whether the `input` matches the `type`. Use this in conjunction with `parseType` if you are going to use a type more than once.
##### arguments
* type - `Object` - the type in the parsed type format which to check against
* input - `*` - any JavaScript value, which is to be checked against the type
* options - `Maybe Object` - an optional parameter specifying additional options, currently the only available option is specifying [custom types](#custom-types)
##### returns
`Boolean` - whether the input matches the type
##### example
```js
parsedTypeCheck([{type: 'Number'}], 2); // true
var parsedType = parseType('String');
parsedTypeCheck(parsedType, 'str'); // true
```
<a name="type-format" />
## Type Format
### Syntax
White space is ignored. The root node is a __Types__.
* __Identifier__ = `[\$\w]+` - a group of any lower or upper case letters, numbers, underscores, or dollar signs - eg. `String`
* __Type__ = an `Identifier`, an `Identifier` followed by a `Structure`, just a `Structure`, or a wildcard `*` - eg. `String`, `Object{x: Number}`, `{x: Number}`, `Array{0: String, 1: Boolean, length: Number}`, `*`
* __Types__ = optionally a comment (an `Identifier` followed by a `::`), optionally the identifier `Maybe`, one or more `Type`, separated by `|` - eg. `Number`, `String | Date`, `Maybe Number`, `Maybe Boolean | String`
* __Structure__ = `Fields`, or a `Tuple`, or an `Array` - eg. `{x: Number}`, `(String, Number)`, `[Date]`
* __Fields__ = a `{`, followed one or more `Field` separated by a comma `,` (trailing comma `,` is permitted), optionally an `...` (always preceded by a comma `,`), followed by a `}` - eg. `{x: Number, y: String}`, `{k: Function, ...}`
* __Field__ = an `Identifier`, followed by a colon `:`, followed by `Types` - eg. `x: Date | String`, `y: Boolean`
* __Tuple__ = a `(`, followed by one or more `Types` separated by a comma `,` (trailing comma `,` is permitted), followed by a `)` - eg `(Date)`, `(Number, Date)`
* __Array__ = a `[` followed by exactly one `Types` followed by a `]` - eg. `[Boolean]`, `[Boolean | Null]`
### Guide
`type-check` uses `Object.prototype.toString` to find out the basic type of a value. Specifically,
```js
{}.toString.call(VALUE).slice(8, -1)
{}.toString.call(true).slice(8, -1) // 'Boolean'
```
A basic type, eg. `Number`, uses this check. This is much more versatile than using `typeof` - for example, with `document`, `typeof` produces `'object'` which isn't that useful, and our technique produces `'HTMLDocument'`.
You may check for multiple types by separating types with a `|`. The checker proceeds from left to right, and passes if the value is any of the types - eg. `String | Boolean` first checks if the value is a string, and then if it is a boolean. If it is none of those, then it returns false.
Adding a `Maybe` in front of a list of multiple types is the same as also checking for `Null` and `Undefined` - eg. `Maybe String` is equivalent to `Undefined | Null | String`.
You may add a comment to remind you of what the type is for by following an identifier with a `::` before a type (or multiple types). The comment is simply thrown out.
The wildcard `*` matches all types.
There are three types of structures for checking the contents of a value: 'fields', 'tuple', and 'array'.
If used by itself, a 'fields' structure will pass with any type of object as long as it is an instance of `Object` and the properties pass - this allows for duck typing - eg. `{x: Boolean}`.
To check if the properties pass, and the value is of a certain type, you can specify the type - eg. `Error{message: String}`.
If you want to make a field optional, you can simply use `Maybe` - eg. `{x: Boolean, y: Maybe String}` will still pass if `y` is undefined (or null).
If you don't care if the value has properties beyond what you have specified, you can use the 'etc' operator `...` - eg. `{x: Boolean, ...}` will match an object with an `x` property that is a boolean, and with zero or more other properties.
For an array, you must specify one or more types (separated by `|`) - it will pass for something of any length as long as each element passes the types provided - eg. `[Number]`, `[Number | String]`.
A tuple checks for a fixed number of elements, each of a potentially different type. Each element is separated by a comma - eg. `(String, Number)`.
An array and tuple structure check that the value is of type `Array` by default, but if another type is specified, they will check for that instead - eg. `Int32Array[Number]`. You can use the wildcard `*` to search for any type at all.
Check out the [type precedence](https://github.com/zaboco/type-precedence) library for type-check.
## Options
Options is an object. It is an optional parameter to the `typeCheck` and `parsedTypeCheck` functions. The only current option is `customTypes`.
<a name="custom-types" />
### Custom Types
__Example:__
```js
var options = {
  customTypes: {
    Even: {
      typeOf: 'Number',
      validate: function(x) {
        return x % 2 === 0;
      }
    }
  }
};
typeCheck('Even', 2, options); // true
typeCheck('Even', 3, options); // false
```
`customTypes` allows you to set up custom types for validation. The value of this is an object. The keys of the object are the types you will be matching. Each value of the object will be an object having a `typeOf` property - a string, and `validate` property - a function.
The `typeOf` property is the type the value should be, and `validate` is a function which should return true if the value is of that type. `validate` receives one parameter, which is the value that we are checking.
## Technical About
`type-check` is written in [LiveScript](http://livescript.net/) - a language that compiles to JavaScript. It also uses the [prelude.ls](http://preludels.com/) library.
| zhihu-crawler | /zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/type-check/README.md | README.md |
(function(){
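  // Compiled LiveScript output (see "Technical About" in the README).
  // Exports a function that validates an input value against the parsed type
  // descriptors produced by parse-type.js, with optional custom types.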
var ref$, any, all, isItNaN, types, defaultType, customTypes, toString$ = {}.toString;
ref$ = require('prelude-ls'), any = ref$.any, all = ref$.all, isItNaN = ref$.isItNaN;
types = {
Number: {
typeOf: 'Number',
validate: function(it){
return !isItNaN(it);
}
},
NaN: {
typeOf: 'Number',
validate: isItNaN
},
Int: {
typeOf: 'Number',
validate: function(it){
return !isItNaN(it) && it % 1 === 0;
}
},
Float: {
typeOf: 'Number',
validate: function(it){
return !isItNaN(it);
}
},
Date: {
typeOf: 'Date',
validate: function(it){
return !isItNaN(it.getTime());
}
}
};
defaultType = {
array: 'Array',
tuple: 'Array'
};
function checkArray(input, type){
return all(function(it){
return checkMultiple(it, type.of);
}, input);
}
function checkTuple(input, type){
var i, i$, ref$, len$, types;
i = 0;
for (i$ = 0, len$ = (ref$ = type.of).length; i$ < len$; ++i$) {
types = ref$[i$];
if (!checkMultiple(input[i], types)) {
return false;
}
i++;
}
return input.length <= i;
}
function checkFields(input, type){
var inputKeys, numInputKeys, k, numOfKeys, key, ref$, types;
inputKeys = {};
numInputKeys = 0;
for (k in input) {
inputKeys[k] = true;
numInputKeys++;
}
numOfKeys = 0;
for (key in ref$ = type.of) {
types = ref$[key];
if (!checkMultiple(input[key], types)) {
return false;
}
if (inputKeys[key]) {
numOfKeys++;
}
}
return type.subset || numInputKeys === numOfKeys;
}
function checkStructure(input, type){
if (!(input instanceof Object)) {
return false;
}
switch (type.structure) {
case 'fields':
return checkFields(input, type);
case 'array':
return checkArray(input, type);
case 'tuple':
return checkTuple(input, type);
}
}
function check(input, typeObj){
var type, structure, setting, that;
type = typeObj.type, structure = typeObj.structure;
if (type) {
if (type === '*') {
return true;
}
setting = customTypes[type] || types[type];
if (setting) {
return setting.typeOf === toString$.call(input).slice(8, -1) && setting.validate(input);
} else {
return type === toString$.call(input).slice(8, -1) && (!structure || checkStructure(input, typeObj));
}
} else if (structure) {
if (that = defaultType[structure]) {
if (that !== toString$.call(input).slice(8, -1)) {
return false;
}
}
return checkStructure(input, typeObj);
} else {
throw new Error("No type defined. Input: " + input + ".");
}
}
function checkMultiple(input, types){
if (toString$.call(types).slice(8, -1) !== 'Array') {
throw new Error("Types must be in an array. Input: " + input + ".");
}
return any(function(it){
return check(input, it);
}, types);
}
module.exports = function(parsedType, input, options){
options == null && (options = {});
customTypes = options.customTypes || {};
return checkMultiple(input, parsedType);
};
}).call(this); | zhihu-crawler | /zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/type-check/lib/check.js | check.js |
(function(){
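  // Compiled LiveScript output (see "Technical About" in the README).
  // Exports a function that tokenizes a type string (e.g. 'Maybe [Number]') and
  // parses it into the descriptor objects consumed by check.js.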
var identifierRegex, tokenRegex;
identifierRegex = /[\$\w]+/;
function peek(tokens){
var token;
token = tokens[0];
if (token == null) {
throw new Error('Unexpected end of input.');
}
return token;
}
function consumeIdent(tokens){
var token;
token = peek(tokens);
if (!identifierRegex.test(token)) {
throw new Error("Expected text, got '" + token + "' instead.");
}
return tokens.shift();
}
function consumeOp(tokens, op){
var token;
token = peek(tokens);
if (token !== op) {
throw new Error("Expected '" + op + "', got '" + token + "' instead.");
}
return tokens.shift();
}
function maybeConsumeOp(tokens, op){
var token;
token = tokens[0];
if (token === op) {
return tokens.shift();
} else {
return null;
}
}
function consumeArray(tokens){
var types;
consumeOp(tokens, '[');
if (peek(tokens) === ']') {
throw new Error("Must specify type of Array - eg. [Type], got [] instead.");
}
types = consumeTypes(tokens);
consumeOp(tokens, ']');
return {
structure: 'array',
of: types
};
}
function consumeTuple(tokens){
var components;
components = [];
consumeOp(tokens, '(');
if (peek(tokens) === ')') {
throw new Error("Tuple must be of at least length 1 - eg. (Type), got () instead.");
}
for (;;) {
components.push(consumeTypes(tokens));
maybeConsumeOp(tokens, ',');
if (')' === peek(tokens)) {
break;
}
}
consumeOp(tokens, ')');
return {
structure: 'tuple',
of: components
};
}
function consumeFields(tokens){
var fields, subset, ref$, key, types;
fields = {};
consumeOp(tokens, '{');
subset = false;
for (;;) {
if (maybeConsumeOp(tokens, '...')) {
subset = true;
break;
}
ref$ = consumeField(tokens), key = ref$[0], types = ref$[1];
fields[key] = types;
maybeConsumeOp(tokens, ',');
if ('}' === peek(tokens)) {
break;
}
}
consumeOp(tokens, '}');
return {
structure: 'fields',
of: fields,
subset: subset
};
}
function consumeField(tokens){
var key, types;
key = consumeIdent(tokens);
consumeOp(tokens, ':');
types = consumeTypes(tokens);
return [key, types];
}
function maybeConsumeStructure(tokens){
switch (tokens[0]) {
case '[':
return consumeArray(tokens);
case '(':
return consumeTuple(tokens);
case '{':
return consumeFields(tokens);
}
}
function consumeType(tokens){
var token, wildcard, type, structure;
token = peek(tokens);
wildcard = token === '*';
if (wildcard || identifierRegex.test(token)) {
type = wildcard
? consumeOp(tokens, '*')
: consumeIdent(tokens);
structure = maybeConsumeStructure(tokens);
if (structure) {
return structure.type = type, structure;
} else {
return {
type: type
};
}
} else {
structure = maybeConsumeStructure(tokens);
if (!structure) {
throw new Error("Unexpected character: " + token);
}
return structure;
}
}
function consumeTypes(tokens){
var lookahead, types, typesSoFar, typeObj, type;
if ('::' === peek(tokens)) {
throw new Error("No comment before comment separator '::' found.");
}
lookahead = tokens[1];
if (lookahead != null && lookahead === '::') {
tokens.shift();
tokens.shift();
}
types = [];
typesSoFar = {};
if ('Maybe' === peek(tokens)) {
tokens.shift();
types = [
{
type: 'Undefined'
}, {
type: 'Null'
}
];
typesSoFar = {
Undefined: true,
Null: true
};
}
for (;;) {
typeObj = consumeType(tokens), type = typeObj.type;
if (!typesSoFar[type]) {
types.push(typeObj);
}
typesSoFar[type] = true;
if (!maybeConsumeOp(tokens, '|')) {
break;
}
}
return types;
}
tokenRegex = RegExp('\\.\\.\\.|::|->|' + identifierRegex.source + '|\\S', 'g');
module.exports = function(input){
var tokens, e;
if (!input.length) {
throw new Error('No type specified.');
}
tokens = input.match(tokenRegex) || [];
if (in$('->', tokens)) {
throw new Error("Function types are not supported.\ To validate that something is a function, you may use 'Function'.");
}
try {
return consumeTypes(tokens);
} catch (e$) {
e = e$;
throw new Error(e.message + " - Remaining tokens: " + JSON.stringify(tokens) + " - Initial input: '" + input + "'");
}
};
function in$(x, xs){
var i = -1, l = xs.length >>> 0;
while (++i < l) if (x === xs[i]) return true;
return false;
}
}).call(this); | zhihu-crawler | /zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/type-check/lib/parse-type.js | parse-type.js |
# 更新历史
## 实验阶段
### WIP
- [add] 增加了生成器的 Slice 操作,现在可以用 `for people in me.followers[:10]` 这样的写法了
\* 注意:如果你使用 `followers[10:100]`,其实前 10 个用户也被获取了,并不是最佳用法。推荐使用 `followers.jump(10)[:90]` 这样的写法,会跳过前 10 个用户的网络请求。目前如果 Slice 的 start 不是 0,将会引发 Warning
### 0.0.41
- [add] 增加了之前忘记写的 `Topic.activities` 接口
### 0.0.41.dev1
- [fix] 尝试修复 `COLUMN_POPULAR_ARTICLE` 型 Feed 的问题
- [update] 现在未实现类型的 Exception 转化为 Warning,会自动跳过,不影响正常使用
### 0.0.40.post1
- [fix] 修复了 `Feed` 遇到来自话题的电子书时出错的 Bug。
### 0.0.40
- [add] 增加了 `Feed` 类,可用 `for feed in me.feeds` 获取登录用户的首页信息流。
### 0.0.39.post1
- [fix] 修复了各知乎类的 ID 类型有时不一致的 Bug
### 0.0.38
- [update] API 版本升级至 3.0.54,APP 版本升级至 4.18.0,更新 UA,跟上 Android 客户端的步伐。
- [add] `zhihu_oauth.zhcls.urls` 增加了通知的 API 地址,虽然现在还没使用。
- [add] 增加了话题索引的获取(初次发布,未完全测试)
### 0.0.37
- [fix] 修复了一个因为搜索结果里有广告类型而出错的 Bug。
- [add] 增加了 `TokenError` 表示用户 Token 过期的错误
- [change] 去除了 `ZhihuObjGenerator` 的错误重试机制
### 0.0.36
- [fix] `SearchResultSection` 的 `has_more` 应该是属性而不是方法。
### 0.0.35
- [add] 增加了 `ZhihuClient.search` 和 `ZhihuClient.search_unfold` 方法,实现知乎的搜索。
- [add] 增加了 `SearchType` 枚举,表示搜索的方式。目前支持除了电子书外的几种搜索方式。
搜索功能的详细介绍请看文档:http://zhihu-oauth.readthedocs.io/zh_CN/latest/for-user/search.html
### 0.0.34
- [add] 增加了 `People.hosted_live_count/participated_live_count/live_count` 等属性。
- [add] `ZhihuClient.login_in_terminal()` 函数增加了 `captcha_filename` 参数用于设置保存验证码时的文件名。
- [add] 增加了 `Pin` 分享类,就是知乎的分享到首页功能。
- [add] 增加了 `ZhihuClient.pin()` 函数用户构建 Pin 对象。
- [add] `ZhihuClient.from_url()` 函数现在支持从 Pin 的 URL 构建 Pin 对象。
- [add] 增加了 `People.pins`, `People.pin_count` 两个属性。
- [add] `Me.comment()` 现在可以向 Pin 评论。
- [add] `Me.vote()` 现在可以给 Pin 点赞。
- [add] `Me.delete()` 现在能删除 Pin。
- [add] `Me.collect()` 现在能收藏文章。
- [change] 将一些只有获取当前登录用户信息时才能获取到的属性从 `People` 类移动到 `Me` 类中。
- [change] 修改了 'act2str' 对于 `ActType.LIKE_PIN` 和 `ActType.CREATE_PIN` 的输出格式。
- [change] 修改了文档的默认顺序,从按字母排序变为按照源代码顺序。
- [fix] 去掉了不需要(有点麻烦)的函数缓存。
### 0.0.33.post2
- [fix] 修复因为 `lru_cache` 的依赖导致的 Python 2 不兼容问题。
- [fix] 修复因为 `__all__` 变量中缺少一个逗号导致的使用 `from zhihu_oauth import *` 语句会出错的 Bug。
### 0.0.32
- [add] 增加用户对象的 `is_follower` 属性,表示此用户是否是我的粉丝。
- [add] 增加用户对象的 `is_following` 属性,表示我是否是此用户的粉丝。(用这两个属性的时候,脑子里在中间加个 `my` 可能会有利于理解)
- [add] 增加了 `Badge` 类,使用 `People.badge` 方法可获取徽章对象,可获取知乎的「个人认证」,「话题最佳回答者」,「已认证机构」等徽章内容。
### 0.0.31.post1
- [fix] 修复 `Activities.filter` 函数的实现错误,我最近越来越蠢了……
### 0.0.31
- [add] 增加对 socks 代理的支持,其实就是把 requests 的版本强制要求成 2.10.0 以上,然后自然而然就支持 socks 代理了。(Thanks [@lbc001](https://github.com/lbc001))
- [add] 增加了一个 `ts2str` 的辅助函数,用于把时间戳转换为表示时间的字符串。
- [add] 增加了一个 `act2str` 的辅助函数,用于把用户动态对象转换为描述这一动态的字符串。
- [add] 增加了一个 `ActivityFormatter` 类,可自定义用户动态转换为字符串时的模板。
- [add] `people.activities` 生成器增加了一个 `filter` 函数,可根据动态类别,或者自定义函数来过滤需要的用户动态。
### 0.0.30.post1
- [update] API 版本升级至 3.0.41,APP 版本升级至 4.12.0,更新 UA 和 UUID,跟上 Android 客户端的步伐。
- [fix] 修复因 message.py 中未设置文件编码造成的在 Python 2.7 环境下可能会出错的 Bug。
### 0.0.30
- [fix] 修复了一个尼玛嗨尼玛嗨的代码里不知道怎么多打了一个 `+` 的 Bug =。=
- [add] `Activity/ActType` 类增加了 `PUBLISH_LIVE` 型动态,表示用户举办 Live。
### 0.0.30.beta3
- [fix] 修复了当用户参与了某 Live 时,调用此 Live 的 `tickets` 属性会报错的 Bug。此 Bug 是由于知乎的查看票价接口不允许已参与的用户调用,目前此属性在这种情况下将给出 warning 并且不返回任何值。(对于 `for in` 操作是安全的)
- [add] 增加了 `Live.role` 属性表示用户于 Live 的关系。
- [add] 增加了 `Live.cospeakers` 属性表示 Live 的协作者,只有小部分多人主讲的 Live 这个属性才有值。
### 0.0.30.beta2
- [fix] 修复了在使用 `client.people()` 获取 `People` 对象后直接获取 `activities` 属性时会无法获取的 Bug。
### 0.0.30.beta1
- [add] 增加了 `Live` 类。
- [add] 增加了 `LiveBadge` Live 徽章类。
- [add] 增加了 `LiveTag` Live 标签类。
- [add] 增加了 `LiveTicket` Live 门票类。
- [add] `ZhihuClient` 增加了 `lives_ongoing` 和 `lives_ended` 两个属性,可以获取所有正在进行(或未开始)的 Live 和已经结束的 Live。并且提供 `lives` 快捷方法,将以上两个 Generator chain 起来。
- [add] `ZhihuClient` 增加了 `live()` 方法,通过 Live ID 获取 `Live` 对象。
- [add] `ZhihuClient.from_url()` 现在支持使用 Live 的 URL 创建 `Live` 对象。
- [add] `ZhihuClient` 增加了 `live_tags` 属性,用于获取所有 Live Tag。
- [add] `People` 类增加了 `lives` 属性,获取用户举办和参加的 Live。
- [add] `People` 类增加了 `liked_lives` 属性,获取用户感兴趣的 Live。
- [add] `Me.follow()` 方法现在可以对 `Live` 对象使用,也即对 Live 感兴趣(点小红心)。
- [change] `JOIN_LIVE` 型的动态现在返回 `type` 属性为 `ActType.JOIN_LIVE` 型的 `Activity` 对象,其 `target` 属性为 `Live` 对象。
### 0.0.29.post2
- [add] `Activity` 类增加了 `action_text` 属性,表示对动态的描述。比如 `xxx 赞同了问题`, `xxx 参加了 Live` 等
- [add/fix] `Activity/ActType` 类增加了 `VOTEUP_EBOOK` 型动态,暂时使用 `StreamingJSON` 做兼容,`EBook` 类预计在 0.0.32 或 33 版本才会增加。
### 0.0.29.post1
- [fix/add] 修复/增加了用户动态中的「参加 Live」,「创建分享」,「点赞分享」三种动作,暂时使用 `StreamingJSON` 做个兼容,后期会弄成和其他动作相同的形式并且增加 `Live` 和 `Pin` 类,预计在 0.0.30 版本吧。
### 0.0.29
- [add] 增加了之前忘记写的问题创建时间 `Question.created_time` 属性。
### 0.0.28
- [add] 增加了 `Collection.contents` 属性用于获取收藏夹里的答案和文章(具体用法请看文档)。
- [add] 增加了 `Collection.article` 属性用于获取收藏夹里的文章。
- [fix] 修复了 `Collection.answer` 因为新版知乎 API 取消接口而失效的问题。
### 0.0.27
- [fix] 修复 Python 3.4 和 2.7.x 及之前版本因为 dict unpacking 之后不能有 trailing comma 造成的语法错误。
### 0.0.26
- [add] 增加用户私信的获取接口:使用 `me.whispers` 获取对话列表,再用 `whisper.messages` 获取每条消息。
- [fix] 修复用户动态之前没有「收藏文章」类型而在获取时可能出错的情况
- [add] 用户动态增加了「收藏文章 (`COLLECT_ARTICLE`)」 类型
### 0.0.25
- [fix] 紧急修复 `shield()` 函数的实现错误。
(嗯我觉得这个版本号很好……因为我今天头痛,脑袋转了半天也没想清楚现在的实现到底对不对……不管了,反正是刚加的新功能,错就错吧,有人反馈我再改 =。=)
### 0.0.24
- [update] API 版本升级至 3.0.40,APP 版本升级至 4.11.0,更新 UA 和 UUID,跟上 Android 客户端的步伐
- [add] `People.activities` 现在能够获取「收藏答案」类型的动态,具体 `ActType` 和 `Activity.type` 取值请参见文档
- [fix] 修复获取某些用户时可能会 500 的 Bug
- [add] `shield` 现在可以防御 HTTP 请求达到最大重试次数的异常(`MaxRetryError`)
- [fix] `shield` 函数现在不能防御 `Activity` 生成器,因为它比较特殊
- [fix] 修改文档里的小 typo
注意,因为更新了 API 版本,而这个项目并没有完善的自动测试,所以某些情况下可能会造成 Bug,如果你遇到了,请暂时使用上一版本并与我联系,谢谢。
### 0.0.23
- [add] 增加 `People.over()` 和 `People.over_reason()` 函数判断用户是否被知乎反作弊系统屏蔽。
- [add] 增加 `shield()` 函数来辅助处理生成器获取数据时的异常。
- [per] 新增 `ZhihuException`, package 内所有自定义的异常均改为继承此异常,方便处理。
- [per] 新增 `ZhihuWarning`,package 内所有自定义的警告均继承此警告,方便处理。
- [del] 取消 404 错误码的自动重试。
### 0.0.22
- [fix] 修复 `ZhihuClient.people()` 方法的文档里的返回值类型错误。
- [add] 增加 `Activity.created_time` 属性表示用户动态的发生时间戳。
- [per] 完善 `Activity` 的文档。
### 0.0.21
- [fix] 修复 `Answer.save`,`Post.save()` 方法无法自定义多级目录,比如 `'data/' + answer.question.title` 的问题。
- [fix] 修复由于 Windows 不允许文件和文件夹名前后出现空格而导致的储存文件时可能出错的 Bug。
### 0.0.20
- [fix] 升级 OAuth API 到 3.0.29, APP Version 4.7.1,以支持机构帐号的基本资料获取。
### 0.0.19
- [fix] 修复了 Windows下文件名中含有换行时会出错的 Bug。
- [change] `Answer.save()` 和 `Article.save()` 方法的第三个参数现在不会重新设置非法字符列表,而是会更新它。
### 0.0.18
- [fix] 修复在使用 `BaseGenerator.add_params` 设置 `offset=100` 参数以函数跳过前面某些用户时,因为一直覆盖原始 URL 参数造成的只能循环获取 20 个数据的 Bug。
- [add] 提供了一个 `BaseGenerator.jump(n)` 函数作为 `add_params(offset=n)` 的简写。
### 0.0.17
- [add] `ZhihuClient.login_in_terminal` 增加了一个 `use_getpass` 参数用于控制在输入密码时是否使用密码模式(不回显输入),用于解决某些 Windows IDE 中无法在控制台中登录的问题。
- [change] Python 2 情况下,API requests 的 Header 从 unicode 转变成 ASCII,解决 Issue #20。
### 0.0.16
- [fix] 修复了在用户被知乎的新「悟空」系统屏蔽时,获取用户数据会发生 `MaxRetryError` 的 Bug,现在可以使用 `GetDataErrorException` 来捕获这一情况,并可用 `exception.reason` 输出原因。
- [del] 删除了 400 错误的自动重试机制。
- [fix] 修复了因 Python 2.7 版本,`getpass.win_getpass` 使用 `msvcrt.putch` 而不是 `msvcrt.putwch` 造成的使用 unicode 字符串作为输入密码时的提示字符串时出现的异常。
- [fix] 修改了一处 magic string。
### 0.0.15
- [fix] 修复了当文章不属于任何专栏时,使用 ``article.column`` 获取时会出错的 Bug。
### 0.0.14
- [fix] 修复了知乎现在的 API 添加了新验证字段 UUID 和 UA 而导致的无法使用的 Bug。
- [change] 关闭非安全请求的警告和输入密码时的警告。
### 0.0.13
- [fix] 修复了保存答案或文章时文件名的扩展名部分会多一个点的 bug。
- [fix] 修复了保存答案或文章时传递进文件名没有进行非法字符过滤引发的 bug。
### 0.0.12 - 2016.05.29
- [add] 莫名其妙的好像知乎 API 限制了获取用户粉丝的数量,只允许获取前 5020 个?稍微加了个 Warning。
### 0.0.11 - 2016.05.15
- [fix] 我真是傻 QwQ,自动重试机制写错了,现在应该是对了……
### 0.0.10 - 2016.05.15
- [fix] 修复一个由于登录时服务器返回的数据和大多数情况不一致造成的无法登录的 Bug(Issue #13)
- [add] 为 `StreamingJSON` 类增加了 `raw_data` 方法,用于获取内部数据的副本
### 0.0.9 - 2016.05.15
- [change] 修改了 `ZhihuClient.login` 方法中某些失败信息,使其能更明确的说明失败原因
- [add] 尝试性的为网络请求加入了自动重试机制
- [add] 完善文档,加入了手机登录的说明
### 0.0.8 - 2016.05.04
- [fix] 修复了 `Topic.best_answerers` 因返回的 JSON 与常规返回不符造成的 Bug
### 0.0.7 - 2016.04.28
- [fix] 修复了设置代理后因为关闭了 SSL 而造成的报 Warning 的问题
- [add] `Comment` 类增加了获取父评论作者的 `reply_to` 属性
### 0.0.6 - 2016.04.21
- [fix] 修复了 `Collection` 类的 `answer_count` 属性无法使用的 bug
- [change] 由于发现知乎 API 无法获取除自己以外用户关注的收藏夹,将 `following_collections` 由 `People` 类 移动至 `Me` 类中
### 0.0.5 - 2016.04.18
- [add] `Topic` 类增加了 `followers` 属性,可获取话题关注者
- [add] `Me` 类增加了 `vote` 方法,可以给答案/文章/评论点赞同/[反对]/清除赞和反对。
- [add] `Me` 类增加了 `thanks` 方法,可以给答案点感谢/取消感谢
- [add] `Me` 类增加了 `unhelpful` 方法,可以给答案没有帮助/取消没有帮助
- [add] `Me` 类增加了 `follow` 方法,可以关注/取消关注问题/话题/用户/专栏/收藏夹
- [add] `Me` 类增加了 `block` 方法,可以屏蔽/取消屏蔽用户
- [add] `Me` 类增加了 `collect` 方法,可以将答案加入自己的收藏夹
- [add] `Me` 类增加了 `message` 方法,可以向别的用户发私信
- [add] `Me` 类增加了 `comment` 方法,可以向答案/文章/问题/收藏夹发送评论,并且支持回复特定评论
- [add] `Me` 类增加了 `delete` 方法,可以删除自己的答案/评论/收藏夹/文章
### 0.0.4 - 2016.04.16
- [change] 所有自定义异常修改为继承 `Exception` 类,遵循 Python 文档的要求。[REF](https://docs.python.org/2/library/exceptions.html#exceptions.Exception)
- [add] `ZhihuClient` 增加 `set_proxy` 方法,可设置代理
- [add] 增加了 `People` 类的 `activities` 属性,可以获取用户动态
- [fix] 修复 Python 2 下因为 `__init__.py` 文件中的 `__all__` 变量是 unicode 而造成的 `from xxx import *` 报错的 bug
- [change] 生成器不再尝试使用类内缓存的数据,而是一定会访问 API(改了一下实现,对用户接口没啥影响)
- [add] 小小的增加了一点没啥用的测试
### 0.0.3 - 2016.04.09
- [add] 增加了 `ZhihuClient.from_url` 方法,传入合法的知乎网址,就能生成对应的对象
- [add] 给 `BaseGenerator` 增加了 `add_params` 和 `set_params` 方法
- [fix] 修复了 `BaseGenerator` 在 Python 2 下有问题的情况。
- [fix] 修复了当用户的 `locations`,`educations`,`business`,`employments` 等属性值不存在强行获取会出错的 bug
- [add] 写完了文档
- [change] 改变了好多内部类名和变量名,不过对外部接口没有影响
### 0.0.2 - 2016.04.07
- [fix] 修复错误的 BASE_HTML_HEADER 值。原值会导致 html 文件在 Firefox 中打开时,由于没有编码信息而显示不正确的问题。
- [add] 完善文档,用户文档基本写完。
### 0.0.1 - 2016.04.07
首次发布,提供基础功能。
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/changelog.md | changelog.md |
# Zhihu-OAuth
[![author][badge-author]][my-zhihu] [![ci-dev][ci-dev-img]][ci-page] [![ci-master][ci-master-img]][ci-page] [![docs][badge-docs]][rtds-home] [![version][badge-version]][pypi] [![py-version][badge-py-version]][pypi] [![state][badge-state]][pypi] [![license][badge-license]][license]
## 近况
由于知乎给 Github 发了 [DMCA][zhihu-dmca] 要求删除 [zhihu-oauth 仓库][github-repo],所以 Github 把仓库设置为不可公开访问了。所以目前我只能把最新版代码同步到我自己个人的 Git Server 上了。
虽然一直有用户和我反馈还在使用这个库,并且某些功能也确实可以继续正常使用,但由于本人已经不用知乎多年,所以这个库其实有两年多没更新过了。
这次移动到个人站点之后,由于本站点不对外开放注册,游客就没法使用 Issue 和 PR 功能,所以可以预见的未来估计也不会有什么更新了,这里基本只提供代码备份和下载功能。
由于以上原因,README 中的捐款渠道也已一并删除。
如果你有想交流的问题,请使用邮件,或者 [Twitter][my-twitter] 联系我。
—— 2019.10.19
## 简介
**同学们,由于知乎新的 API 验证 UA,0.0.14 之前的版本已经不可用了,请尽快升级到 0.0.14 以上版本。**
最近在尝试解析出知乎官方未开放的 OAuth2 接口,顺便提供优雅的使用方式,作为 [zhihu-py3][zhihu-py3-github] 项目的继任者。
恩,理论上来说会比 zhihu-py3 更加稳定,原因如下:
- 知乎 API 相比前端 HTML 来说肯定更加稳定和规范
- 这次的代码更加规范
- 网络请求统一放在基类中
- 属性解析统一放在装饰器中,各知乎类只用于声明有哪些属性可供使用
- 统一翻页逻辑,再也不用一个地方一个逻辑了
- 翻页时的自动重试机制(虽然不知道有没有用吧)
这一新库与 zhihu-py3 相比速度更快。有关速度对比的详细信息请点击[这里][speed-compare]。
**这个库是 Py2 和 Py3 通用的!** 但是 Py3 的优先级比 Py2 高,也就是说,我会优先保证在 Py3 下的稳定性和正确性。毕竟在我学的时候选了 Py3,所以对 2 与 3 的差异了解不是很清楚,Py2 只能尽力而为了。
后期的计划是这样的:
- 0.0.x 这个阶段是 alpha 期,主要做的是补齐功能的工作。基本上 TODO 里的功能都会在这个时期实现。其中 0.0.5 版本计划完成和 zhihu-py3 同样多的功能(**已完成**)。
- 0.1.x 这个阶段是 beta 期,主要做完善测试,修复 bug,提升性能,改善架构之类的工作吧。以上两个阶段变化很大,有可能出现不兼容老版本的更新。使用需要注意。
- 0.2.x 及以后就是 stable 期,只要 API 不变,基本上代码结构就不会变了,接口可能会增加但一定不会减。
由于现在使用的 CLIENT_ID 和 SECRET 的获取方法并不正当,所以请大家暂时不要大规模宣传,自己用用就好啦,Thanks。
等我什么时候觉得时机成熟(等知乎真•开放 OAuth 申请?),会去知乎专栏里宣传一波的。
## 最近更新
目前版本是 0.0.41,没更新的快更新一下,更新说明在[这里][changelog]。
0.0.41 版本修复了 Feed 流的一些问题,加上了 `Topic.activities` 接口。
0.0.40 版本增加了 Feed 首页信息流的支持。
## 使用
### 安装
```bash
pip install -U zhihu_oauth
```
如果安装遇到问题,请查看文档:[安装][rtds-install]
### 登录
请参见文档:[登录][rtds-login]
### 获取基础信息
代码:
```python
from zhihu_oauth import ZhihuClient
client = ZhihuClient()
client.load_token('token.pkl')
me = client.me()
print('name', me.name)
print('headline', me.headline)
print('description', me.description)
print('following topic count', me.following_topic_count)
print('following people count', me.following_count)
print('followers count', me.follower_count)
print('voteup count', me.voteup_count)
print('get thanks count', me.thanked_count)
print('answered question', me.answer_count)
print('question asked', me.question_count)
print('collection count', me.collection_count)
print('article count', me.articles_count)
print('following column count', me.following_column_count)
```
输出:
```text
name 7sDream
headline 二次元普通居民,不入流程序员,http://0v0.link
description 关注本AI的话,会自动给你发私信的哟!
following topic count 35
following people count 101
followers count 1294
voteup count 2493
get thanks count 760
answered question 258
question asked 18
collection count 9
article count 7
following column count 11
```
更多功能请参见文档:[使用方法][rtds-usage]
## 文档
完整的文档可以在[这里][rtds-home] 找到。我写的文档好吧,可详细了……有啥问题先去找文档。我写的那么累你们看都不看我好不服啊!
(貌似 ReadTheDocs 在伟大的国家访问速度有点慢,建议自备手段。)
## TODO
- [x] 保证对 Python 2 和 3 的兼容性
- [x] 用户私信支持
- [x] Live 支持
- [x] Pin(分享)支持
- [x] 搜索功能(还差电子书搜索)
- [x] 用户首页 Feed
- [ ] 知乎电子书
- [ ] 获取用户消息。新关注者,新评论,关注的回答有新问题
- [ ] Token check/refresh
- [ ] Setting
- [ ] 规范、完善的测试
- [ ] article.voters 文章点赞者,貌似 OAuth2 没有这个 API
- [ ] collection.followers 这个 API 不稳定,没法返回所有关注者
## 协助开发
### 通过代码
1. Fork
2. 从 dev 分支新建一个分支
3. 编写代码,更新 Changelog 和 sphinx 文档,如果可能的话加上测试
4. PR 到原 dev 分支
### 通过捐款
由于开发不积极,不再接受捐款。
[捐款记录][donate-record]
## LICENSE
MIT
[zhihu-py3-github]: https://github.com/7sDream/zhihu-py3
[speed-compare]: https://github.com/7sDream/zhihu-oauth/blob/master/compare.md
[changelog]: https://github.com/7sDream/zhihu-oauth/blob/master/changelog.md
[rtds-home]: http://zhihu-oauth.readthedocs.io/zh_CN/latest
[rtds-install]: http://zhihu-oauth.readthedocs.io/zh_CN/latest/guide/install.html
[rtds-login]: http://zhihu-oauth.readthedocs.io/zh_CN/latest/guide/login.html
[rtds-usage]: http://zhihu-oauth.readthedocs.io/zh_CN/latest/guide/use.html
[badge-author]: https://img.shields.io/badge/Author-7sDream-blue.svg
[badge-docs]: https://readthedocs.org/projects/zhihu-oauth/badge/?version=latest
[badge-version]: https://img.shields.io/pypi/v/zhihu_oauth.svg
[badge-py-version]: https://img.shields.io/pypi/pyversions/zhihu_oauth.svg
[badge-state]: https://img.shields.io/pypi/status/zhihu_oauth.svg
[badge-license]: https://img.shields.io/pypi/l/zhihu_oauth.svg
[my-zhihu]: https://www.zhihu.com/people/7sdream
[ci-page]: https://travis-ci.org/7sDream/zhihu-oauth
[ci-dev-img]: https://api.travis-ci.org/7sDream/zhihu-oauth.svg?branch=dev
[ci-master-img]: https://api.travis-ci.org/7sDream/zhihu-oauth.svg?branch=master
[pypi]: https://pypi.python.org/pypi/zhihu_oauth
[license]: https://github.com/7sDream/zhihu-oauth/blob/master/LICENSE
[donate-record]: https://github.com/7sDream/zhihu-oauth/blob/donate/donate.md
[my-twitter]: https://twitter.com/7sDream
[zhihu-dmca]: https://github.com/github/dmca/blob/master/2019/10/2019-10-17-Zhizhetianxia.md
[github-repo]: https://github.com/7sDream/zhihu-oauth | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/README.md | README.md |
Install - 安装
==============
Use pip - 使用 pip
------------------
zhihu_oauth 支持 Python 2 和 Python 3,用对应版本的 pip 安装即可。
.. code-block:: bash
pip install zhihu_oauth
.. note:: 提示
请自行判断是否要加上 ``sudo`` 命令。
请自行选择对应的 pip 版本(如 ``pip3``,``pip2.7``)替换上述命令中的 ``pip``。
如果不报错大概就是安装成功了。
Use git - 使用 git
------------------
.. warning:: 别!
不推荐用这种方法安装!pip 还满足不了你么!?
.. code-block:: bash
git clone https://github.com/7sDream/zhihu-oauth.git
cd zhihu_oauth
python setup.py install
.. note:: 提示
请自行判断是否要加上 ``sudo`` 命令。
请自行选择对应的 python 版本(如 ``python3``,``python2.7``)
替换上述命令中的 ``python``。
如果不报错大概就是安装成功了。
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/guide/install.rst | install.rst |
Usage - 使用方法
================
Charset - 关于字符编码
----------------------
Python 3 用户不用关心这一小节。
.. warning:: 给 Python 2 用户
为了防止我被编码的不统一问题弄得焦头烂额,zhihu_oauth 内部统一
使用 utf-8 编码。你可能需要自己处理 encode decode 之类乱七八糟的问题。
如果你只是想使用 print 打印结果的话,最好导入一下 print function,
因为 Python 2 自己的 print 会将 unicode 以 ``u'\uxxxx'`` 的方式输出,
而导入了之后就相当于使用 Python 3 的 print 方法了。
.. code-block:: python
from __future__ import print_function
Generic example - 通用示例
--------------------------
不管用什么方法,登录成功之后就可以愉快的使用了。
zhihu_oauth 的使用方法很简单,用已登录 :any:`ZhihuClient` 构造想要的对象,
然后取数据就好。
这里以 :any:`ZhihuClient.me` 为例,给一些通用的用法
Normal attr - 普通属性
~~~~~~~~~~~~~~~~~~~~~~
普通属性表示哪些通过 ``.`` 操作符能够直接取到基本数据类型的数据,例子如下:
.. code-block:: python
# import、构建 client 以及登录知乎的代码省略
me = client.me()
print('name', me.name)
print('headline', me.headline)
print('description', me.description)
print('following topic count', me.following_topic_count)
print('following people count', me.following_count)
print('followers count', me.follower_count)
print('voteup count', me.voteup_count)
print('get thanks count', me.thanked_count)
print('answered question', me.answer_count)
print('question asked', me.question_count)
print('collection count', me.collection_count)
print('article count', me.articles_count)
print('following column count', me.following_column_count)
产生如下输出
.. code-block:: none
name 7sDream
headline 二次元普通居民,不入流程序员,http://0v0.link
description 关注本AI的话,会自动给你发私信的哟!
following topic count 35
following people count 101
followers count 1294
voteup count 2493
get thanks count 760
answered question 258
question asked 18
collection count 9
article count 7
following column count 11
Object attr and streaming call - 对象属性和流式调用
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
有些属性对应的是知乎类或者知乎类的列表(生成器)。
生成器可以通过 ``for ... in ...`` 进行迭代。
知乎类可以通过连续的 ``.`` 操作符进行流式调用,直到获取到基本属性。
.. code-block:: python
# 获取最近 5 个回答
for _, answer in zip(range(5), me.answers):
print(answer.question.title, answer.voteup_count)
print('----------')
# 获取点赞量最高的 5 个回答
for _, answer in zip(range(5), me.answers.order_by('votenum')):
print(answer.question.title, answer.voteup_count)
print('----------')
# 获取最近提的 5 个问题
for _, question in zip(range(5), me.questions):
print(question.title, question.answer_count)
print('----------')
# 获取最近发表的 5 个文章
for _, article in zip(range(5), me.articles):
print(article.title, article.voteup_count)
输出:
.. code-block:: none
如何想象诸如超立方体之类的四维空间物体? 10
你的第一次心动献给了 ACGN 作品中的谁? 3
大年初一差点把自己饿死在家里是一种怎样的体验?以及有没有什么建议来规划自己的日常生活? 1
有哪些歌曲色气满满? 27
作为程序员,自己在Github上的项目被很多人使用是什么体验? 32
----------
只是为了好玩儿,如何学编程? 593
计算机领域有哪些短小精悍的轮子?(仅用于教学) 268
小明打饭的问题? 198
如何写个爬虫程序扒下知乎某个回答所有点赞用户名单? 116
被盗版泛滥毁掉的行业,是如何一步一步走向消亡的? 95
----------
用户「松阳先生」的主页出了什么问题? 1
C++运算符重载在头文件中应该如何定义? 1
亚马逊应用市场的应用都是正版的吗? 0
Tkinter中event_generate创建的Event如何附加数据? 1
用Android Studio开发对电脑配置的要求? 7
----------
你们资道吗,知乎多了个新功能哟 7
谢谢你关注我呀!!! 28
【软件推荐01】Seer——给Win加上空格预览功能 13
终于寒假惹!准备开始写东西啦~ 14
吐槽 + 更新说明 + 寒假专栏征求意见稿 10
Streaming JSON - 流式 JSON
~~~~~~~~~~~~~~~~~~~~~~~~~~
另一种和知乎类很像的东西叫做 :any:`StreamingJSON`。你可以把它想像成一个 JS 对象。
如果你不熟悉 JS 的话,那就想像成一个 Python 字典好了,只是这个字典不用 ``[]``,
而是用 ``.`` 来取出数据。
.. code-block:: python
me = client.me()
locations = me.locations
print(locations)
for location in locations:
print(location.name, location.avatar_url)
输出(格式化后):
.. code-block:: none
[
{
'name':'天津',
'avatar_url':'http://pic4.zhimg.com/acad405e7_s.jpg',
'introduction':'天津,简称津,地处华北平原,自古因漕运而兴起,明永乐二年十一月二十一日(1404年12月23日)正式筑城,是中国古代唯一有确切建城时间记录的城市。经历600余年,特别是近代百年,造就了天津中西合璧、古今兼容的独特城市风貌。\xa0',
'excerpt':'天津,简称津,地处华北平原,自古因漕运而兴起,明永乐二年十一月二十一日(1404年12月23日)正式筑城,是中国古代唯一有确切建城时间记录的城市。经历600余年,特别是近代百年,造就了天津中西合璧、古今兼容的独特城市风貌。 ',
'type':'topic',
'id':'19577238',
'url':'https://api.zhihu.com/topics/19577238'
}
]
天津 http://pic4.zhimg.com/acad405e7_s.jpg
对照代码和输出,我相信你能理解什么叫做 StreamingJSON。
.. seealso:: 详细
有关 StreamingJSON 的更多资料请看 :ref:`intro_streaming_json`
Get other object - 获取其他对象
-------------------------------
除了 :any:`Me` 以外,还有很多类可供使用,比如 :any:`Answer` 可以通过
:any:`ZhihuClient.answer` 方法获取,并输出答案的一些资料:
.. code-block:: python
answer = client.answer(94150403)
print(answer.question.title)
print(answer.author.name)
print(answer.voteup_count)
print(answer.thanks_count)
print(answer.created_time)
print(answer.updated_time)
for voter in answer.voters:
print(voter.name, voter.headline)
输出如下:
.. code-block:: none
如何评价南开大学津南校区的建设质量?
7sDream
4
0
1460039289
1460088371
秦承平 莫做开山怪,莫做开山怪!
CINDY Warm♥Brave
杀马特绅少 懂礼貌的好周绅
codefalling https://github.com/CodeFalling
所有可用的类请转到 :ref:`知乎类文档 <for_user_zhcls>` 进行查看,用法均类似。
除了以上的使用方式外,:any:`ZhihuClient` 还提供了一个通用的,通过 URL 的创建知乎类对象的方法。
比如上述代码中的
``answer = client.answer(94150403)``
可以改写成
``answer = client.from_url('https://www.zhihu.com/question/42248369/answer/94150403')``
传递不同的 URL 可以获得不同的对象以供使用。
.. seealso:: 另见
:any:`ZhihuClient.from_url`
Backup & Save - 备份和保存
--------------------------
zhihu_oauth 还提供了简单地备份(保存)答案和文章的功能。以答案为例:
.. code-block:: python
question = client.question(35166763)
print(question.title)
for answer in question.answers:
print(answer.author.name, answer.voteup_count)
answer.save(question.title)
输出:
.. code-block:: none
Dota2有什么你知道的小技巧?来恶补一下!?
呵呵 341
赵小胖 523
隔壁小岚哥 69
曹凌群 51
匿名用户 43
# many many author name
匿名用户 0
托托 0
结果:
.. figure:: /images/save-answer.png
.. seealso:: 保存
答案保存功能的详细参数参见 :any:`Answer.save`
文章保存功能的详细参数参见 :any:`Article.save`
What's Next - 下一步
--------------------
这里只用 :any:`Me` 类作为示例,其他类的用法其实也类似。
.. seealso:: 有那些类可以使用?
请看 :ref:`知乎相关类文档 <for_user_zhcls>`
用 :any:`ZhihuClient` 的生成这些对象的方法请看 :doc:`这里 <../for-user/client>`
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/guide/use.rst | use.rst |
Login - 登录
============
zhihu_oauth 中对用户来说最重要的类就是 :any:`ZhihuClient` 了。
想要获取到知乎的数据,必须先创建 :any:`ZhihuClient` 对象并登录。
以下是几种常用的登录方法。
.. warning:: 关于手机号登录
使用手机号登录需要在手机号前加 ``+86`` 前缀。
使用手机号登录需要在手机号前加 ``+86`` 前缀。
使用手机号登录需要在手机号前加 ``+86`` 前缀。
说三遍。
use login - 使用 login
--------------------------
.. warning:: 小心
这种方式在知乎要求输入验证码时会引发 NeedCaptchaException,需要进行处理
.. seealso::
:any:`ZhihuClient.login`
.. code-block:: python
    from zhihu_oauth import ZhihuClient
    from zhihu_oauth.exception import NeedCaptchaException

    client = ZhihuClient()

    try:
        client.login('email_or_phone', 'password')
    except NeedCaptchaException:
        # 保存验证码并提示输入,重新登录
        with open('a.gif', 'wb') as f:
            f.write(client.get_captcha())
        captcha = input('please input captcha:')
        client.login('email_or_phone', 'password', captcha)
use login_in_terminal - 使用 login_in_terminal
----------------------------------------------
第二种方式,使用 :any:`login_in_terminal` 方法,此方法可以无参数调用,
将会在终端中提示用户输入用户名和密码。
也可以将用户名和密码作为参数,此时将不会提示输入。
此方式在遇见知乎需要验证码时会自动将验证码保存并提示用户输入,不用用户处理。
.. seealso::
:any:`ZhihuClient.login_in_terminal`
.. note::
为节省篇幅,``import`` 语句和 构建 :any:`ZhihuClient` 类的语句均省略,下同。
.. code-block:: python
client.login_in_terminal() # or ('email_or_phone', 'password')
Use load_token - 使用 load_token
--------------------------------
第三种方式,载入 token 文件。
.. seealso::
:any:`ZhihuClient.load_token`
.. code-block:: python
client.load_token('filename')
Save login session - 保存登录会话
---------------------------------
有 load 当然也就有 save。
在成功登录之后,可以使用 :any:`save_token` 方法保存登录状态,留着以后 load。
.. seealso::
:any:`ZhihuClient.save_token`
.. code-block:: python
# 必须在 client 已经处于登录状态时才能使用
client.save_token('token.pkl')
Recommended way - 建议的用法
----------------------------
综上所述,如果你的代码运行在终端环境下,我建议这样处理登录环节
.. code-block:: python
# coding=utf-8
from __future__ import unicode_literals, print_function
import os
from zhihu_oauth import ZhihuClient
TOKEN_FILE = 'token.pkl'
client = ZhihuClient()
if os.path.isfile(TOKEN_FILE):
client.load_token(TOKEN_FILE)
else:
client.login_in_terminal()
client.save_token(TOKEN_FILE)
如果你在编写一个 GUI 程序的话,请自行处理 token 文件。
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/guide/login.rst | login.rst |
一些辅助函数
============
生成器护盾(误
--------------
由于知乎有很多列表型数据,比如用户的关注者,问题的答案,专栏的文章,等等。这些数据在
知乎的 API 里是通过统一的分页逻辑来一段一段的发送的。(详细说明请看::ref:`generator`)
所以在用 ``for...in loop`` 获取这些数据的时候由于网络或者知乎的原因可能出现异常,
但是因为是生成器,如果你在外部 ``try...catch`` 处理异常的话,就又需要从头开始获取数据了……
所以知乎的分页数据生成器提供了 jump 函数,方便你处理完异常之后跳到上次的地方继续获取数据……(见 :any:`BaseGenerator.jump`)
但是这样还是很麻烦 =。=,所以我写了个辅助函数 shield 来处理这个问题。
如果下面的说明和例子看不懂的话……点开右边的 ``[源代码]`` 按钮,看看源码你就懂了……
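下面是一个最简单的用法示意(只演示「把生成器包一层再迭代」这个思路,参数细节请以下方的函数文档为准):
.. code-block:: python

    from zhihu_oauth.helpers import shield

    me = client.me()

    # 直接迭代 me.followers 时,一旦出现异常整个循环就中断了;
    # 包上一层 shield 之后,可以按预设的策略处理异常,继续获取后面的数据
    for follower in shield(me.followers):
        print(follower.name)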
.. autofunction:: zhihu_oauth.helpers.shield
.. autodata:: zhihu_oauth.helpers.SHIELD_ACTION
时间戳转换
----------
.. autofunction:: zhihu_oauth.helpers.ts2str
用户动态格式化
--------------
.. autodata:: zhihu_oauth.helpers.act2str
.. autoclass:: zhihu_oauth.helpers.ActivityFormatter
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-user/helpers.rst | helpers.rst |
Search - 搜索
-------------
知乎的搜索功能通过 :any:`ZhihuClient.search` 方法提供。
目前知乎提供了 6 个搜索方式, :any:`SearchType` 枚举常量表示这六种方式,作为参数传递给 :any:`ZhihuClient.search` 方法。
方式与枚举常量对应关系如下:
.. automodule:: zhihu_oauth.zhcls.search
:members: SearchType
搜索的常见用法:
.. code-block:: Python
client.search('程序', SearchType.COLUMN)
client.search('7sDream', SearchType.PEOPLE)
除了 ``SearchType.GENERAL`` 方式,其他方式的搜索都会返回 :any:`SearchResult` 对象的迭代器。
可用属性如下:
.. autoclass:: zhihu_oauth.zhcls.search.SearchResult
:members:
:undoc-members:
:special-members: __init__
所以一般这样用:
.. code-block:: Python
    for result in client.search('程序', SearchType.COLUMN):
        column = result.obj
        print(column.title, column.author.name)
        # do something with `column`
结果: ::
程序员实验室 Wayne Shi
程序员达达 达达
程序人生 hi大头鬼hi
程序员的自我修养 luckystar
反转程序猿 大房
程序员作战手册 Jim Jin
红客联盟 小食妹
非著名程序员 loonggg
其他类型的搜索的用法也类似,就不赘述了。
而 ``SearchType.GENERAL`` 方式的搜索也是迭代器,但可能返回 :any:`SearchResult` 和 :any:`SearchResultSection` 对象。
:any:`SearchResultSection` 对象除了自身有一些属性(见下)之外,本身也是个 :any:`SearchResult` 的迭代器:
.. autoclass:: zhihu_oauth.zhcls.search.SearchResultSection
:members:
:special-members: __init__
这样用起来就有点麻烦,你得判断迭代器返回的是那种对象,大概就要这样写:
.. code-block:: Python
for result in client.search("panda", search_type=SearchType.GENERAL):
if isinstance(result, SearchResultSection):
print(result.type, "search result list:")
for r in result:
# do something with r
print(r.obj)
else:
# result is SearchResult object
r = result
# do something with r
print(r.highlight_title, r.highlight_desc)
print(r.obj)
print('-' * 20)
结果如下: ::
topic search result list:
<zhihu_oauth.zhcls.topic.Topic object at 0x7f19e9c1ce48>
--------------------
column search result list:
<zhihu_oauth.zhcls.column.Column object at 0x7f19e9c1ce48>
--------------------
people search result list:
<zhihu_oauth.zhcls.people.People object at 0x7f19e9c1ce48>
<zhihu_oauth.zhcls.people.People object at 0x7f19e9c1ceb8>
<zhihu_oauth.zhcls.people.People object at 0x7f19e9c1ce80>
--------------------
你有哪些收藏来反复看的<em>大熊猫</em>(<em>panda</em>)的图片? <em>熊猫</em><em>panda</em>的尾巴是白色的白色的白色的,重说三,看到好多<em>熊猫</em>玩偶都把<em>熊猫</em>尾巴做成黑色的,就连功夫<em>熊猫</em>里阿宝的尾巴都是黑色的,我觉得有必要科普一下哦,对了,图片来自ipanda,
<zhihu_oauth.zhcls.answer.Answer object at 0x7f19e9c1cef0>
--------------------
如何评价<em>熊猫</em>tv狼人杀新节目<em>panda</em>kill? 10月22日局更新.就第一集而言个人分析仅供参考.首先十二位玩家一一点评.1号鼠大王:比上一季进步了,当民的时候站边,发言都阳光了很多,没有被抗推就是不错的进步,但是当狼的时候依然会紧张状态不稳,第三
<zhihu_oauth.zhcls.answer.Answer object at 0x7f19e9c1cef0>
--------------------
# ... 未完 ...
由于这样写不是很方便,所以提供了 :any:`ZhihuClient.search_unfold` 方法,他会自动将 :any:`SearchResultSection` 展开,生成 :any:`SearchResult` 型的对象,用法:
.. code-block:: Python
    for result in client.search_unfold("panda"):
        # result is SearchResult object
        r = result
        print(r.highlight_title, r.highlight_desc)
        print(r.obj)
        print('-' * 20)
结果: ::
<zhihu_oauth.zhcls.topic.Topic object at 0x7f6ffa42bf60>
--------------------
我吃掉了一辆奔驰
<zhihu_oauth.zhcls.column.Column object at 0x7f6ffa42bf60>
--------------------
<zhihu_oauth.zhcls.people.People object at 0x7f6ffa42bf60>
--------------------
<zhihu_oauth.zhcls.people.People object at 0x7f6ffa42bf60>
--------------------
<zhihu_oauth.zhcls.people.People object at 0x7f6ffa42bf60>
--------------------
你有哪些收藏来反复看的<em>大熊猫</em>(<em>panda</em>)的图片? <em>熊猫</em><em>panda</em>的尾巴是白色的白色的白色的,重说三,看到好多<em>熊猫</em>玩偶都把<em>熊猫</em>尾巴做成黑色的,就连功夫<em>熊猫</em>里阿宝的尾巴都是黑色的,我觉得有必要科普一下哦,对了,图片来自ipanda,
<zhihu_oauth.zhcls.answer.Answer object at 0x7f6ffa42bf60>
--------------------
如何评价<em>熊猫</em>tv狼人杀新节目<em>panda</em>kill? 10月22日局更新.就第一集而言个人分析仅供参考.首先十二位玩家一一点评.1号鼠大王:比上一季进步了,当民的时候站边,发言都阳光了很多,没有被抗推就是不错的进步,但是当狼的时候依然会紧张状态不稳,第三
<zhihu_oauth.zhcls.answer.Answer object at 0x7f6ffa42bef0>
--------------------
如何评价11.5 <em>panda</em>kill 各位的表现? 其实这一期我感觉没有分析的必要,因为这一期总体上就是上一集坏现象进一步恶化后形成的的"进阶版大乱斗",重复的话我觉得没必要再说了,这里随手放个上一期回答的链接~如何评价10.29 pandakill
<zhihu_oauth.zhcls.answer.Answer object at 0x7f6ffa42bf28>
--------------------
# ... 未完 ... 最前面那些空行是因为 `highlight_title` 和 `highlight_desc` 属性都是空。
推荐在综合搜索时使用 :any:`ZhihuClient.search_unfold` 方法,注意,此方法不支持设置搜索类型,也就是说只支持综合搜索。
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-user/search.rst | search.rst |
Whisper & Message - 私信会话和私信消息
======================================
.. topic:: 注意
请先查看 :doc:`说明 <intro>` 了解一下知乎相关类的文档的阅读方法。
否则你会看不懂下面的东西的…………
Example - 用法示例
------------------
.. code-block:: python
    # 省略登录步骤
    me = client.me()

    for whisper in me.whispers:
        print("Whisper with", whisper.who.name)
        print('Allow_reply', whisper.allow_reply)
        print('Unread count', whisper.unread_count)
        print('Updated time', whisper.updated_time)
        print('Snippet', whisper.snippet)
        print('-------------------------------------')
        for message in whisper.messages:
            print(message.format("{sender} to {receiver}: {content}"))
        print('-------------------------------------')
Class Ref - 类文档
------------------
.. automodule:: zhihu_oauth.zhcls.whisper
:members:
:undoc-members:
.. automodule:: zhihu_oauth.zhcls.message
:members:
:undoc-members:
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-user/zhcls/whisper_and_message.rst | whisper_and_message.rst |
Intro - 知乎类文档阅读说明
==========================
Usage - 使用方法
----------------
所有知乎类都不建议手动构建,而应该使用 :any:`ZhihuClient` 提供的相应的
生成方法来创建。
如想得到一个答案对象,请使用 :any:`ZhihuClient.answer` 方法,其余类似。
每个类所需要的 ID 参数如何获取请参考 :any:`ZhihuClient` 类对应方法的文档。
.. seealso::
:any:`ZhihuClient`
.. _intro_normal_attr:
Normal attr - 常规属性
----------------------
如果一个属性没有说明,则表示:
- 它的名称已经把自己描述的足够清楚了。
- 如果它是个单数,表示直接通过 ``.`` 操作符,
能直接获取到基本类型 ``(str, int, float, bool)`` 的数据,或另一个知乎对象。
.. note:: 举例
- :any:`Answer.voteup_count` 表示一个答案获得的赞同数(很明显是个 ``int``)。
- :any:`Answer.author` 表示答案的作者(很明显应该是 :class:`.People` 类)。
.. _intro_streaming_json:
Streaming JSON attr - 流式 JSON 属性
------------------------------------
如果我说明了一个属性的常见返回值,则表示
- 它返回的是一个 :any:`StreamingJSON` 对象,可以想像成一个 JS Object。
- 它的属性可通过 ``.`` 和 ``[]`` 操作符进行遍历。
.. note:: 举例
:any:`Answer.suggest_edit` 的常见返回值是
.. code-block:: python
{
'status': True,
'title': '为什么回答会被建议修改',
'tip': '作者修改内容通过后,回答会重新显示。如果一周内未得到有效修改,回答会自动折叠',
'reason': '回答被建议修改:\\n不宜公开讨论的政治内容',
'url': 'zhihu://questions/24752645'
}
表示我们可以
- 通过 ``answer.suggest_edit.status`` 取到 ``True``
- 通过 ``answer.suggest_edit.reason`` 取到 ``'回答被建议修改:\n不宜公开讨论的政治内容'``
.. note:: 再举例
:any:`People.locations` 的常见返回值是
.. code-block:: python
[
{
'introduction': '天津,简称津,地处华北平原,balabala,
'url': 'https://api.zhihu.com/topics/19577238',
'avatar_url': 'http://pic4.zhimg.com/acad405e7_s.jpg',
'excerpt': '天津,简称津,地处华北平原 balabalabala',
'type': 'topic',
'name': '天津',
'id': '19577238',
},
],
最外面是一个列表表示我们可以迭代它:
.. code-block:: python
for location in people.locations:
print(location.name, location.excerpt)
.. _tips-for-conflict-with-keyword:
.. note:: 提示
如果某个属性和 Python 的关键字冲突,请在属性名后面加上下划线 ``_`` 即可。
对了,如果你不喜欢用 ``.`` 操作符,而偏爱标准dict和list的操作模式,你可以使用
:any:`StreamingJSON.raw_data` 方法获取到内部数据。
.. _intro_generator_attr:
Generator attr - 生成器属性
---------------------------
如果一个属性名是复数,又没有给出常见返回值,那么它是生成器属性。
这表示直接通过 ``.`` 操作符,能获取到一个生成器,生成它所表示的知乎对象列表。
.. note:: 举例
- :any:`Answer.voters` 表示答案的所有点赞者(:any:`People` 对象的生成器)。
- :any:`People.answers` 表示用户的所有答案(:any:`Answer` 对象的生成器)。
可以通过 ``for in loop`` 对它们进行迭代:
.. code-block:: python
for answer in me.answers:
    print(answer.question.title, answer.voteup_count)
某些属性可以通过 order_by 来指定排序,但是一般用不到。
目前发现的用法见::any:`BaseGenerator.order_by`。
Specification & Compatible - 规范 & 兼容
----------------------------------------
这个库遵循以下原则:
- 点赞一律用 vote,点赞者用 voter
- 收藏夹用 collection,收藏用 collect
- 某某某的数量一律用 ``xxx_count``,``xxx`` 使用单数形式
- 某某某的生成器一律用 ``xxxs``,即 ``xxx`` 的复数形式
例: :any:`Column.article_count` 专栏的文章数
例: :any:`Column.articles` 专栏所有文章的生成器
知乎返回的 JSON 大部分都很统一,比如用词的单复数,
用 vote 还是 like 表示点赞,等等这些。
但是就是有那么几个不合群。
如果你看到某个类有两个差不多的属性,他们的差别只是
- 某一个属性多了个 s
(比如 :any:`Column.article_count` 和 :any:`Column.articles_count`)
- 两个属性意思相同
(比如 :any:`People.favorited_count` 和 :any:`People.collected_count`)
那么:
- 有 s 的版本是我为了兼容知乎的原始数据加上的别名。
- 其中一个属性是我强行修改成符合我自己规范的名字。
这种做法只是为了兼容知乎原始数据,其实两个方法无任何区别(当然,除了名字)。
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-user/zhcls/intro.rst | intro.rst |
Game walkthrough - 游戏攻略
===========================
本来想的是写一篇博客的,但是果然还是不要太张扬,写在文档里算了。
.. warning:: 关于准备工作
貌似知乎不知道怎么更改了 SSL 的加密方法,使用 Packet Capture 会影响到知乎 APP 的正常运作,现在推荐
使用 Fiddler 代替 Packet Capture,并设置 SSL 解密方法为 `<client>,ssl3,tls1.2`。
或者使用 Burp Suite 也可以。
Instruction - 教学关
----------------------
你需要:
- 一台 Android 设备(我用的是 Nexus 7, 6.0.1,CM 13)
- 一台电脑,系统随意,有 Android Studio 最好,没有也行
- 支持 HTTPS 的抓包工具(我用的是 Android 上的 Packet Capture)
- APK 反编译工具(我用的是 jadx)
在 Android 设备上安装上知乎客户端,如果已经安装了的话就强行停止,
然后清除数据和缓存。
Start the game - 开始游戏
-------------------------
打开 Packet Capture,首次运行的话应该会让你安装证书(为了解密 SSL 流量)。
如果这一步遇到什么凭证问题,设置一下 Android 的锁屏密码什么的大概就可以,
不行的话就 Google 一下你的设备型号 + 问题描述,自己去解决吧。
证书安装完毕后点击 Packet Capture 的右上角绿三角开始抓包。
打开知乎客户端,按照正常的流程登录。
切回 Packet Capture,点开抓包列表,大概是这样一个情况。
.. image:: /images/explore-1.png
往下找,直到找到第一次知乎 APP 与 60 开头 IP 的有数据 SSL 通信,大概在最下面。
.. image:: /images/explore-2.png
.. _mission_one:
Mission 1 : Need Captcha? - 第一关:需要验证码么?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
点开最早的一次通信,然后点击右上角的 HTML 图标。忽略掉 recommendation,setting,
以及 topstory(估计是封面图) 等请求,找到第一个关键请求:captcha,如下:
.. image:: /images/explore-3.png
上图中重要的地方我都用红色框起来了。从上到下编号为 0-5 好了。
0 + 3 可以看出请求的地址是:https://api.zhihu.com/captcha
2 是表示当前 APP 和手机的一些信息的,是不是必须我没测试,但我是模拟了这几个
header 的。参见::any:`ImZhihuAndroidClient`。
4 很重要,里面记录的是你的验证码会话。如果在后面登录的时候不带这些 Cookies 的话,
服务器会提示你验证码会话不正确之类的错误。当然辣,requests 的 Session 会自动处理
Cookies 的,不用太在意。
1 是最重要的验证信息。如果没有这个 header 项,
直接请求 API 的话服务器是会回给你错误信息的。
比如直接在浏览器里访问的话会是这样:
.. image:: /images/explore-4.png
正确的返回值应该是像上上图的框 5 那样,一个带有 ``show_captcha`` 键的 JSON 数据。
键值的意义是 「下次登录是否需要输入验证码」。
由于这里是 False,也为了简化登录流程的介绍,下面都以 ``show_captcha=False`` 为前提。
(如果你对验证码的处理方式很感兴趣,恭喜你,发现了 :ref:`隐藏关 <hidden_mission>`)
Mission 2 : Login - 第二关:登录
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
按照第一关的方式轻车熟路的找到第二个请求 sign_in(也许要返回到之前的通信列表界面,
点进另一个列表项):
.. image:: /images/explore-5.png
好吧,很明显的两坨红色就是我的邮箱和密码了,不要管它们,继续看那些红框。
0:这是一个 POST 请求,地址是 https://api.zhihu.com/sign_in
1:和请求验证码的时候一致,其实没登录的情况下这个参数都一样。
由于这个验证在没登录的情况下都是存在的,所以我把它变成了一个类,
参见::any:`BeforeLoginAuth`
2:这些就是登录参数了。
在讲参数之前我们来看看 Cookies,你会发现它和询问是否需要验证码之后服务器返回的
Cookies 一样。注意这里一定要匹配,如果不匹配则登录操作是不会通过的。
然前面也说过了 Session 会自动处理的,所以也不用太在意。
好了来说重头戏,登录的参数。我把它用 Python 字典的形式写在下面(调整了一下参数位置):
.. code-block:: python
    {
        'username': '[email protected]',
        'password': '123456',
        'client_id': '8d5..............',
        'grant_type': 'password',
        'source': 'com.zhihu.android',
        'timestamp': '1460165233',
        'signature': '8ad..............',
    }
前面两项不用多说,用户名和密码。我只试了邮箱,但是手机号也可以。
第三项叫做 ``client_id``,你可以和框 1 对比一下,就会发现其实他俩是一样的。
这其实就是 OAuth 里需要申请的,表示一个应用的 APPID 值,
如果你开发过微博的第三方应用,或者在你建的网站上使用了第三方登录功能,
应该不会对这个概念感到陌生。所有的(这个版本的) Android
知乎客户端的这个值都是一样的。
第四项是……恩,你大概当作授权类型把。``password`` 表示我们通过提供用户账户的密码
来获取用户令牌。其他的方式大概还有 OAuth 登录(就是像微博那样弹个小网页让你登录),
第三方登录(通过微博,QQ什么的),这里我们只讲密码型登录。
第五项叫做 source,表示登录请求的来源,可以看出值其实就是 APK 的包名。
第六项 timestamp,时间戳,表示当前时间。用来使每次登录请求的基础数据都不同,
方便 signature 签名加密用的。(在下一小节会详细介绍的)
最后一项是最重要的,请求的签名。如果你在知乎 APP 上多试几次,
就会发现这个值每次都不同。它是用来保证安全性的,因为你既不知道签名的计算方法,
又不知道加密的参数,所以你没法伪造登录请求。
下一小节介绍的就是签名加密算法的探寻过程。
Mission 3: Encrypted signature - 第三关:被加密的签名
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
为了破解这个签名,我费了挺大功夫的,大概一晚上加一早上吧……
这里就省略掉我试过的错误的方法(虽然这些试错的价值才是最大的),直击正确的途径。
首先我们用 jadx 拆掉 APK(记得打开反混淆),导入 Android Studio。(没有 Android Studio
的话也可以在 jadx 里直接查看)。导入之后大概如下图:
.. image:: /images/explore-6.png
然后打开 ``/com/zhihu/android/api/module/Authorisation.java``
(别问我是怎么知道的,我当然是一点一点找的啊……我又没有文章可以看)。
翻到 ``createBaseAuthorisation`` 这个方法,代码如下:
.. raw:: html

   <style>
   pre {
       white-space: pre-wrap;
   }
   </style>
.. code-block:: java
:linenos:
:emphasize-lines: 6-12
    private static Authorisation createBaseAuthorisation(Context context, GrantType grantType) {
        String timestamp = String.valueOf(System.currentTimeMillis() / 1000);
        Authorisation authorisation = new Authorisation();
        authorisation.clientId = "8d5227e0aaaa4797a763ac64e0c3b8";
        authorisation.source = SystemUtils.m18405c(context);
        authorisation.signature = b.a(
            grantType +
            "8d5227e0aaaa4797a763ac64e0c3b8" +
            authorisation.source +
            timestamp,
            "ecbefbf6b17e47ecb9035107866380"
        );
        authorisation.timestamp = timestamp;
        return authorisation;
    }
注意被标注的 6 到 12 行,这就是签名的加密算法。
我们可以看到,有一个叫做 ``b.a`` 的函数,接受两个参数,第一个是一堆字符串的拼接,
第二个是固定的字符串(其实就是 客户端的 SECRET)。
通过上一段对参数的解释,我们可以看到,除了 ``timestamp`` 之外,其他的都是固定的,
所以第一个参数就是:
``“password8d5227e0aaaa4797a763ac64e0c3b8com.zhihu.android”``
后面再加上 ``timestamp`` 的值,
然后,最重要的来了,加密方法是什么。
我尝试了把第二个参数拼接到第一个参数的末尾和开头,然后再分别用 md5,sha1,先 md5
再 sha1,先 sha1 再 md5,以第二个参数为盐的 md5 和 sha1。反正都不对……
然后我就陷入了深深的迷茫中。洗把脸冷静了一会之后我想……知乎还算个比较跟潮流的公司,
去查查 Google 的 OAuth 文档说不定能有收获。(别问我当时怎么想的!
我也不知道为啥就觉的知乎应该会跟着 Google 的流程走……)
然后我找到了 Google OAuth 的签名文档(对 Google 的文档感兴趣的话点\ `这里 <https://developers.google.com/maps/documentation/static-maps/get-api-key#url->`_)
于是我就猜想是不是知乎也是用的 hmac.sha1 叻……然后就成功了,嗯,说起来就是这么简单……
签名代码参见::any:`login_signature`。
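如果不想翻源码,下面是这个签名算法的 Python 示意(``CLIENT_ID``、``APP_SECRET`` 就是上文反编译出来的那两个常量,仅作演示,具体实现请以 :any:`login_signature` 为准):
.. code-block:: python

    import hmac
    import time
    from hashlib import sha1

    def make_signature(grant_type, client_id, source, secret):
        # 待签名串 = grant_type + client_id + source + timestamp
        timestamp = str(int(time.time()))
        data = (grant_type + client_id + source + timestamp).encode('utf-8')
        # 以 SECRET 为密钥做 HMAC-SHA1,十六进制结果即 signature
        return hmac.new(secret.encode('utf-8'), data, sha1).hexdigest(), timestamp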
Last hint: Get token - 最后一击:获取令牌
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
在了解了签名加密算法之后,剩下的工作就很简单了,模拟成客户端把登录请求发过去就行。以下是服务器返回的结果。
以下是客户端返回的结果。
.. image:: /images/explore-7.png
由于返回结果涉及到账户安全信息,所以马赛克比较多,凑合着看哈。
最重要的是那个 ``access_token`` 项,登录后的每个请求都需要这个令牌进行验证。
阿,对了,那个 cookies 里的东西貌似并不是很重要,我没有手动添加进 Session,
所有的功能也能成功完成。
有关令牌的保存和使用,请看 :any:`ZhihuToken` 类 和 :any:`ZhihuOAuth` 类。
下面是登录成功后的一次 API 请求头:
.. image:: /images/explore-8.png
注意红框部分的 ``Bearer``,这是 OAuth2 的一种 token type 方式,
如果你想了解它的定义,可以看看 `RFC 6750 <https://tools.ietf.org/html/rfc6750>`_。
后面那被我打了马赛克的地方就是上上图中的 ``access_token`` 值。
你用有效的 ``access_token`` 进行验证,服务器才会允许你获取数据。
服务器的回复我就不贴了。
至此,知乎 APP 的 OAuth 过程就解析完毕辣!下面的附加关卡是给兴趣浓厚的同学们准备的。
.. _hidden_mission:
Hidden mission: Process captcha - 隐藏关卡:验证码处理
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:ref:`mission_one` 里说到了验证码的问题。
知乎 OAuth 的验证码策略是这样的。
1. 每次登录前必须使用 ``GET`` 方式调用 ``captcha`` API 获取自己此次登录需不需要验证码。
知乎的服务器根据你最近的登录频繁程度,上次登录结果等来决定是否需要你输入验证码。
不管最后结果是需要还是不需要,服务器会在数据库里存你的验证码 Session 然后用
``Set-Cookies HTTP header`` 的方式给你 ``Session ID``。
2. 如果需要验证码则继续往下执行,不需要则转 6。
3. 请求使用 ``PUT`` 方式调用 ``captcha`` API,(记得带上上一步发给你的 Cookies)
获取到的是 base64 编码的一张 gif 图片。
4. 使用 ``POST`` 方式调用 ``captcha`` API,``data`` 设置为 ``{'input_text': 'abcd'}``
(当然也得记得带上 Cookies)
5. 如果验证码输入正确,服务器会在你的验证码 Session 里写上验证成功。如果输入失败
你就得重新转到步骤 3,成功的话继续往下。
6. 用正常方式使用 ``sign_in`` API 登录即可(带上 Cookies)。
知乎所有关于验证码的操作都使用同一个 API,用不同的 HTTP Verb 把功能区分开,我觉得挺有意思的。
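把上面的流程用 requests 直接写出来大致是这个样子(示意代码:``CAPTCHA_URL`` 即 ``https://api.zhihu.com/captcha``,``session`` 和 ``auth`` 沿用上文的概念,具体逻辑请以下面列出的几个函数的实现为准):
.. code-block:: python

    import base64

    # 1. GET 询问本次登录是否需要验证码(同时拿到验证码 Session 的 Cookies)
    need = session.get(CAPTCHA_URL, auth=auth).json()['show_captcha']

    if need:
        # 3. PUT 获取 base64 编码的 gif 验证码图片
        img = session.put(CAPTCHA_URL, auth=auth).json()['img_base64']
        with open('captcha.gif', 'wb') as f:
            f.write(base64.b64decode(img))
        # 4. POST 提交用户输入的验证码
        session.post(CAPTCHA_URL, auth=auth, data={'input_text': input('captcha: ')})

    # 6. 之后带着同一个 session 里的 Cookies 正常调用 sign_in 登录即可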
我代码里有关登录和验证码的相关逻辑处理,请看下面几个函数:
- :any:`ZhihuClient.login`
- :any:`ZhihuClient.need_captcha`
- :any:`ZhihuClient.get_captcha`
- :any:`ZhihuClient.login_in_terminal`
Finale - 大结局
---------------
好啦,游戏攻略就写到这里……快去自己玩玩呗?
(完)
2016.04.09 初稿。
2016.08.30 修改一些格式和用词小问题。
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-dev/oauth/game.rst | game.rst |
.. _generator:
Paging data process - 多页数据处理
==================================
intro - 介绍
------------
知乎有很多列表型数据,比如问题的答案,我的关注者,专栏的文章,等等。
这些数据在知乎的 API 获取的时候是间断的,比如,每次获取 20 个,在手机 APP
上就体现为继续上划加载更多。这些数据处理的逻辑类似,数据的格式也类似,
只是最后列表项中的对象不同。
常见的多页数据的 JSON 如下:
.. code-block:: python
{
'paging': {
'previous': 'previous page url' ,
'next': 'next page url',
'is_end': False, # or True
},
'data': [
{
'type': 'answer',
'id': 'xxxx',
'created_time': '14xxxxx'
# many attr
},
{
# like last one
},
# many many objects
],
}
为了 DRY,这些逻辑被抽象成 :any:`BaseGenerator` 基类,其他类通过继承基类,
来实现创建不同对象的功能。
效果见::ref:`intro_generator_attr`
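在各知乎类中,只需要用本模块的 ``generator_of`` 装饰器声明一个属性,就能得到这样的生成器。比如 :any:`ZhihuClient` 中获取已结束 Live 的属性就是这样写的(节选自 client.py,仅作示意):
.. code-block:: python

    @property
    @need_login
    @generator_of(LIVE_ENDED_URL, 'live', format_id=False)
    def lives_ended(self):
        """
        已经结束的 Live
        """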
Base class - 基类
-----------------
.. autoclass:: zhihu_oauth.zhcls.generator.BaseGenerator
:members:
:undoc-members:
:private-members:
:special-members: __init__, __getitem__, __next__, __iter__
.. autoclass:: zhihu_oauth.zhcls.generator.FilterableGenerator
:members:
:undoc-members:
:private-members:
:special-members:
Childs - 子类
---------------
:any:`BaseGenerator` 和 :any:`FilterableGenerator` 的子类才是真正可以使用的类。它们重载了 ``_build_obj`` 方法。
因其他结构无变化,故文档省略。
.. automodule:: zhihu_oauth.zhcls.generator
:members:
:exclude-members: BaseGenerator, FilterableGenerator
:undoc-members:
:private-members:
:special-members: __init__
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-dev/zhcls/generator.rst | generator.rst |
Base - 基础类
=============
Base 类是所有知乎数据相关类的基类,网络请求仅在 :any:`Base` 类和
:any:`BaseGenerator` 类中实现。
:any:`Base` 类提供了一些可以重载的函数,让子类可以自定义网络请求的 URL,method,params,data 等参数。
.. inheritance-diagram:: zhihu_oauth.zhcls.base zhihu_oauth.zhcls.answer zhihu_oauth.zhcls.article zhihu_oauth.zhcls.collection zhihu_oauth.zhcls.column zhihu_oauth.zhcls.comment zhihu_oauth.zhcls.people zhihu_oauth.zhcls.me zhihu_oauth.zhcls.question zhihu_oauth.zhcls.topic
:parts: 1
.. automodule:: zhihu_oauth.zhcls.base
:members:
:undoc-members:
:special-members: __init__
:private-members:
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-dev/zhcls/base.rst | base.rst |
Streaming JSON - 流式 JSON
==========================
本模块用于处理知乎 API 返回的 JSON 数据里的一部分流式 JSON 数据。
流式 JSON 属性的含义请看\ :ref:`这里 <intro_streaming_json>`。
如 :any:`People.locations`,:any:`Question.suggest_edit` 等。
Class intro - 类的介绍
----------------------
.. autoclass:: zhihu_oauth.zhcls.streaming.StreamingJSON
:members:
:undoc-members:
:special-members: __init__, __getitem__, __getattr__, __iter__
Ancillary decorator - 配套装饰器
--------------------------------
下面这个装饰器就是各知乎类中用于标明哪些是流式 JSON 属性的,
类里的定义只是为了方便构建文档。真正返回数据的操作均由这一装饰器完成。
.. autofunction:: zhihu_oauth.zhcls.streaming.streaming
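在各知乎类中的典型写法大致如下(仅作示意,装饰器参数和具体顺序请以各知乎类的源码为准):
.. code-block:: python

    class People(Base):
        @property
        @streaming()
        def locations(self):
            """
            常见返回值请看用户文档中对应属性的说明
            """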
| zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-dev/zhcls/stream.rst | stream.rst |
from __future__ import print_function, unicode_literals
import base64
import getpass
import itertools
import os
import warnings
import requests
import requests.packages.urllib3 as urllib3
from .exception import (
UnexpectedResponseException, NeedCaptchaException, MyJSONDecodeError
)
from .helpers import shield
from .oauth.before_login_auth import BeforeLoginAuth
from .oauth.setting import (
CAPTCHA_URL, LOGIN_URL, LOGIN_DATA, CLIENT_ID, APP_SECRET
)
from .oauth.token import ZhihuToken
from .oauth.utils import login_signature
from .oauth.zhihu_oauth import ZhihuOAuth
from .setting import DEFAULT_CAPTCHA_FILENAME, ADAPTER_WITH_RETRY
from .utils import need_login
from .zhcls.generator import generator_of, SearchResultGenerator
from .zhcls.search import search_type_to_t, SearchResultSection, SearchType
from .zhcls.streaming import StreamingJSON
from .zhcls.urls import (
LIVE_ENDED_URL,
LIVE_ONGOING_URL,
LIVE_TAGS_URL,
SEARCH_API_URL,
)
from .zhcls.utils import zhihu_obj_url_parse
__all__ = ['ZhihuClient']
try:
# noinspection PyShadowingBuiltins,PyUnresolvedReferences
input = raw_input
except NameError:
pass
try:
# noinspection PyUnresolvedReferences
bs64decode = base64.decodebytes
except AttributeError:
# for python 2
# noinspection PyDeprecation
bs64decode = base64.decodestring
class ZhihuClient:
def __init__(self, client_id=None, secret=None):
"""
知乎客户端,这是获取所有类的入口。
:param str|unicode client_id: 客户端 ID。
:param str|unicode secret: 客户端 ID 对应的 SECRET KEY。
:rtype: :class:`.ZhihuClient`
"""
self._session = requests.session()
# remove SSL Verify
self._session.verify = False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Add auto retry for session
self._session.mount('http://', ADAPTER_WITH_RETRY)
self._session.mount('https://', ADAPTER_WITH_RETRY)
# client_id and secret shouldn't have default value
# after zhihu open api
self._client_id = client_id or CLIENT_ID
self._secret = secret or APP_SECRET
self._login_auth = BeforeLoginAuth(self._client_id)
self._token = None
def need_captcha(self):
"""
.. note::
一般来说此方法不需要手动调用。
在调用 :meth:`.login` 时捕获 :class:`.NeedCaptchaException` 即可。
而 :meth:`.login_in_terminal` 会自动处理需要验证码的情况。
:return: 下次登录是否需要验证码。
:rtype: bool
:raise: :class:`.UnexpectedResponseException` 知乎返回的数据和预期格式不符。
"""
res = self._session.get(CAPTCHA_URL, auth=self._login_auth)
try:
j = res.json()
return j['show_captcha']
except (MyJSONDecodeError, KeyError):
# noinspection PyTypeChecker
raise UnexpectedResponseException(
CAPTCHA_URL, res,
'a json data with show_captcha item'
)
def get_captcha(self):
"""
:return: 如果需要验证码,则返回 bytes 型验证码,不需要则返回 None。
:rtype: None | bytes
:raise: :class:`.UnexpectedResponseException` 知乎返回的数据和预期格式不符
"""
if self.need_captcha():
res = self._session.put(CAPTCHA_URL, auth=self._login_auth)
try:
j = res.json()
return bs64decode(j['img_base64'].encode('utf-8'))
except (MyJSONDecodeError, ValueError, KeyError):
raise UnexpectedResponseException(
CAPTCHA_URL,
res,
'a json string contain a img_base64 item.'
)
return None
def login(self, username, password, captcha=None):
"""
登录知乎的主要方法。
.. warning:: 关于手机号登录
手机号登录时请在手机号前加上 ``+86``
:param str|unicode username: 邮箱或手机号。
:param str|unicode password: 密码。
:param str|unicode captcha: 验证码,可以为空。
:return: 二元元组,第一个元素表示是否成功,第二个元素表示失败原因。
:rtype: tuple(bool, str)
:raise: :class:`.NeedCaptchaException` 此次登录需要验证码
"""
if captcha is None:
try:
if self.need_captcha():
raise NeedCaptchaException
except UnexpectedResponseException as e:
return False, str(e)
else:
res = self._session.post(
CAPTCHA_URL,
auth=self._login_auth,
data={'input_text': captcha}
)
try:
json_dict = res.json()
if 'error' in json_dict:
return False, json_dict['error']['message']
except (MyJSONDecodeError, ValueError, KeyError) as e:
return False, str(e)
data = dict(LOGIN_DATA)
data['username'] = username
data['password'] = password
data['client_id'] = self._client_id
login_signature(data, self._secret)
res = self._session.post(LOGIN_URL, auth=self._login_auth, data=data)
try:
json_dict = res.json()
if 'error' in json_dict:
return False, json_dict['error']['message']
else:
self._token = ZhihuToken.from_dict(json_dict)
self._session.auth = ZhihuOAuth(self._token)
return True, ''
except (MyJSONDecodeError, ValueError, KeyError) as e:
return False, str(e)
def login_in_terminal(self, username=None, password=None,
use_getpass=True, captcha_filename=None):
"""
为在命令行模式下使用本库的用户提供的快捷登录方法。
在未提供 username 或 password 参数时会在终端中请求输入。
.. note:: 此方法会自动处理验证码需要验证码情况。
:param str|unicode username: 邮箱或手机号。
:param str|unicode password: 密码。
:param bool use_getpass: 输入密码时是否使用密码模式(不回显输入字符),默认为 True。
提供此参数是因为在 Windows 环境下 IDE 的控制台可能由于某些原因,getpass 无法被
正常使用,此时提供 False 参数即可。(new in version > 0.0.16)
:param str|unicode captcha_filename: 如果需要输入验证码,
本参数指定验证码存放的文件名,如果不提供则使用默认文件名。
:return: .. seealso:: :meth:`.login`
"""
print('----- Zhihu OAuth Login -----')
print('使用手机号登录的时候请在手机号前加上 +86')
username = username or input('email/phone: ')
if password is None:
if use_getpass:
with warnings.catch_warnings():
warnings.simplefilter('ignore', getpass.GetPassWarning)
password = getpass.getpass(str('password: '))
else:
password = input('password: ')
try:
success, reason = self.login(username, password)
except NeedCaptchaException:
print('Need for a captcha, getting it......')
captcha_image = self.get_captcha()
captcha_filename = captcha_filename or DEFAULT_CAPTCHA_FILENAME
with open(captcha_filename, 'wb') as f:
f.write(captcha_image)
print('Please open {0} for captcha'.format(
os.path.abspath(captcha_filename)))
captcha = input('captcha: ')
os.remove(os.path.abspath(captcha_filename))
success, reason = self.login(username, password, captcha)
if success:
print('Login success.')
else:
print('Login failed, reason: {}'.format(reason))
return success, reason
def create_token(self, filename, username=None, password=None):
"""
另一个快捷方法,作用为调用 :meth:`.login_in_terminal`
如果成功则将 token 储存文件中。
:param str|unicode filename: token 保存的文件名
:param str|unicode username: 邮箱或手机号
:param str|unicode password: 密码
:return: .. seealso:: :meth:`.login`
"""
success, reason = self.login_in_terminal(username, password)
if success:
self.save_token(filename)
print('Token file created success.')
else:
print('Token file created failed.')
return success, reason
def load_token(self, filename):
"""
通过载入 token 文件来达到登录状态。
.. seealso:: :meth:`.save_token`
:param str|unicode filename: token 文件名。
:return: 无返回值,也就是说其实不知道是否登录成功。
"""
self._token = ZhihuToken.from_file(filename)
self._session.auth = ZhihuOAuth(self._token)
@need_login
def save_token(self, filename):
"""
将通过登录获取到的 token 保存为文件,必须是已登录状态才能调用。
.. seealso:: :meth:`.load_token`
:param str|unicode filename: 将 token 储存为文件。
:return: 无返回值。
"""
self._token.save(filename)
def is_login(self):
"""
:return: 是否已登录。但其实只是检查内部的 token 是否是 None。
:rtype: bool
"""
return self._token is not None
@need_login
def test_api(self, method, url, params=None, data=None):
"""
开发时用来测试某个 API 返回的 JSON 的便捷接口。
:param str|unicode method: HTTP 方式, GET or POST or OPTION, etc。
:param str|unicode url: API 地址。
:param dict params: GET 参数。
:param dict data: POST 参数。
:return: 访问结果。
:rtype: request.Response
"""
return self._session.request(method, url, params, data)
def set_proxy(self, proxy):
""" 设置 http 和 https 代理或者 sock5代理(requests 已经可以支持 socks 代理)
因为由 :any:`ZhihuClient` 生成的知乎类对象和本对象使用同一
session,所以设置代理后,对所有由当前对象生成的知乎对象均会
使用设置的代理。
.. note:: 如果需要使用 socks 代理,需要安装 pysocks
``sudo pip install pysocks>=1.5.6,!=1.5.7``
:param str|unicode proxy: 形如 'http://user:pass@host:port/'
或者 'socks5://user:pass@host:port'。
传入 None 表示清除代理设置。
:return: None
"""
if proxy is None:
self._session.proxies.clear()
else:
self._session.proxies.update({'http': proxy, 'https': proxy})
# ----- get zhihu classes from ids -----
@need_login
def answer(self, aid):
"""
获取答案对象,需要 Client 是登录状态。
:param int aid: 答案 ID。
:举例:
https://www.zhihu.com/question/xxxxxx/answer/1234567
的答案 ID 是 1234567。
:rtype: :any:`Answer`
"""
from .zhcls.answer import Answer
return Answer(aid, None, self._session)
@need_login
def article(self, aid):
"""
获取文章对象,需要 Client 是登录状态。
:param int aid: 文章 ID。
:举例: https://zhuanlan.zhihu.com/p/1234567 的文章 ID 是 1234567。
:rtype: :any:`Article`
"""
from .zhcls.article import Article
return Article(aid, None, self._session)
@need_login
def collection(self, cid):
"""
获取收藏夹对象,需要 Client 是登录状态。
:param int cid: 收藏夹 ID
:举例: https://www.zhihu.com/collection/1234567 的收藏夹 ID 是 1234567。
:rtype: :any:`Collection`
"""
from .zhcls.collection import Collection
return Collection(cid, None, self._session)
@need_login
def column(self, cid):
"""
获取专栏对象,需要 Client 是登录状态。
:param str|unicode cid: 专栏 ID,注意,类型是字符串。
:举例: https://zhuanlan.zhihu.com/abcdefg 的专栏 ID 是 abcdefg。
:rtype: :any:`Column`
"""
from .zhcls.column import Column
return Column(cid, None, self._session)
@need_login
def live(self, lid):
"""
        获取 Live 对象,需要 Client 是登录状态。
:param int lid: Live ID
:举例:
https://www.zhihu.com/lives/778748004768178176
的 Live ID 是 778748004768178176。
:rtype: :any:`Live`
"""
from .zhcls.live import Live
return Live(lid, None, self._session)
@need_login
def me(self):
"""
获取当前登录的用户,需要 Client 是登录状态。
.. note::
:class:`Me` 类继承于 :class:`People`,是一个不同于其他用户的类。
这个类用于提供各种操作,比如点赞,评论,私信等。
:rtype: :any:`Me`
"""
from .zhcls import Me
return Me(self._token.user_id, None, self._session)
@need_login
def people(self, pid):
"""
获取用户对象,需要 Client 是登录状态。
:param str|unicode pid: 用户 ID,注意,类型是字符串。
:举例: https://www.zhihu.com/people/abcdefg 的用户 ID 是 abcdefg。
:rtype: :any:`People`
"""
from .zhcls.people import People
return People(pid, None, self._session)
@need_login
def pin(self, pid):
"""
获取分享(Pin)对象,需要 Client 是登录状态。
        :param int pid: Pin ID。
:举例: https://www.zhihu.com/pin/123123123123 的 Pin ID 是 123123123123。
:rtype: :any:`Pin`
"""
from .zhcls.pin import Pin
return Pin(pid, None, self._session)
@need_login
def question(self, qid):
"""
获取问题对象,需要 Client 是登录状态。
:param int qid: 问题 ID。
:举例: https://www.zhihu.com/question/1234567 的问题 ID 是 1234567。
:rtype: :any:`Question`
"""
from .zhcls.question import Question
return Question(qid, None, self._session)
@need_login
def topic(self, tid):
"""
获取话题对象,需要 Client 是登录状态。
:param int tid: 话题 ID。
        :举例: https://www.zhihu.com/topic/1234567 的话题 ID 是 1234567。
:rtype: :any:`Topic`
"""
from .zhcls.topic import Topic
return Topic(tid, None, self._session)
@need_login
def from_url(self, url):
"""
通过知乎的 URL 创建对象,需要 client 是登录状态。
对象的 URL 请参见对应方法的描述。如 :any:`Answer` 类的 URL 的描述在
:any:`ZhihuClient.answer` 方法的文档里,其余类似。
.. note:: 提示
本方法也支持省略了开头的 ``https://``,或者结尾有多余的 ``/`` 的 URL。
:param str|unicode url: 知乎对象的网址
:return: 对应的知乎对象
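        用法示意(假设 client 已登录,URL 中的 ID 仅为举例):
        .. code-block:: python
            answer = client.from_url(
                'https://www.zhihu.com/question/1234567/answer/7654321')
            people = client.from_url('www.zhihu.com/people/somebody')
            # 返回值的类型由 URL 决定,必要时可用 isinstance 判断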
"""
obj_id, obj_type = zhihu_obj_url_parse(url)
if obj_id:
return getattr(self, obj_type)(obj_id)
raise ValueError('Invalid zhihu object url!')
# ----- search -----
def search(self, query, search_type=SearchType.GENERAL):
assert search_type in SearchType
return SearchResultGenerator(
url=SEARCH_API_URL,
session=self._session,
t=search_type_to_t(search_type),
q=query,
)
def search_unfold(self, query):
for results in self.search(query):
if isinstance(results, SearchResultSection):
for r in results:
yield r
else:
yield results
# ----- generator -----
@property
@need_login
def lives(self):
"""
所有 Live,内部只是封装了 :any:`lives_ongoing` 和 :any:`lives_ended`
两个生成器的数据,作为一个快捷方法。
"""
for live in itertools.chain(
shield(self.lives_ongoing),
shield(self.lives_ended)
):
yield live
@property
@need_login
@generator_of(LIVE_ENDED_URL, 'live', format_id=False)
def lives_ended(self):
"""
已经结束的 Live
"""
return None
@property
@need_login
@generator_of(LIVE_ONGOING_URL, 'live', format_id=False)
def lives_ongoing(self):
"""
正在开放的 Live
"""
return None
@property
@need_login
def live_tags(self):
from .zhcls.live import LiveTag
res = self.test_api('GET', LIVE_TAGS_URL)
try:
data = res.json()
assert data['success'] is True
data = StreamingJSON(data['data'])
except (MyJSONDecodeError, ValueError):
raise UnexpectedResponseException(
LIVE_TAGS_URL,
res,
'a json string contains [success] and [data] attr.'
)
for category in data:
for tag in category.data:
yield LiveTag(tag.id, tag.raw_data(), self._session)
# TODO: add token check and refresh | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/client.py | client.py |
from __future__ import unicode_literals
import datetime
import warnings
import requests.packages.urllib3 as urllib3
from .zhcls.activity import Activity, ActType
from .zhcls.generator import BaseGenerator, ActivityGenerator
from .zhcls.streaming import StreamingJSON
from .zhcls.utils import SimpleEnum
from .exception import ZhihuException, ZhihuWarning
__all__ = ['ActivityFormatter', 'SHIELD_ACTION', 'act2str', 'shield', 'ts2str']
SHIELD_ACTION = SimpleEnum(
['EXCEPTION', 'PASS', 'STOP']
)
"""
SHIELD_ACTION 是用于表示 shield 抵挡 Exception 达到最大次数后的动作的枚举类,取值如下:
================= ====================
常量名 说明
================= ====================
EXCEPTION 抛出异常
PASS 跳过,获取下一个数据
STOP 结束处理
================= ====================
"""
def shield(inner, durability=3, start_at=0, action=SHIELD_ACTION.EXCEPTION):
"""
shield 函数用于自动处理知乎的各种生成器
(如 :any:`People.followers`, :any:`Question.answers`) 在获取分页数据时出错的情况。
.. warning:: 用户动态的生成器因为获取方式比较特殊,无法被 shield 保护
用法:
比如我们想获取关注了某个专栏的用户分别关注了哪些话题……
.. code-block:: python
column = client.column('zijingnotes')
result = []
for user in shield(column.followers, action=SHIELD_ACTION.PASS):
L = []
print('Start proc user', user.name)
if user.over:
print(user.over_reason)
continue
for topic in shield(user.following_topics):
print('Add topic', topic.name)
L.append(topic.name)
result.append(L)
# output result
:param inner: 需要被保护的生成器
:param int durability: 耐久度,表示获取同一数据最多连续出错几次
:param int start_at: 从第几个数据开始获取
:param action: 当耐久度消耗完后的动作,参见 :any:`SHIELD_ACTION`,默认动作是抛出异常
:return: 新的生成器……
"""
if not isinstance(inner, BaseGenerator):
raise ValueError('First argument must be Zhihu Generator Classes')
if isinstance(inner, ActivityGenerator):
raise ValueError('Activity Generator is the only one can\'t be shield')
offset = start_at
hp = durability
while True:
i = -1
try:
for i, x in enumerate(inner.jump(offset)):
yield x
hp = durability
break
except (ZhihuException, urllib3.exceptions.MaxRetryError) as e:
offset += i + 1
hp -= 1
warnings.warn(
'[{type}: {e}] be shield when get NO.{offset} data'.format(
type=e.__class__.__name__,
e=e,
offset=offset
),
ZhihuWarning
)
if hp == 0:
if action is SHIELD_ACTION.EXCEPTION:
raise e
elif action is SHIELD_ACTION.PASS:
offset += 1
hp = durability
elif action is SHIELD_ACTION.STOP:
break
else:
raise e
def ts2str(ts, fmt=None, offset=8):
"""
将时间戳转换为表示时间的字符串。
:param int ts: 精确到秒的 unix timestamp
:param fmt: 格式化文本,默认值 ``%Y-%m-%d %H:%M:%S``
:param offset: 当前时区偏移,单位小时,默认为 8 小时
:return: 时间戳转换为时间的字符串表示
:rtype: str
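    用法示意:
    .. code-block:: python
        print(ts2str(1480000000))
        # => '2016-11-24 23:06:40'(东八区)
        print(ts2str(1480000000, '%Y/%m/%d', offset=0))
        # => '2016/11/24'(UTC)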
"""
offset *= 3600 # 时区偏移
if fmt is None:
fmt = '%Y-%m-%d %H:%M:%S'
return datetime.datetime.utcfromtimestamp(ts + offset).strftime(fmt)
_DEFAULT_ACTIVITY_FORMATTER_MANY_ONE = [
({
ActType.CREATE_ARTICLE, ActType.CREATE_QUESTION,
ActType.FOLLOW_COLLECTION, ActType.FOLLOW_COLUMN,
ActType.FOLLOW_QUESTION, ActType.JOIN_LIVE, ActType.PUBLISH_LIVE,
ActType.VOTEUP_ARTICLE, ActType.VOTEUP_EBOOK
}, '{act.action_text} 「{act.target.title}」'),
({
ActType.FOLLOW_ROUNDTABLE,
ActType.FOLLOW_TOPIC
}, '{act.action_text} 「{act.target.name}」'),
]
_DEFAULT_ACTIVITY_FORMATTER_ONE_ONE = {
ActType.CREATE_ANSWER: '{act.action_text} 「{act.target.question.title}」',
ActType.VOTEUP_ANSWER: '{act.action_text} 「{act.target.question.title}」'
' by 「{act.target.author.name}」',
ActType.CREATE_PIN: '{act.action_text} {act.target.excerpt_title}',
ActType.LIKE_PIN: '{act.action_text} {act.target.excerpt_title}'
' by 「{act.target.author.name}」',
ActType.COLLECT_ANSWER: '{act.action_text} '
'「{act.target[answer].question.title}」'
' by 「{act.target[answer].author.name}」'
' 到收藏夹 「{act.target[collection].title}」',
ActType.COLLECT_ARTICLE: '{act.action_text} 「{act.target[article].title}」'
' 到收藏夹 「{act.target[collection].title}」'
}
class ActivityFormatter(object):
"""
这是将 Activity 转换为字符串的辅助类,一般情况下不需要使用,直接使用辅助函数
:any:`act2str` 即可。
如果你需要自定义格式化模板,请参考下面的用法:
.. code-block:: python
class MyActivityFormatter(ActivityFormatter):
def __init__(self, user_name):
self._user_name = user_name
def like_pin_formatter(self, act):
content_summary = next(act.target.contents).content[:20]
return '{i} 赞了 {act.target.author.name} 的分享: {content}'.format(
i=self._user_name, act=act, content=content_summary,
)
create_pin_formatter = '{act.action_text} 一些东西'
guxizhao = client.people('guxizhao')
formatter = MyActivityFormatter(guxizhao.name)
for act in guxizhao.activities:
print(ts2str(act.created_time), formatter(act))
.. note:: 执行结果
除了发表分享和对分享点赞这两个类型的 Activity 之外,其他类型的格式化结果均和
:any:`act2str` 函数一致。
        ActType.LIKE_PIN 类型的会被转换成 ``xxx 赞了 yyy 的分享:<分享内容的前20字>``
ActType.CREATE_PIN 类型的会被转换成 ``xxx 分享了 一些东西``
简单来说就是你可以继承 :any:`ActivityFormatter` 类,然后定义一些函数或者常量,名称是
ActType 类型的小写形式 + ``_formatter``。
如果它是个函数,需要接受一个 :any:`ActType` 或者 :any:`StreamingJSON` 对象,
返回一个字符串模板。如果直接是个变量那就直接被当成模板使用。
模板里一律用 ``act`` 代表 :any:`Activity` 对象。
"""
@staticmethod
def __check_type(act):
return isinstance(act, Activity) \
or (
isinstance(act, StreamingJSON) and
hasattr(act, 'type') and
act.type in ActType
)
def __call__(self, act):
if not self.__check_type(act):
raise ValueError('Only support Activity objects.')
attr_name = act.type.lower() + '_formatter'
if hasattr(self, attr_name):
fmt = getattr(self, attr_name)
if hasattr(fmt, '__call__'):
fmt = fmt(act)
assert isinstance(fmt, str), \
                    'Formatter must be a str or a function like ' \
'func(act: Activity) -> str'
elif act.type in _DEFAULT_ACTIVITY_FORMATTER_ONE_ONE:
fmt = _DEFAULT_ACTIVITY_FORMATTER_ONE_ONE[act.type]
else:
found = False
for ks, v in _DEFAULT_ACTIVITY_FORMATTER_MANY_ONE:
if act.type in ks:
found = True
fmt = v
assert found, 'Unknown ActType ' + act.type
# noinspection PyUnboundLocalVariable
return fmt.format(act=act)
act2str = ActivityFormatter()
"""
一个将 ``xxx.activities`` 返回的 :any:`Activity` 或 :any:`StreamingJSON`
对象转换为可读的字符串的辅助函数。例子:
.. code-block:: python
guxizhao = client.people('guxizhao')
for act in guxizhao.activities:
print(ts2str(act.created_time), act2str(act))
.. note:: 结果
2016-12-08 01:18:20 顾惜朝关注了问题
「如何评价美剧《西部世界》(Westworld)第一季第十集(S01E10)?」
2016-12-07 23:18:02 顾惜朝赞同了回答
「如何看待民生银行性骚扰事件?」 by 「meta」
2016-12-07 13:20:32 顾惜朝赞了文章
「冒牌高校教授,正版绝命毒师」
2016-12-07 10:59:43 顾惜朝赞同了回答
「你最喜欢的故事是什么?」 by 「胡不归」
2016-12-07 01:30:51 顾惜朝赞了文章
「专访 | 关于《西部世界》季终集,死过一千次的泰迪这样说」
2016-12-06 20:23:59 顾惜朝赞同了回答
「如何看待知乎上「一知半解」也要强行回答的回答者?」 by 「河森堡」
2016-12-06 01:57:30 顾惜朝关注了收藏夹 「你丫竟然在图片里下毒?!!」
2016-12-05 22:23:25 顾惜朝赞同了回答
「《你的名字。》中彗星碎片撞击地面的影响是否合理?」 by 「Macro kuo」
2016-12-04 00:36:24 顾惜朝赞了文章 「Google Earth: 这才是真·情怀」
如果想自定义描述模板,请参见 :any:`ActivityFormatter` 类。
""" | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/helpers.py | helpers.py |
from __future__ import unicode_literals
try:
from json import JSONDecodeError as MyJSONDecodeError
except ImportError:
MyJSONDecodeError = Exception
"""
在 Py3 下是 json.JSONDecodeError。
在 Py2 下是 Exception,因为 Py2 的 json 模块没有提供专门的解析异常。
"""
__all__ = [
# warnings
'ZhihuWarning',
    'IgnoreErrorDataWarning',
    'GetEmptyResponseWhenFetchData',
    'CantGetTicketsWarning',
    'CantGetTickets',
    'BadUseWarning',
    'SliceBadUseWarning',
    # exceptions
    'ZhihuException',
    'UnexpectedResponseException',
'GetDataErrorException',
'NeedCaptchaException',
'NeedLoginException',
'IdMustBeIntException',
'UnimplementedException',
'MyJSONDecodeError',
]
class ZhihuException(Exception):
pass
class UnexpectedResponseException(ZhihuException):
def __init__(self, url, res, expect):
"""
服务器回复了和预期格式不符的数据
:param str|unicode url: 当前尝试访问的网址
:param request.Response res: 服务器的回复
:param str|unicode expect: 一个用来说明期望服务器回复的数据格式的字符串
"""
self.url = url
self.res = res
self.expect = expect
def __repr__(self):
        return 'Got an unexpected response when visiting url ' \
'[{self.url}], we expect [{self.expect}], ' \
'but the response body is [{self.res.text}]'.format(self=self)
__str__ = __repr__
class UnimplementedException(ZhihuException):
def __init__(self, what):
"""
处理当前遇到的情况的代码还未实现,只是开发的时候用于占位
.. note:: 一般用户不用管这个异常
:param str|unicode what: 用来描述当前遇到的情况
"""
self.what = what
def __repr__(self):
        return 'Met an unimplemented condition: {self.what}. ' \
'Please send this error message to developer ' \
'to get help.'.format(self=self)
__str__ = __repr__
class GetDataErrorException(UnexpectedResponseException):
def __init__(self, url, res, expect):
"""
:class:`UnexpectedResponseException` 的子类,
尝试获取服务器给出的错误信息。如果获取失败则显示父类的出错信息。
.. seealso:: :class:`UnexpectedResponseException`
"""
super(GetDataErrorException, self).__init__(url, res, expect)
try:
self.reason = res.json()['error']['message']
except (MyJSONDecodeError, KeyError):
self.reason = None
def __repr__(self):
if self.reason:
return 'A error happened when get data: {0}'.format(self.reason)
else:
base = super(GetDataErrorException, self).__repr__()
return 'Unknown error! ' + base
__str__ = __repr__
class TokenError(ZhihuException):
def __init__(self, msg):
self._msg = msg
def __repr__(self):
return self._msg
class NeedCaptchaException(ZhihuException):
def __init__(self):
"""
登录过程需要验证码
"""
pass
def __repr__(self):
return 'Need a captcha to login, ' \
'please catch this exception and ' \
'use client.get_captcha() to get it.'
__str__ = __repr__
class NeedLoginException(ZhihuException):
def __init__(self, what):
"""
使用某方法需要登录而当前客户端未登录
:param str|unicode what: 当前试图调用的方法名
"""
self.what = what
def __repr__(self):
return 'Need login to use the [{self.what}] method.'.format(self=self)
__str__ = __repr__
class IdMustBeIntException(ZhihuException):
def __init__(self, func):
"""
获取对应的知乎类时,试图传递不是整数型的 ID
:param function func: 当前试图调用的方法名
"""
self.func = func.__name__
def __repr__(self):
        return 'You must provide an integer id ' \
'to use function: {self.func}'.format(self=self)
__str__ = __repr__
class ZhihuWarning(UserWarning):
def __init__(self, message, *args, **kwargs):
super(ZhihuWarning, self).__init__(*args)
self._message = message
def __str__(self):
return str(self._message)
__repr__ = __str__
class BadUseWarning(ZhihuWarning):
    def __init__(self, message, *args, **kwargs):
        super(BadUseWarning, self).__init__(message, *args, **kwargs)
class SliceBadUseWarning(BadUseWarning):
def __init__(self, item, *args, **kwargs):
super(SliceBadUseWarning, self).__init__(
'从 {item.start} 开始的 Slice 浪费了很多网络请求,'
'因为会把 0 - {item.start} 的所有内容都获取一遍。'
'推荐使用 generate.jump({item.start})[:{0}] 的用法。'.format(
item.stop - item.start, item=item
),
*args, **kwargs
)
class IgnoreErrorDataWarning(ZhihuWarning):
def __init__(self, message, *args, **kwargs):
super(IgnoreErrorDataWarning, self).__init__(message, *args, **kwargs)
GetEmptyResponseWhenFetchData = IgnoreErrorDataWarning(
'试图获取下一项时,服务器返回了空数据。'
'如果您是在获取某用户的粉丝,那么您可能遇到了知乎 5020 限制。'
'虽然不知道为什么,但是好像知乎限制 API 只能访问前 5020 个粉丝,'
'我也很为难,但是这是知乎做的限制,突破不了呀。'
'目前在遇到这个问题时只能当作获取完处理了。'
)
class CantGetTicketsWarning(ZhihuWarning):
def __init__(self, message, *args, **kwargs):
super(CantGetTicketsWarning, self).__init__(message, *args, **kwargs)
CantGetTickets = CantGetTicketsWarning(
'只能获取未参与过(也即 live.role == \'visitor\')的 Live 的票价信息,'
'对于其他情况此接口不会返回任何东西。这不会造成 for in 循环报错,'
'但如果你不想看到这个警告,请……请在调用 tickets 属性前自行判断一下,'
'或者偷懒强行过滤掉这个 warning :)'
)
class UnimplementedWarning(ZhihuWarning):
def __init__(self, e, *args, **kwargs):
super(UnimplementedWarning, self).__init__(str(e), *args, **kwargs) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/exception.py | exception.py |
# from __future__ import unicode_literals
try:
# python2
from urllib import urlencode
except ImportError:
# python3
# noinspection PyUnresolvedReferences,PyCompatibility
from urllib.parse import urlencode
ZHIHU_API_ROOT = 'https://api.zhihu.com'
"""知乎 API 的根目录"""
# ------- Zhihu OAuth Keys -------
CLIENT_ID = '8d5227e0aaaa4797a763ac64e0c3b8'
"""
默认的 CLIENT ID。
如果 :class:`.ZhihuClient` 构造时没有提供 CLIENT ID,则使用这个值。
"""
APP_SECRET = 'ecbefbf6b17e47ecb9035107866380'
"""
默认的 SECRET。
如果 :class:`.ZhihuClient` 构造时没有提供 SECRET,则使用这个值。
"""
# ------- Zhihu Client Info -------
API_VERSION = '3.0.54'
"""
模拟 Android 官方客户端使用的参数,表示使用的 API 版本。
如果 :class:`.ImZhihuAndroidClient` 构造时没有提供 api_version,则使用这个值。
"""
APP_VERSION = '4.18.0'
"""
模拟 Android 官方客户端使用的参数,表示使用的 APP 版本。
如果 :class:`.ImZhihuAndroidClient` 构造时没有提供 app_version,则使用这个值。
"""
APP_BUILD = 'release'
"""
模拟 Android 官方客户端使用的参数,表示使用的 APP 的 Build 类型。
如果 :class:`.ImZhihuAndroidClient` 构造时没有提供 app_build,则使用这个值。
"""
UUID = 'AHBCVBVCDAtLBfZCo1SYbPj8SgivYjqcGCs='
"""
新加的一个东西,暂时不知道是啥的 ID
"""
DEFAULT_UA = 'Futureve/4.18.0 Mozilla/5.0 (Linux; Android 6.0; ' \
'Google Nexus 5 - 6.0.0 - API 23 - 1080x1920 Build/MRA58K; wv) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 ' \
'Chrome/44.0.2403.119 Mobile Safari/537.36 ' \
'Google-HTTP-Java-Client/1.22.0 (gzip)'
"""
新版本的 API 开始检查 UA了。
"""
APP_ZA = urlencode({
'OS': 'Android',
'Release': '6.0',
'Model': 'Google Nexus 5 - 6.0.0 - API 23 - 1080x1920',
'VersionName': APP_VERSION,
'VersionCode': '477',
'Width': '1080',
'Height': '1920',
'Installer': 'Google Play',
})
"""
模拟 Android 官方客户端使用的参数,表示使用的 APP 的 杂项数据。
如果 :class:`.ImZhihuAndroidClient` 构造时没有提供 app_za,则使用这个值。
.. note::
它是一个 url encode 后的 dict
参见 :meth:`.ImZhihuAndroidClient.__init__`
"""
# ------- Zhihu API URL for Login -------
CAPTCHA_URL = ZHIHU_API_ROOT + '/captcha'
"""
验证码相关
:GET: 是否需要验证码
:PUT: 获取验证码
:POST: 提交验证码
"""
# sign_in - POST - 用户登录
LOGIN_URL = ZHIHU_API_ROOT + '/sign_in'
"""
OAuth 登录地址
"""
LOGIN_DATA = {
'grant_type': 'password',
'source': 'com.zhihu.android',
'client_id': '',
'signature': '',
'timestamp': '',
'username': '',
'password': '',
}
"""
登录数据格式。需要填充的只有用户名和密码。
`client_id` 会由 :class:`.ZhihuClient` 填写。
`timestamp` 和 `signature` 会由 :class:`.ZhihuClient` 内部调用的
:func:`.login_signature` 自动填写。
""" | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/oauth/setting.py | setting.py |
from __future__ import unicode_literals
import json
import pickle
import time
from ..exception import MyJSONDecodeError
__all__ = ['ZhihuToken']
class ZhihuToken:
def __init__(self, user_id, uid, access_token, expires_in, token_type,
refresh_token, cookie, lock_in=None, unlock_ticket=None):
"""
知乎令牌。
尽量不要直接使用这个类,而是用 :meth:`ZhihuToken.from_str` 或
:meth:`ZhihuToken.form_dict` 或
:meth:`ZhihuToken.from_file` 方法来构造。
.. note::
本类仅在 :class:`.ZhihuClient` 类内使用,一般用户不需要了解。
:param str|unicode user_id: 用户 ID
:param int uid: 某个数字型用户 ID,貌似没啥用
:param str|unicode access_token: 最重要的访问令牌
:param int expires_in: 过期时间
:param str|unicode token_type: 令牌类型
:param str|unicode refresh_token: 刷新令牌
:param str|unicode cookie: 登录成功后需要加上这段 Cookies
:param int lock_in: 不知道用处
:param str|unicode unlock_ticket: 不知道用处
"""
self._create_at = time.time()
self._user_id = uid
self._uid = user_id
self._access_token = access_token
self._expires_in = expires_in
self._expires_at = self._create_at + self._expires_in
self._token_type = token_type
self._refresh_token = refresh_token
self._cookie = cookie
# 以下两个属性暂时不知道用处
self._lock_in = lock_in
self._unlock_ticket = unlock_ticket
@staticmethod
def from_str(json_str):
"""
从字符串读取 token。
:param str|unicode json_str: 一个合法的代表知乎 Token 的 JSON 字符串
:rtype: :class:`ZhihuToken`
:raise ValueError: 提供的参数不合法时
"""
try:
return ZhihuToken.from_dict(json.loads(json_str))
except (MyJSONDecodeError, ValueError):
raise ValueError(
'{json_str} is NOT a valid zhihu token json string.'.format(
json_str=json_str
))
@staticmethod
def from_dict(json_dict):
"""
从字典读取 token。
:param dict json_dict: 一个代表知乎 Token 的字典
:rtype: :class:`ZhihuToken`
:raise ValueError: 提供的参数不合法时
"""
try:
return ZhihuToken(**json_dict)
except TypeError:
raise ValueError(
'{json_dict} is NOT a valid zhihu token json.'.format(
json_dict=json_dict
))
@staticmethod
def from_file(filename):
"""
从文件读取 token。
:param str|unicode filename: 文件名
:rtype: :class:`ZhihuToken`
"""
with open(filename, 'rb') as f:
return pickle.load(f)
def save(self, filename):
"""
将 token 保存成文件。
:param str|unicode filename: 文件名
:return: 无返回值
"""
with open(filename, 'wb') as f:
pickle.dump(self, f)
@property
def user_id(self):
"""
:return: 获取用户 ID
:rtype: str
"""
return self._user_id
@property
def type(self):
"""
:return: 获取验证类型
:rtype: str
"""
return self._token_type
@property
def token(self):
"""
:return: 获取访问令牌
:rtype: str
"""
return self._access_token | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/oauth/token.py | token.py |
from __future__ import unicode_literals
from .generator import generator_of
from .normal import normal_attr
from .people import People
from .urls import (
ANSWER_CANCEL_THANKS_URL,
ANSWER_CANCEL_UNHELPFUL_URL,
ANSWER_COLLECT_URL,
ANSWER_DETAIL_URL,
ANSWER_THANKS_URL,
ANSWER_UNHELPFUL_URL,
ANSWER_VOTERS_URL,
ARTICLE_COLLECT_URL,
ARTICLE_DETAIL_URL,
ARTICLE_VOTE_URL,
BLOCK_PEOPLE_URL,
CANCEL_BLOCK_PEOPLE_URL,
COLLECTION_CANCEL_FOLLOW_URL,
COLLECTION_DETAIL_URL,
COLLECTION_FOLLOWERS_URL,
COLUMN_CANCEL_FOLLOW_URL,
COLUMN_FOLLOWERS_URL,
COMMENT_CANCEL_VOTE_URL,
COMMENT_DETAIL_URL,
COMMENT_VOTE_URL,
FEEDS_URL,
LIVE_LIKE_URL,
PEOPLE_CANCEL_FOLLOWERS_URL,
PEOPLE_FOLLOWERS_URL,
PEOPLE_FOLLOWING_COLLECTIONS_URL,
PIN_DETAIL_URL,
PIN_VOTERS_URL,
QUESTION_CANCEL_FOLLOWERS_URL,
QUESTION_FOLLOWERS_URL,
SELF_DETAIL_URL,
SEND_COMMENT_URL,
SEND_MESSAGE_URL,
TOPIC_CANCEL_FOLLOW_URL,
TOPIC_FOLLOWERS_URL,
WHISPERS_URL,
)
from .utils import get_result_or_error
__all__ = ['Me']
class Me(People):
def __init__(self, pid, cache, session):
"""
是 :any:`People` 的子类,表示当前登录的用户。
除了提供用户的基本信息外,还提供各种用户操作
(点赞,评论,收藏,私信、删除等)。
.. inheritance-diagram:: Me
.. seealso:: :class:`People`
"""
super(Me, self).__init__(pid, cache, session)
def _build_url(self):
return SELF_DETAIL_URL
# ---------- simple info ---------
@property
@normal_attr()
def created_at(self):
return None
@property
@normal_attr()
def draft_count(self):
return None
@property
@normal_attr()
def email(self):
return None
@property
@normal_attr()
def friendly_score(self):
return None
@property
@normal_attr()
def has_daily_recommend_permission(self):
return None
@property
@normal_attr()
def is_active(self):
return None
@property
@normal_attr()
def is_baned(self):
return None
@property
@normal_attr()
def is_force_renamed(self):
return None
@property
@normal_attr()
def is_locked(self):
return None
@property
@normal_attr()
def is_moments_user(self):
"""
不知道是啥。
"""
return None
@property
@normal_attr()
def phone_no(self):
return None
@property
@normal_attr()
def uid(self):
"""
没什么用的东西。
"""
return None
# ----- generators -----
@property
@generator_of(FEEDS_URL, format_id=False)
def feeds(self):
"""
用户信息流,参见 :any:`Feed`
"""
return None
@property
@generator_of(PEOPLE_FOLLOWING_COLLECTIONS_URL, 'collection')
def following_collections(self):
"""
.. warning:: 注意
这一方法是 :any:`Me` 类独有的,其父类 :any:`People` 类没有此方法。
根本原因是知乎并不允许获取除自己(登录用户)以外用户关注的收藏夹,
至于为什么,我哪知道呀 QAQ
"""
return None
@property
@generator_of(WHISPERS_URL)
def whispers(self):
"""
私信列表
"""
return None
# ----- operations -----
def vote(self, what, op='up'):
"""
投票操作。也就是赞同,反对,或者清除(取消赞同和反对)。
操作对象可以是答案,文章,分享和评论。
:param what: 要点赞的对象,可以是 :any:`Answer` 或 :any:`Article`
或 :any:`Comment` 或 :any:`Pin` 对象。
:param str|unicode op: 对于答案可取值 'up', 'down', 'clear',
分别表示赞同、反对和清除。
对于文章,评论和分享,只能取 'up' 和 'clear'。默认值是 'up'。
:return: 表示结果的二元组,第一项表示是否成功,第二项表示原因。
:rtype: (bool, str)
:raise: :any:`UnexpectedResponseException`
当服务器回复和预期不符,不知道是否成功时。
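        用法示意(假设 client 已登录,答案 ID 仅为举例):
        .. code-block:: python
            me = client.me()
            answer = client.answer(1234567)
            success, reason = me.vote(answer)           # 赞同
            success, reason = me.vote(answer, 'down')   # 反对
            success, reason = me.vote(answer, 'clear')  # 取消投票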
"""
from . import Answer, Article, Comment, Pin
if isinstance(what, Answer):
if op not in {'up', 'down', 'clear'}:
raise ValueError(
'Operate must be up, down or clear for Answer.')
return self._common_vote(ANSWER_VOTERS_URL, what, op)
elif isinstance(what, Article):
if op not in {'up', 'clear'}:
raise ValueError('Operate must be up or clear for Article')
return self._common_vote(ARTICLE_VOTE_URL, what, op)
elif isinstance(what, Comment):
if op not in {'up', 'clear'}:
raise ValueError('Operate must be up or clear for Comment')
return self._common_click(
what, op == 'clear', COMMENT_VOTE_URL,
COMMENT_CANCEL_VOTE_URL
)
elif isinstance(what, Pin):
if op not in {'up', 'clear'}:
                raise ValueError('Operate must be up or clear for Pin')
return self._common_click(
what, op == 'clear', PIN_VOTERS_URL,
PIN_VOTERS_URL
)
else:
raise TypeError(
'Unable to voteup a {0}.'.format(what.__class__.__name__))
def thanks(self, answer, thanks=True):
"""
感谢或者取消感谢答案。
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
:param Answer answer: 要感谢的答案
:param bool thanks: 如果是想取消感谢,请设置为 False
"""
from .answer import Answer
if not isinstance(answer, Answer):
raise TypeError('This method only accept Answer object.')
return self._common_click(answer, not thanks,
ANSWER_THANKS_URL, ANSWER_CANCEL_THANKS_URL)
def unhelpful(self, answer, unhelpful=True):
"""
给答案点没有帮助,或者取消没有帮助。
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
:param Answer answer: 要操作的答案
:param bool unhelpful: 如果是想撤销没有帮助,请设置为 False
"""
from .answer import Answer
if not isinstance(answer, Answer):
raise TypeError('This method only accept Answer object.')
return self._common_click(answer, not unhelpful,
ANSWER_UNHELPFUL_URL,
ANSWER_CANCEL_UNHELPFUL_URL)
def follow(self, what, follow=True):
"""
关注或者取消关注问题/话题/用户/专栏/收藏夹/Live。
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
:param what: 操作对象
:param bool follow: 要取消关注的话把这个设置成 False
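        用法示意(假设 client 已登录,问题 ID 仅为举例):
        .. code-block:: python
            me = client.me()
            question = client.question(1234567)
            me.follow(question)                # 关注问题
            me.follow(question, follow=False)  # 取消关注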
"""
from . import Question, Topic, People, Column, Collection, Live
if isinstance(what, Question):
return self._common_click(what, not follow,
QUESTION_FOLLOWERS_URL,
QUESTION_CANCEL_FOLLOWERS_URL)
elif isinstance(what, Topic):
return self._common_click(what, not follow, TOPIC_FOLLOWERS_URL,
TOPIC_CANCEL_FOLLOW_URL)
elif isinstance(what, People):
what._get_data()
return self._common_click(what, not follow, PEOPLE_FOLLOWERS_URL,
PEOPLE_CANCEL_FOLLOWERS_URL)
elif isinstance(what, Column):
return self._common_click(what, not follow, COLUMN_FOLLOWERS_URL,
COLUMN_CANCEL_FOLLOW_URL)
elif isinstance(what, Collection):
return self._common_click(what, not follow,
COLLECTION_FOLLOWERS_URL,
COLLECTION_CANCEL_FOLLOW_URL)
elif isinstance(what, Live):
return self._common_click(what, not follow,
LIVE_LIKE_URL, LIVE_LIKE_URL)
else:
raise TypeError(
'Unable to follow a {0}.'.format(what.__class__.__name__))
def block(self, what, block=True):
"""
屏蔽用户
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
:param People what: 操作对象,用户
:param bool block: 如果要取消屏蔽请设置为 False
"""
from . import People
if isinstance(what, People):
return self._common_block(what, not block, BLOCK_PEOPLE_URL,
CANCEL_BLOCK_PEOPLE_URL)
else:
raise TypeError(
'Unable to block a {0}.'.format(what.__class__.__name__))
def collect(self, what, collection, collect=True):
"""
收藏答案/文章进收藏夹。
.. warning::
就算你提供的是别人的收藏夹也会返回成功……但是操作其实是无效的
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
:param Answer|Article what: 要收藏的答案
:param Collection collection: 要加入哪个收藏夹
:param bool collect: 如果想要取消收藏请设置为 False
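        用法示意(假设 client 已登录,ID 仅为举例,收藏夹须是自己创建的):
        .. code-block:: python
            me = client.me()
            answer = client.answer(1234567)
            collection = client.collection(7654321)
            me.collect(answer, collection)                 # 收藏
            me.collect(answer, collection, collect=False)  # 取消收藏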
"""
from . import Answer, Article, Collection
if isinstance(what, Answer):
url = ANSWER_COLLECT_URL
elif isinstance(what, Article):
url = ARTICLE_COLLECT_URL
else:
raise TypeError('Unable to add a {0} to collection.'.format(
what.__class__.__name__))
if not isinstance(collection, Collection):
            raise TypeError('Unable to add answer to a {0}.'.format(
collection.__class__.__name__))
if collect:
data = {'add_collections': collection.id}
else:
data = {'remove_collections': collection.id}
url = url.format(what.id)
res = self._session.put(url, data=data)
return get_result_or_error(url, res)
def message(self, who, content):
"""
发送私信。
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
:param People who: 接收者
:param str|unicode content: 私信内容
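        用法示意(假设 client 已登录,用户 ID 仅为举例):
        .. code-block:: python
            me = client.me()
            somebody = client.people('some-people-id')
            success, reason = me.message(somebody, '你好!')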
"""
from . import People
if not isinstance(who, People):
raise TypeError(
'Unable to send message to {0}'.format(who.__class__.__name__))
_ = who.name
data = {
'receiver_id': who.id,
'content': content,
}
res = self._session.post(SEND_MESSAGE_URL, data=data)
return get_result_or_error(SEND_MESSAGE_URL, res)
def comment(self, what, content, parent=None):
"""
向答案,文章,问题,收藏夹,Pin 发送评论
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
.. warning:: 奇怪
让我很诧异的是,就算「想要回复的评论」不属于「想要评论的主体」,
知乎的 API 也会返回执行成功。而且经过测试,这条回复真的有效,
会出现在评论主体的评论列表里。暂时不知道被评论用户的会不会收到消息。
另外,莫名其妙的还可以回复自己的评论……
:param what: 向哪里发送评论,可以是 :any:`Answer`, :any:`Article`
:any:`Question`, :any:`Collection`, :any:`Pin`
:param str|unicode content: 评论内容
:param Comment parent: 想要回复的评论,默认值为 None,则为正常的添加评论
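        用法示意(假设 client 已登录,答案 ID 仅为举例):
        .. code-block:: python
            me = client.me()
            answer = client.answer(1234567)
            success, reason = me.comment(answer, '写得很好!')
            # 如需回复某条已有评论,传入 parent 参数即可,例如
            # me.comment(answer, '同意', parent=some_comment)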
"""
from . import Answer, Article, Question, Collection, Comment, Pin
data = {'content': content}
if parent is not None:
if not isinstance(parent, Comment):
raise TypeError(
'parent comment must be Comment object, {0} given.'.format(
parent.__class__.__name__))
data.update({'comment_id': parent.id})
if isinstance(what, (Answer, Article, Collection, Question, Pin)):
data.update({'type': what.__class__.__name__.lower(),
'resource_id': what.id})
else:
raise TypeError('Can\'t add comment to a {0}.'.format(
what.__class__.__name__))
res = self._session.post(SEND_COMMENT_URL, data=data)
return get_result_or_error(SEND_COMMENT_URL, res)
def delete(self, what):
"""
删除……一些东西,目前可以删除答案,评论,收藏夹,文章,Pin。
.. seealso::
返回值和可能的异常同 :any:`vote` 方法
.. warning::
请注意,本方法没有经过完整的测试,加上删除操作不可撤销,
所以使用时请谨慎。
:param what: 要删除的对象,可以是 :any:`Answer`, :any:`Comment`,
:any:`Collection`, :any:`Article`, :any:`Pin`
"""
from . import Answer, Comment, Collection, Article, Pin
if isinstance(what, Answer):
url = ANSWER_DETAIL_URL.format(what.id)
elif isinstance(what, Comment):
url = COMMENT_DETAIL_URL.format(what.id)
elif isinstance(what, Collection):
url = COLLECTION_DETAIL_URL.format(what.id)
elif isinstance(what, Article):
url = ARTICLE_DETAIL_URL.format(what.id)
elif isinstance(what, Pin):
url = PIN_DETAIL_URL.format(what.id)
else:
raise TypeError(
'Can\'t delete a {0}.'.format(what.__class__.__name__))
res = self._session.delete(url)
return get_result_or_error(url, res)
def _common_click(self, what, cancel, click_url, cancel_url):
if cancel:
method = 'DELETE'
url = cancel_url.format(what.id, self.id)
else:
method = 'POST'
url = click_url.format(what.id)
res = self._session.request(method, url)
return get_result_or_error(url, res)
def _common_vote(self, url, what, op):
data = {
'voteup_count': 0,
'voting': {'up': 1, 'down': -1, 'clear': 0}[op],
}
url = url.format(what.id)
res = self._session.post(url, data=data)
return get_result_or_error(url, res)
def _common_block(self, what, cancel, block_url, cancel_url):
_ = what.name
if cancel:
method = 'DELETE'
data = None
url = cancel_url.format(what.id)
else:
method = 'POST'
data = {'people_id': what.id}
url = block_url
res = self._session.request(method, url, data=data)
return get_result_or_error(url, res) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/me.py | me.py |
from __future__ import unicode_literals
from .base import Base
from .generator import generator_of
from .other import other_obj
from .normal import normal_attr
from .urls import (
COLLECTION_DETAIL_URL,
COLLECTION_CONTENTS_URL,
COLLECTION_COMMENTS_URL,
COLLECTION_FOLLOWERS_URL,
)
from .utils import int_id
class Collection(Base):
@int_id
def __init__(self, cid, cache, session):
super(Collection, self).__init__(cid, cache, session)
def _build_url(self):
return COLLECTION_DETAIL_URL.format(self.id)
# ---- simple info -----
@property
@normal_attr()
def answer_count(self):
return None
@property
@normal_attr()
def created_time(self):
return None
@property
@other_obj('people')
def creator(self):
return None
@property
@normal_attr()
def comment_count(self):
return None
@property
@normal_attr()
def description(self):
return None
@property
@normal_attr()
def follower_count(self):
return None
@property
@normal_attr()
def is_public(self):
return None
@property
@normal_attr()
def title(self):
return None
@property
@normal_attr()
def updated_time(self):
return None
# ----- generators -----
@property
def answers(self):
"""
获取收藏夹里的所有答案。
.. warning:: 无法被 shield
因为内部是调用 :any:`Collection.contents` 的,
所以此生成器无法被 :any:`shield` 保护。
但是内部其实是用 shield 保护过 contents 的获取的,
如果这个生成器异常了那还是处理下吧。
.. seealso:: :any:`Collection.articles`, :any:`Collection.contents`
"""
from .answer import Answer
from ..helpers import shield
contents = self.contents
if contents is None:
return
# noinspection PyTypeChecker
for x in shield(contents):
if isinstance(x, Answer):
yield x
@property
def articles(self):
"""
获取收藏夹里的所有文章。
.. warning:: 无法被 shield
因为内部是调用 :any:`Collection.contents` 的,
所以此生成器无法被 :any:`shield` 保护。
但是内部其实是用 shield 保护过 contents 的获取的,
如果这个生成器异常了那还是处理下吧。
.. seealso:: :any:`Collection.answers`, :any:`Collection.contents`
"""
from .article import Article
from ..helpers import shield
contents = self.contents
if contents is None:
return
# noinspection PyTypeChecker
for x in shield(contents):
if isinstance(x, Article):
yield x
@property
@generator_of(COLLECTION_COMMENTS_URL)
def comments(self):
return None
@property
@generator_of(COLLECTION_CONTENTS_URL, 'CollectionContent')
def contents(self):
"""
新版知乎专栏支持收藏文章了,这个生成器生成的对象可能是 :any:`Answer` 也可能是
:any:`Article`,使用时要用 ``isinstance`` 判断类型后再获取对应对象的属性。
.. code-block:: python
from zhihu_oauth import ZhihuClient, Answer, Article
collection = client.collection(37770691)
for content in collection.contents:
if isinstance(content, Answer):
answer = content
print(answer.question.title)
elif isinstance(content, Article):
article = content
print(article.title)
如果你只需要答案或者只需要文章类型的数据,可以使用 :any:`Collection.answers`
或者 :any:`Collection.articles` 进行获取。
不过需要注意的是,这两个属性内部其实会调用 :any:`Collection.contents`,
然后只返回相应类型的对象。所以其实也是遍历了所有内容的,
效率与使用本函数然后自己判断类型一样。
.. seealso:: :any:`Collection.answers`, :any:`Collection.articles`
"""
return None
@property
@generator_of(COLLECTION_FOLLOWERS_URL, 'people')
def followers(self):
"""
.. warning:: 注意!
知乎的这个 API 有问题,返回一些之后会将 is_end 设置为 True,
导致无法获取到所有的关注者。
并且此问题在知乎官方 Android APP 上也存在。你可以试着
找个很多人关注的收藏夹,然后查看关注者,一直往下拉。
大概加载 100 - 200(不固定,有时候一个都出不来)
之后就没法往下刷了。
起码在我这个地区是这样的。欢迎各路少侠反馈。
"""
# TODO: collection.followers 这个 API 不稳定
return None | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/collection.py | collection.py |
from __future__ import unicode_literals
from .normal import normal_attr
from .streaming import StreamingJSON
from .utils import build_zhihu_obj_from_dict, get_class_from_name, SimpleEnum
from ..exception import UnimplementedException
__all__ = ['Activity', 'ActType']
_VERB_TO_ACT_TYPE_MAP = {
'LIVE_JOIN': 'JOIN_LIVE',
'ANSWER_CREATE': 'CREATE_ANSWER',
'QUESTION_CREATE': 'CREATE_QUESTION',
'MEMBER_COLLECT_ANSWER': 'COLLECT_ANSWER',
'MEMBER_COLLECT_ARTICLE': 'COLLECT_ARTICLE',
'MEMBER_CREATE_ARTICLE': 'CREATE_ARTICLE',
'MEMBER_CREATE_PIN': 'CREATE_PIN',
'MEMBER_FOLLOW_COLLECTION': 'FOLLOW_COLLECTION',
'MEMBER_FOLLOW_COLUMN': 'FOLLOW_COLUMN',
'QUESTION_FOLLOW': 'FOLLOW_QUESTION',
'MEMBER_FOLLOW_ROUNDTABLE': 'FOLLOW_ROUNDTABLE',
'MEMBER_FOLLOW_TOPIC': 'FOLLOW_TOPIC',
'MEMBER_LIKE_PIN': 'LIKE_PIN',
'ANSWER_VOTE_UP': 'VOTEUP_ANSWER',
'MEMBER_VOTEUP_ARTICLE': 'VOTEUP_ARTICLE',
'EBOOK_VOTE_UP': 'VOTEUP_EBOOK',
'LIVE_PUBLISH': 'PUBLISH_LIVE',
'TOPIC_FOLLOW': 'FOLLOW_TOPIC',
}
# TODO: Check if two FOLLOW_TOPIC necessary
ActType = SimpleEnum(_VERB_TO_ACT_TYPE_MAP.values())
"""
ActType 是用于表示用户动态类型的枚举类,可供使用的常量有:
================= ================ ======================
常量名 说明 `target` 属性类型
================= ================ ======================
COLLECT_ANSWER 收藏答案 比较特殊,见下文
COLLECT_ARTICLE 收藏文章 比较特殊,见下文
CREATE_ANSWER 回答问题 :any:`Answer`
CREATE_ARTICLE 发表文章 :any:`Article`
CREATE_PIN 发表分享 :any:`Pin`
CREATE_QUESTION 提出问题 :any:`Question`
FOLLOW_COLLECTION 关注收藏夹 :any:`Collection`
FOLLOW_COLUMN 关注专栏 :any:`Column`
FOLLOW_QUESTION 关注问题 :any:`Question`
FOLLOW_ROUNDTABLE 关注圆桌 :any:`StreamingJSON`
FOLLOW_TOPIC 关注话题 :any:`Topic`
LIKE_PIN 赞了分享 :any:`Pin`
JOIN_LIVE 参加 Live :any:`Live`
PUBLISH_LIVE 举办 Live :any:`Live`
VOTEUP_ANSWER 赞同回答 :any:`Answer`
VOTEUP_ARTICLE 赞同文章 :any:`Article`
VOTEUP_EBOOK 赞了电子书 :any:`StreamingJSON`
================= ================ ======================
收藏答案和收藏文章的 Target 属性是一个 dict,结构如下:
.. code-block:: javascript
{
'answer/article': <Answer object> or <Article object>,
'collection': <Collection object>,
}
``answer/article`` 表示被收藏的答案/文章,``collection`` 表示被收藏进的收藏夹,因为只有这两个动作有两个
操作对象,所以特殊处理了一下。
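例如,处理收藏类型的动态时可以这样取出两个对象(仅为示意):
.. code-block:: python
    if act.type == ActType.COLLECT_ANSWER:
        answer = act.target['answer']
        collection = act.target['collection']
        print(collection.title, '<-', answer.question.title)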
"""
def _verb_to_act_type(verb):
type_str = _VERB_TO_ACT_TYPE_MAP.get(verb, None)
if type_str is None:
raise UnimplementedException(
'Unknown activity verb: {0}'.format(verb)
)
return getattr(ActType, _VERB_TO_ACT_TYPE_MAP[verb])
class Activity(object):
def __new__(cls, data, session):
if data['verb'] == 'MEMBER_FOLLOW_ROUNDTABLE':
data['type'] = ActType.FOLLOW_ROUNDTABLE
return StreamingJSON(data)
elif data['verb'] == 'EBOOK_VOTE_UP':
data['type'] = ActType.VOTEUP_EBOOK
return StreamingJSON(data)
else:
return super(Activity, cls).__new__(cls)
def __init__(self, data, session):
"""
表示用户的一条动态。
:any:`type <Activity.type>` 属性标识了动态的类型,其取值及意义请参见
:any:`ActType`。
:any:`target <Activity.target>` 属性表示这次动态操作的目标,根据
`type` 的不同,这个属性的类型也不同。但是基本都是
`type` 的最后一个单词表示的类型,请看下面的例子。
.. note:: 举例
.. code-block:: python
from zhihu_oauth import ZhihuClient, ActType # 记得要导入 ActType
client = ZhihuClient()
# Client 登录过程省略
me = client.me()
for act in me.activities:
if act.type == ActType.CREATE_ANSWER:
print(act.target.question.title)
上面这段代码只处理类型是创建答案的动态,此时 `act.target` 就是
:any:`Answer` 类型,和 `CREATE_ANSWER` 的 `ANSWER` 对应。
"""
self._data = data
self._type = _verb_to_act_type(data['verb'])
self._session = session
self._get_target()
@property
@normal_attr()
def action_text(self):
return None
@property
@normal_attr()
def created_time(self):
"""
用户动态的时间戳。
"""
return None
@property
def type(self):
"""
动态的类型。
.. seealso:: :any:`ActType`
"""
return self._type
@property
def target(self):
"""
动态的操作目标。
.. seealso:: :any:`Activity.__init__`, :any:`ActType`
"""
return self._target
def _get_target(self):
pos = self._type.rfind('_')
if pos == -1:
raise UnimplementedException('Unable to get class from type name')
filename = self._type[pos + 1:].lower()
class_name = filename.capitalize()
obj_cls = get_class_from_name(class_name, filename)
self._target = build_zhihu_obj_from_dict(
self._data['target'], self._session, cls=obj_cls
)
# 对收藏答案类型的特殊处理
if self._type in {ActType.COLLECT_ANSWER, ActType.COLLECT_ARTICLE}:
from .collection import Collection
obj = self._target
collection = build_zhihu_obj_from_dict(
self._data['target']['collection'], self._session,
cls=Collection,
)
self._target = {
'collection': collection,
class_name.lower(): obj,
} | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/activity.py | activity.py |
from copy import deepcopy
from ..exception import UnimplementedException
from .utils import ConstValue, build_zhihu_obj_from_dict, SimpleEnum
__all__ = ['SearchResult', 'SearchResultSection', 'SearchType']
_search_type_t_map = {
'GENERAL': 'general',
'PEOPLE': 'people',
'TOPIC': 'topic',
'COLUMN': 'column',
'LIVE': 'live',
# 'EBOOK': 'publication',
# EBOOK 是电子书搜索,目前电子书类还没写,暂时不开放。
}
SearchType = SimpleEnum(_search_type_t_map.keys())
"""
================= ================ ======================
常量名 说明 备注
================= ================ ======================
GENERAL 综合搜索 * 见下备注
PEOPLE 搜索用户
TOPIC 搜索话题
COLUMN 搜索专栏
LIVE 搜索 Live
EBOOK 搜索电子书 现在电子书类还没写,所以此常量暂时不能使用
================= ================ ======================
* 综合搜索是最复杂的一个,但是只有它能搜索到问题和答案。
一般情况下综合搜索会返回几个除了问题和答案类型之外的 :any:`SearchResultSection`,
然后返回 Answer,Article,Question 型的 :any:`SearchResult`,具体处理方法参见下面的示例。
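下面是综合搜索结果的一个处理示例(仅为示意,假设 client 已登录,
导入路径按本模块所在位置书写):
.. code-block:: python
    from zhihu_oauth.zhcls.search import SearchType, SearchResultSection
    for result in client.search('python', SearchType.GENERAL):
        if isinstance(result, SearchResultSection):
            for r in result:
                print(result.type, r.highlight_title)
        else:
            print(result.highlight_title)
如果不想自己区分 Section,也可以直接使用 :any:`ZhihuClient.search_unfold`,
它会把 Section 展开成单个的 SearchResult。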
"""
def search_type_to_t(search_type):
return _search_type_t_map[search_type]
class SearchResult(object):
_TYPE_KEY = ConstValue('type')
_RESULT_INDICATOR = ConstValue('search_result')
_RESULT_INDICATOR_TYPO = ConstValue('searach_result')
_RESULT_HIGHLIGHT_KEY = ConstValue('highlight')
_RESULT_OBJ_KEY = ConstValue('object')
_HIGHLIGHT_TITLE_KEY = ConstValue('title')
_HIGHLIGHT_DESC_KEY = ConstValue('description')
def __init__(self, data, session):
if data[self._TYPE_KEY] != self._RESULT_INDICATOR and \
data[self._TYPE_KEY] != self._RESULT_INDICATOR_TYPO:
raise ValueError("Must be a {} type dict, {} provided".format(
self._RESULT_INDICATOR, data
))
self._data = data
self._session = session
self._highlight = self._data.get(self._RESULT_HIGHLIGHT_KEY, {})
@property
def highlight_title(self):
"""
标题,其中搜索关键词被高亮,是 HTML 格式的字符串,特殊字符被 escape 了,高亮的部分在 <em> 标签之间。
print 出来的话不是很好读……
"""
return self._highlight[self._HIGHLIGHT_TITLE_KEY] \
if self._HIGHLIGHT_TITLE_KEY in self._highlight \
else ''
@property
def highlight_desc(self):
"""
description,搜索结果的内容。
同 :any:`highlight_title`。
"""
return self._highlight[self._HIGHLIGHT_DESC_KEY] \
if self._HIGHLIGHT_DESC_KEY in self._highlight \
else ''
@property
def obj(self):
"""
        搜索结果对应的知乎类对象,可能是各种类型,使用前需要自行判断。
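        用法示意(假设已经拿到一个 result 对象):
        .. code-block:: python
            from zhihu_oauth import Answer
            obj = result.obj
            if isinstance(obj, Answer):
                print(obj.question.title)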
"""
obj = self._data[self._RESULT_OBJ_KEY]
# 因为搜索结果里的用户属性,比如标题,名字等,都会被 <em> 标签标记成高亮,所以
# 这里不使用他们当作 cache
return build_zhihu_obj_from_dict(obj, self._session, use_cache=None)
def raw_data(self):
"""
返回搜索结果的原始数据的拷贝,是个 dict。
"""
return deepcopy(self._data)
class SearchResultSection(object):
_TYPE_KEY = ConstValue('type')
_SECTION_INDICATOR = ConstValue('search_section')
_SECTION_DATA_LIST_KEY = ConstValue('data_list')
_SECTION_TYPE_KEY = ConstValue('section_type')
_SECTION_HAS_MORE_KEY = ConstValue('has_more')
def __init__(self, data, session):
"""
:any:`SearchResultSection` 对象是可迭代的,``for xxx in results``
一般会生成 :any:`SearchResult` 型数据。
"""
if data[self._TYPE_KEY] != self._SECTION_INDICATOR:
raise ValueError("Must be a {} type dict, {} provided".format(
self._SECTION_INDICATOR, data
))
self._data = data
self._session = session
self._index = 0
self._len = len(self._data[self._SECTION_DATA_LIST_KEY])
@property
def type(self):
"""
表示这一 Section 里的 :any:`SearchResult` 的知乎类对象是什么类型。
:rtype: str|unicode
"""
return self._data[self._SECTION_TYPE_KEY]
@property
def has_more(self):
"""
如果用 type 类型进行搜索,能否得到更多结果。
比如: ::
self.type == 'people' and self.has_more == True
那么表示: ::
client.search('something', SearchType.PEOPLE)
能获取到此 Section 的更多结果。
:rtype: bool
"""
return self._data[self._SECTION_HAS_MORE_KEY]
def raw_data(self):
"""
同 :any:`SearchResult.raw_data`
"""
return deepcopy(self._data)
def __iter__(self):
self._index = 0
return self
def __len__(self):
return self._len
def __next__(self):
try:
obj = self[self._index]
except IndexError:
self._index = 0
raise StopIteration
self._index += 1
return obj
next = __next__
def __getitem__(self, item):
if not isinstance(item, int):
raise TypeError('Need an int as index, not {0}'.format(type(item)))
if item >= self._len:
raise IndexError()
data = self._data[self._SECTION_DATA_LIST_KEY][item]
return data_to_section_or_result(data, self._session)
def data_to_section_or_result(data, session):
try:
return SearchResult(data, session)
except ValueError:
pass
try:
return SearchResultSection(data, session)
except ValueError:
raise UnimplementedException(
"Unknown search result dict [{}]".format(data)) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/search.py | search.py |
from __future__ import unicode_literals
from .base import Base
from .generator import generator_of
from .normal import normal_attr
from .urls import (
COMMENT_CONVERSION_URL,
COMMENT_REPLIES_URL,
)
from .utils import build_zhihu_obj_from_dict, int_id
__all__ = ['Comment']
class Comment(Base):
@int_id
def __init__(self, cid, cache, session):
super(Comment, self).__init__(cid, cache, session)
def _get_data(self):
self._data = None
def _build_url(self):
return ''
# ----- simple info -----
@property
@normal_attr()
def allow_delete(self):
return None
@property
@normal_attr()
def allow_like(self):
return None
@property
@normal_attr()
def allow_reply(self):
return None
@property
@normal_attr()
def ancestor(self):
"""
不知道是啥,貌似永远都是 False。
"""
return None
@property
def author(self):
from .people import People
if self._cache and 'author' in self._cache:
cache = self._cache['author']
else:
self._get_data()
if self._data and 'author' in self._data:
cache = self._data['author']
else:
cache = None
if cache:
if 'member' in cache:
cache = cache['member']
return People(cache['id'], cache, self._session)
else:
return None
@property
@normal_attr()
def content(self):
return None
@property
@normal_attr()
def created_time(self):
return None
@property
@normal_attr()
def is_author(self):
"""
当前登录的用户是否是评论作者,也即表示是否是自己发送的评论。
"""
return None
@property
@normal_attr()
def is_delete(self):
"""
是否被删除?话说被删除了还能获取到?我没测试……
"""
return None
@property
@normal_attr()
def is_parent_author(self):
"""
也没搞懂这个属性,貌似永远和 :meth:`is_author` 保持一致。
"""
return None
@property
def reply_to(self):
"""
获取这条评论的父评论的作者,如果并没有回复谁则返回 None
:rtype: People
"""
from .people import People
if self._cache and 'reply_to_author' in self._cache:
cache = self._cache['reply_to_author']
else:
self._get_data()
if self._data and 'reply_to_author' in self._data:
cache = self._data['reply_to_author']
else:
cache = None
if cache:
if 'member' in cache:
cache = cache['member']
return build_zhihu_obj_from_dict(cache, self._session, cls=People)
else:
return None
@property
@normal_attr()
def resource_type(self):
"""
是对什么东西的评论。
======== ==========
值(str) 说明
======== ==========
answer 答案
article 文章
question 问题
favlist 收藏夹
pin 分享
======== ==========
"""
return None
@property
@normal_attr()
def vote_count(self):
return None
@property
@normal_attr()
def voting(self):
"""
是否对这条评论点了赞。
"""
return None
# ----- generators -----
@property
@generator_of(COMMENT_REPLIES_URL, 'comment')
def replies(self):
"""
应该是用于实现「对话列表」的。
:return: 回复本条评论的所有评论的列表(生成器)。
:rtype: collections.Iterable[Comment]
"""
return None
@property
@generator_of(COMMENT_CONVERSION_URL, 'comment')
def conversation(self):
"""
应该是用于实现「查看对话」的。
        有的评论有这个属性,有的没有,我也没搞清楚规律。
:return: 包含此条评论的对话,体现为评论列表(生成器)
:rtype: collections.Iterable[Comment]
"""
return None | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/comment.py | comment.py |
from __future__ import unicode_literals
import re
# ------- Zhihu API URLs --------
ZHIHU_API_ROOT = 'https://api.zhihu.com'
# ----- 用户相关 -----
# self - GET - 获取自身资料
SELF_DETAIL_URL = ZHIHU_API_ROOT + '/people/self'
# people - GET - 详情
PEOPLE_DETAIL_URL = ZHIHU_API_ROOT + '/people/{}'
# people.answers - GET - 回答
PEOPLE_ANSWERS_URL = PEOPLE_DETAIL_URL + '/answers'
# people.articles - GET - 文章
PEOPLE_ARTICLES_URL = PEOPLE_DETAIL_URL + '/articles'
# people.collections - GET - 收藏夹
PEOPLE_COLLECTIONS_URL = PEOPLE_DETAIL_URL + '/collections_v2'
# people.columns - GET - 专栏
PEOPLE_COLUMNS_URL = PEOPLE_DETAIL_URL + '/columns'
# people.followers - GET - 粉丝
# me.follow - POST - 关注用户
PEOPLE_FOLLOWERS_URL = PEOPLE_DETAIL_URL + '/followers'
# me.follow - DELETE - 取消关注用户
PEOPLE_CANCEL_FOLLOWERS_URL = PEOPLE_FOLLOWERS_URL + '/{}'
# me.following_collections - GET - 关注的收藏夹
PEOPLE_FOLLOWING_COLLECTIONS_URL = PEOPLE_DETAIL_URL + '/following_collections'
# people.following_columns - GET - 关注的专栏
PEOPLE_FOLLOWING_COLUMNS_URL = PEOPLE_DETAIL_URL + '/following_columns'
# people.following_questions - GET - 关注的问题
PEOPLE_FOLLOWING_QUESTIONS_URL = PEOPLE_DETAIL_URL + '/following_questions'
# people.following_topics - GET - 关注的话题
PEOPLE_FOLLOWING_TOPICS_URL = PEOPLE_DETAIL_URL + '/following_topics'
# people.followings - GET - 关注的人
PEOPLE_FOLLOWINGS_URL = PEOPLE_DETAIL_URL + '/followees'
# people.questions - GET - 用户提的问题
PEOPLE_QUESTIONS_URL = PEOPLE_DETAIL_URL + '/questions'
# people.activities - GET - 用户最近动态
PEOPLE_ACTIVITIES_URL = PEOPLE_DETAIL_URL + '/activities'
# people.lives - GET - 用户 Live(包括参与的和组织的)
PEOPLE_LIVES_URL = PEOPLE_DETAIL_URL + '/lives'
# people.liked_lives - GET - 用户感兴趣的 Live
PEOPLE_LIKED_LIVES_URL = ZHIHU_API_ROOT + '/lives/people/{}/like_lives'
# people.pins - GET - 用户的分享
PEOPLE_PINS_URL = PEOPLE_DETAIL_URL + '/pins'
# ----- 答案相关 -----
# answer - GET - 详情
# me.delete - DELETE - 删除答案
ANSWER_DETAIL_URL = ZHIHU_API_ROOT + '/answers/{}'
# answer.collections - GET - 所在收藏夹
ANSWER_COLLECTIONS_URL = ANSWER_DETAIL_URL + '/collections'
# me.collect - PUT - 加入收藏夹
ANSWER_COLLECT_URL = ANSWER_DETAIL_URL + '/collections_v2'
# answer.comment - GET - 评论
ANSWER_COMMENTS_URL = ANSWER_DETAIL_URL + '/comments'
# answer.voters - GET - 点赞用户
# me.vote - POST - 给答案投票
ANSWER_VOTERS_URL = ANSWER_DETAIL_URL + '/voters'
# me.thanks - POST - 给答案点感谢
ANSWER_THANKS_URL = ANSWER_DETAIL_URL + '/thankers'
# me.thanks - DELETE - 取消感谢
ANSWER_CANCEL_THANKS_URL = ANSWER_THANKS_URL + '/{}'
# me.unhelpful - POST - 没有帮助
ANSWER_UNHELPFUL_URL = ANSWER_DETAIL_URL + '/nothelpers'
# me.unhelpful - DELETE - 取消没有帮助
ANSWER_CANCEL_UNHELPFUL_URL = ANSWER_UNHELPFUL_URL + '/{}'
# ----- 问题相关 -----
# question - GET - 详情
QUESTION_DETAIL_URL = ZHIHU_API_ROOT + '/questions/{}'
# question.answers - GET - 回答
QUESTION_ANSWERS_URL = QUESTION_DETAIL_URL + '/answers'
# question.comments - GET - 评论
QUESTION_COMMENTS_URL = QUESTION_DETAIL_URL + '/comments'
# question.answers - GET - 关注者
# me.follow - POST - 关注问题
QUESTION_FOLLOWERS_URL = QUESTION_DETAIL_URL + '/followers'
# me.follower - DELETE - 取消关注
QUESTION_CANCEL_FOLLOWERS_URL = QUESTION_FOLLOWERS_URL + '/{}'
# question.topics - GET - 所属话题
QUESTION_TOPICS_URL = QUESTION_DETAIL_URL + '/topics'
# ----- 话题相关 -----
# topic - GET - 详情
TOPIC_DETAIL_URL = ZHIHU_API_ROOT + '/topics/{}'
# topic.activities - GET - 动态
TOPIC_ACTIVITIES_URL = TOPIC_DETAIL_URL + '/activities_new'
# topic.best_answers - GET - 精华回答
TOPIC_BEST_ANSWERS_URL = TOPIC_DETAIL_URL + '/best_answers'
# topic.best_answerers - GET - 最佳回答者
TOPIC_BEST_ANSWERERS_URL = TOPIC_DETAIL_URL + '/best_answerers'
# topic.children - GET - 子话题
TOPIC_CHILDREN_URL = TOPIC_DETAIL_URL + '/children'
# topic.children - GET - 父话题
TOPIC_PARENTS_URL = TOPIC_DETAIL_URL + '/parent'
# topic.unanswered_questions - GET - 未回答的问题
TOPIC_UNANSWERED_QUESTION = TOPIC_DETAIL_URL + '/unanswered_questions'
# topic.index - GET - 话题索引
TOPIC_INDEX_URL = TOPIC_DETAIL_URL + '/topic_index'
# topic.followers - GET - 关注者
# me.follow - POST - 关注话题
TOPIC_FOLLOWERS_URL = TOPIC_DETAIL_URL + '/followers'
# me.follow - DELETE - 取消关注
TOPIC_CANCEL_FOLLOW_URL = TOPIC_FOLLOWERS_URL + '/{}'
# ----- 收藏夹相关 -----
# collection - GET - 详情
# me.delete - DELETE - 删除收藏夹
COLLECTION_DETAIL_URL = ZHIHU_API_ROOT + '/collections/{}'
# collection.contents - GET - 所有收藏的内容(包括答案和文章)
COLLECTION_CONTENTS_URL = COLLECTION_DETAIL_URL + '/contents'
# collection.comments - GET - 评论
COLLECTION_COMMENTS_URL = COLLECTION_DETAIL_URL + '/comments'
# collection.followers - GET - 粉丝
# me.follow - POST - 关注专栏
COLLECTION_FOLLOWERS_URL = COLLECTION_DETAIL_URL + '/followers'
# me.follow - DELETE - 取消关注
COLLECTION_CANCEL_FOLLOW_URL = COLLECTION_FOLLOWERS_URL + '/{}'
# ----- 专栏相关 -----
# column - GET - 详情
COLUMN_DETAIL_URL = ZHIHU_API_ROOT + '/columns/{}'
# column.articles - GET - 文章
COLUMN_ARTICLES_URL = COLUMN_DETAIL_URL + '/articles'
# column.followers - GET - 关注者
# me.follow - POST - 关注专栏
COLUMN_FOLLOWERS_URL = COLUMN_DETAIL_URL + '/followers'
# me.follow - DELETE - 取消关注
COLUMN_CANCEL_FOLLOW_URL = COLUMN_FOLLOWERS_URL + '/{}'
# ----- 文章相关 -----
# article - GET - 详情
# me.delete - DELETE - 删除文章
ARTICLE_DETAIL_URL = ZHIHU_API_ROOT + '/articles/{}'
# article.vote - GET - 获取点赞用户(无效)
# me.vote - POST - 点赞
ARTICLE_VOTE_URL = ARTICLE_DETAIL_URL + '/voters'
# article.comments - GET - 评论
ARTICLE_COMMENTS_URL = ARTICLE_DETAIL_URL + '/comments'
# me.collect - PUT - 收藏
ARTICLE_COLLECT_URL = ARTICLE_DETAIL_URL + '/collections'
# ----- 评论相关 -----
# me.comment - POST - 发表评论
SEND_COMMENT_URL = ZHIHU_API_ROOT + '/comments'
# me.delete - DELETE - 删除评论
COMMENT_DETAIL_URL = ZHIHU_API_ROOT + '/comments/{}'
# comment.replies - GET - 评论的回复
COMMENT_REPLIES_URL = COMMENT_DETAIL_URL + '/replies'
# comment.conversation - GET - 评论的对话
COMMENT_CONVERSION_URL = COMMENT_DETAIL_URL + '/conversation'
# me.vote - POST - 给评论点赞
COMMENT_VOTE_URL = COMMENT_DETAIL_URL + '/voters'
# me.vote - DELETE - 取消点赞
COMMENT_CANCEL_VOTE_URL = COMMENT_VOTE_URL + '/{}'
# ----- Pin 分享相关 -----
# pin - GET - 详情
PIN_DETAIL_URL = ZHIHU_API_ROOT + '/pins/{}'
# pin.comments - GET - 评论
PIN_COMMENTS_URL = PIN_DETAIL_URL + '/comments'
# pin.voters - GET - 点赞者
PIN_VOTERS_URL = PIN_DETAIL_URL + '/likers'
# ----- Live 相关 -----
# live - GET - 详情
LIVE_DETAIL_URL = ZHIHU_API_ROOT + '/lives/{}'
# live.participants - GET - Live 参与者
# 后两个 API 只会给出是好友的参与者和不是好友的参与者,目前均未使用,因为含义不是很清楚
LIVE_MEMBERS_URL = LIVE_DETAIL_URL + '/members'
LIVE_MEMBERS_FRIENDS_URL = LIVE_DETAIL_URL + '/members/friends'
LIVE_MEMBERS_NON_FRIENDS_URL = LIVE_DETAIL_URL + '/members/nonfriends'
# live.related - GET - 相关 Live
LIVE_RELATED_URL = LIVE_DETAIL_URL + '/related'
# client.lives_ongoing - GET - 所有正在开放的 Live
# LiveTag.lives_ongoing - GET - 所有正在开放的 Live (需要附加 query 参数 tags = <tagid>)
LIVE_ONGOING_URL = ZHIHU_API_ROOT + '/lives/ongoing'
# client.lives_ended - GET - 所有已结束的 Live
# LiveTag.lives_ended - GET - 所有已结束的 Live (需要附加 query 参数 tags = <tagid>)
LIVE_ENDED_URL = ZHIHU_API_ROOT + '/lives/ended'
# client.live_tags - GET - 所有 Live 标签(分类)
LIVE_TAGS_URL = ZHIHU_API_ROOT + '/lives/tags'
# live.tickets - GET - Live 票价
LIVE_TICKETS_URL = LIVE_DETAIL_URL + '/apply'
LIVE_TICKETS_QUIET_URL = LIVE_TICKETS_URL + '/quiet' # 座位已满时的票价接口
LIVE_TICKETS_ENDED_URL = LIVE_TICKETS_URL + '/ended' # Live 已结束的票价接口
# me.follow - POST - 感兴趣 Live
LIVE_LIKE_URL = LIVE_DETAIL_URL + '/like'
# ----- 通知相关(暂时未使用) -----
NOTIFICATION_CONTENTS_URL = ZHIHU_API_ROOT + '/notifications/contents'
NOTIFICATION_LIKES_URL = ZHIHU_API_ROOT + '/notifications/likes'
NOTIFICATION_FOLLOWS_URL = ZHIHU_API_ROOT + '/notifications/follows'
# ----- 私信相关 -----
# me.whispers - GET - 获取用户私信对话列表
WHISPERS_URL = ZHIHU_API_ROOT + '/inbox'
# whisper.messages - GET - 获取用户某一对话的消息列表
MESSAGES_URL = ZHIHU_API_ROOT + '/messages'
# me.message - POST - 发送私信
SEND_MESSAGE_URL = ZHIHU_API_ROOT + '/messages'
# ----- 搜索 -----
SEARCH_API_URL = ZHIHU_API_ROOT + '/search_v3'
# ----- 其他操作 -----
# me.block - POST - 屏蔽用户
BLOCK_PEOPLE_URL = ZHIHU_API_ROOT + '/settings/blocked_users'
# me.block - DELETE - 取消屏蔽用户
CANCEL_BLOCK_PEOPLE_URL = BLOCK_PEOPLE_URL + '/{}'
# me.feeds - GET - 用户首页 Feed
FEEDS_URL = ZHIHU_API_ROOT + '/topstory'
##############################
# ----- Zhihu Web URLs ----- #
##############################
ZHIHU_WEB_ROOT = 'https://www.zhihu.com'
re_answer_url = re.compile(
r'^(?:https?://)?www.zhihu.com/question/\d+/answer/(\d+)/?$')
"""
答案 URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_article_url = re.compile(r'^(?:https?://)?zhuanlan.zhihu.com/p/(\d+)/?$')
"""
文章 URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_collection_url = re.compile(
r'^(?:https?://)?www.zhihu.com/collection/(\d+)/?$')
"""
收藏夹 URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_column_url = re.compile(r'^(?:https?://)?zhuanlan.zhihu.com/([^/ ]+)/?$')
"""
专栏 URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_live_url = re.compile(r'^(?:https?://)?www.zhihu.com/lives/(\d+)/?$')
"""
Live URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_people_url = re.compile(r'^(?:https?://)?www.zhihu.com/people/([^/ ]+)/?$')
"""
用户 URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_pin_url = re.compile(r'^(?:https?://)?www.zhihu.com/pin/(\d+)/?$')
"""
Pin URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_question_url = re.compile(r'^(?:https?://)?www.zhihu.com/question/(\d+)/?$')
"""
问题 URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
re_topic_url = re.compile(r'^(?:https?://)?www.zhihu.com/topic/(\d+)/?$')
"""
话题 URL 的正则,用于 :any:`ZhihuClient.from_url` 方法。
"""
RE_TYPE_MAP = {
# RE type
re_answer_url: 'answer',
re_article_url: 'article',
re_collection_url: 'collection',
re_column_url: 'column',
re_live_url: 'live',
re_pin_url: 'pin',
re_people_url: 'people',
re_question_url: 'question',
re_topic_url: 'topic',
}
"""
用于 :any:`zhihu_obj_url_parse` 方法,键是正则,值为类型名。
""" | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/urls.py | urls.py |
from __future__ import unicode_literals
from .base import Base
from .generator import generator_of
from .normal import normal_attr
from .other import other_obj
from .urls import (
TOPIC_ACTIVITIES_URL,
TOPIC_BEST_ANSWERERS_URL,
TOPIC_BEST_ANSWERS_URL,
TOPIC_CHILDREN_URL,
TOPIC_DETAIL_URL,
TOPIC_FOLLOWERS_URL,
TOPIC_INDEX_URL,
TOPIC_PARENTS_URL,
TOPIC_UNANSWERED_QUESTION,
)
from .utils import build_zhihu_obj_from_dict, int_id
__all__ = ['Topic', 'TopicIndex', "TopicIndexSection"]
class Topic(Base):
@int_id
def __init__(self, tid, cache, session):
super(Topic, self).__init__(tid, cache, session)
def _build_url(self):
return TOPIC_DETAIL_URL.format(self.id)
# ---- simple info -----
@property
@normal_attr()
def avatar_url(self):
return None
@property
@normal_attr('best_answers_count')
def best_answer_count(self):
return None
@property
def best_answers_count(self):
return self.best_answer_count
@property
@normal_attr()
def excerpt(self):
return None
@property
def father_count(self):
return self.parent_count
@property
@normal_attr('followers_count')
def follower_count(self):
return None
@property
def followers_count(self):
return self.follower_count
@property
@other_obj("TopicIndex", module_filename='topic')
def index(self):
"""
话题索引
:rtype: :any:`TopicIndex`
"""
return {'id': self.id}
@property
@normal_attr()
def introduction(self):
return None
@property
@normal_attr()
def name(self):
return None
@property
@normal_attr('father_count')
def parent_count(self):
return None
@property
@normal_attr('questions_count')
def question_count(self):
return None
@property
def questions_count(self):
return self.question_count
@property
@normal_attr()
def unanswered_count(self):
return None
# ----- generators -----
@property
@generator_of(TOPIC_ACTIVITIES_URL, 'TopicActivity')
def activities(self):
"""
:any:`Question` 和 :any:`Answer` 的混合迭代器,使用时注意判断。
.. code-block:: Python
for act in topic.activities:
if isinstance(act, Answer):
# pass
else:
assert(isinstance(act, Question))
# pass
"""
return None
@property
@generator_of(TOPIC_BEST_ANSWERS_URL, 'answer')
def best_answers(self):
"""
精华回答
"""
return None
@property
@generator_of(TOPIC_BEST_ANSWERERS_URL, 'people')
def best_answerers(self):
"""
好像叫,最佳回答者吧……
best_answerers……知乎真会起名字……
"""
return None
@property
@generator_of(TOPIC_CHILDREN_URL, 'topic')
def children(self):
"""
子话题
"""
return None
@property
@generator_of(TOPIC_FOLLOWERS_URL, 'people')
def followers(self):
return None
@property
@generator_of(TOPIC_PARENTS_URL, 'topic')
def parents(self):
"""
父话题
"""
return None
@property
@generator_of(TOPIC_UNANSWERED_QUESTION, 'question')
def unanswered_questions(self):
"""
其实基本上就等于「所有问题」,知乎客户端上的所有问题选项卡就是用的这个接口。
"""
return None
class TopicIndex(Base):
_SECTIONS_KEY = 'topic_index_modules'
_EDITORS_KEY = 'topic_index_editors'
@int_id
def __init__(self, tiid, cache, session):
super(TopicIndex, self).__init__(tiid, cache, session)
self._get_data()
def _build_url(self):
return TOPIC_INDEX_URL.format(self.id)
# ---------- simple info ---------
@property
def id(self):
"""
没什么用,获取到的其实是对应 Topic 的 ID
"""
return self._id
# ---------- generators ---------
@property
def sections(self):
"""
话题索引分为各个部分,这个属性是各个部分的迭代器。
用法示例:
.. code-block:: Python
for section in topic.index.sections:
print(section.title)
# Other operator of section
:rtype: :any:`TopicIndexSection` 的迭代器
"""
for data in self._data[self._SECTIONS_KEY]:
yield TopicIndexSection(data, self._session)
@property
def editors(self):
"""
.. code-block:: Python
for people in topic.index.editors:
print(people.name)
# Other operator of people
:rtype: 索引编辑者(:any:`People` 对象)的迭代器。
"""
for data in self._data[self._EDITORS_KEY]:
yield build_zhihu_obj_from_dict(data, self._session)
class TopicIndexSection(object):
"""
自身是一个 :any:`Question` 对象的迭代器,附带了 title 属性和另一个相关话题的迭代器。
.. code-block:: Python
for section in topic.index.sections:
print(section.title, ':')
for question in section:
print(question.title)
for topic in section.related_topics:
print(topic.name)
"""
_TYPE_KEY = 'type'
_SECTION_INDICATOR = 'topic_index_module'
_SECTION_DATA_LIST_KEY = 'items'
_RELATED_TOPICS_KEY = 'relatedtopics'
_TITLE_KEY = 'title'
def __init__(self, data, session):
if data[self._TYPE_KEY] != self._SECTION_INDICATOR:
raise ValueError("Must be a {} type dict, {} provided".format(
self._SECTION_INDICATOR, data
))
self._data = data
self._session = session
self._index = 0
self._len = len(self._data[self._SECTION_DATA_LIST_KEY])
# ---------- simple info ---------
@property
def title(self):
return self._data[self._TITLE_KEY]
# ---------- generator ----------
@property
def related_topics(self):
"""
:rtype: :any:`Topic` 对象的迭代器
"""
for data in self._data[self._RELATED_TOPICS_KEY]:
yield build_zhihu_obj_from_dict(data, self._session)
# ---------- self as a generator ---------
def __iter__(self):
self._index = 0
return self
def __len__(self):
return self._len
def __next__(self):
try:
obj = self[self._index]
except IndexError:
self._index = 0
raise StopIteration
self._index += 1
return obj
next = __next__
def __getitem__(self, item):
if not isinstance(item, int):
raise TypeError('Need an int as index, not {0}'.format(type(item)))
if item >= self._len:
raise IndexError()
data = self._data[self._SECTION_DATA_LIST_KEY][item]
return build_zhihu_obj_from_dict(data, self._session) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/topic.py | topic.py |
from __future__ import unicode_literals
import abc
from .normal import normal_attr
from .utils import INT_ID_KEY
from ..exception import MyJSONDecodeError, GetDataErrorException
__all__ = ['Base']
class Base(object):
def __init__(self, zhihu_obj_id, cache, session):
"""
.. note:: Cache 与 Data
:any:`Base` 类的 ``cache`` 参数表示已知的属性值。一般由另一个对象的
JSON 数据中的一个属性充当。
比如 :any:`Answer.author` 方法,由于在请求 :any:`Answer` 的数据时,
原始 JSON 数据中就有关于作者的一些简单信息。比如 name,id,headline。
在使用此方法时就会将这些不完整的数据传递到 ``answer`` 对象 (类型为
:any:`People`)的 ``cache`` 中。这样一来,在执行
``answer.author.name`` 时,取出名字的操作可以省去一次网络请求。
:any:`normal_attr`,:any:`other_obj` 和 :any:`streaming` 装饰器都会
优先使用 ``cache`` 中的数据,当获取失败时才会调用
:any:`_get_data` 方法请求数据。
:param zhihu_obj_id: 构建知乎对象所用的 ID
:param dict cache: 缓存数据,就是已知的这个对象的属性集
:param session: 网络请求 Session
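下面用一个小例子示意 cache 的作用(假设 client 为已登录的 :any:`ZhihuClient`,ID 为虚构):
.. code-block:: python
    answer = client.answer(1234567)
    author = answer.author        # author 的 cache 来自 answer 的 JSON 数据
    print(author.name)            # 直接从 cache 中取,无需网络请求
    print(author.voteup_count)    # cache 中没有,此时才请求 People 的完整数据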
"""
self._id = zhihu_obj_id
self._cache = cache
self._session = session
self._data = None
self._refresh_times = 0
@property
@normal_attr()
def id(self):
return getattr(self, '_id', None)
def _get_data(self):
"""
调用知乎 API 接口获取数据的主要方法。
url 从 :any:`_build_url` 中获取。
method 从 :any:`_method` 中获取。
params 从 :any:`_build_params` 中获取。
data 从 :any:`_build_data` 中获取。
:raise: 当返回的数据无法被解析成 JSON
或 JSON 中含有 'message' 字段时,会抛出 :any:`GetDataErrorException`
"""
if self._data is None:
url = self._build_url()
res = self._session.request(
self._method(),
url=url,
params=self._build_params(),
data=self._build_data(),
)
e = GetDataErrorException(
url,
res,
'a valid Zhihu {0} JSON data'.format(self.__class__.__name__),
)
try:
json_dict = res.json()
if 'error' in json_dict:
raise e
id_field = getattr(self, 'ID_FIELD_NAME', 'id')
if hasattr(self, INT_ID_KEY) and id_field in json_dict:
json_dict.update({id_field: int(json_dict[id_field])})
self._data = json_dict
except MyJSONDecodeError:
raise e
@abc.abstractmethod
def _build_url(self):
"""
子类 **必须** 重载这一函数,提供获取数据的 API URL。
一般格式为 ZHIHU_XXX_URL.format(self.id)
"""
return ''
# noinspection PyMethodMayBeStatic
def _build_params(self):
"""
子类可以重载这一函数,提供请求 API 时要传递的参数。默认值为 None。
"""
return None
# noinspection PyMethodMayBeStatic
def _build_data(self):
"""
子类可以重载这一函数,提供请求 API 时要传递的数据。默认值为 None。
"""
return None
# noinspection PyMethodMayBeStatic
def _method(self):
"""
子类可以重载这一函数,提供 HTTP 请求的类型,默认值为 GET。
"""
return 'GET'
def refresh(self):
"""
删除自身的 cache 和 data,下一次获取属性会重新向知乎发送请求,获取最新数据。
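用法示意(假设 answer 为任意知乎类对象,ID 为虚构):
.. code-block:: python
    answer = client.answer(1234567)
    print(answer.voteup_count)
    answer.refresh()              # 清空 cache 和 data
    print(answer.voteup_count)    # 属性访问触发重新请求,得到最新数据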
"""
self._data = self._cache = None
self._refresh_times += 1
@property
def pure_data(self):
"""
调试用。返回现在对象内的 JSON 数据。
如果对象没有 cache 也没有 data,会自动发送数据请求 data。
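用法示意(仅用于调试输出):
.. code-block:: python
    import json
    print(json.dumps(answer.pure_data, ensure_ascii=False, indent=2))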
"""
if not self._cache:
self._get_data()
return {
'cache': self._cache,
'data': self._data,
} | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/base.py | base.py |
from __future__ import unicode_literals
from .base import Base
from .generator import generator_of
from .normal import normal_attr
from .streaming import streaming
from .urls import (
PEOPLE_ACTIVITIES_URL,
PEOPLE_ANSWERS_URL,
PEOPLE_ARTICLES_URL,
PEOPLE_COLLECTIONS_URL,
PEOPLE_COLUMNS_URL,
PEOPLE_DETAIL_URL,
PEOPLE_FOLLOWERS_URL,
PEOPLE_FOLLOWING_COLUMNS_URL,
PEOPLE_FOLLOWING_QUESTIONS_URL,
PEOPLE_FOLLOWING_TOPICS_URL,
PEOPLE_FOLLOWINGS_URL,
PEOPLE_LIKED_LIVES_URL,
PEOPLE_LIVES_URL,
PEOPLE_PINS_URL,
PEOPLE_QUESTIONS_URL,
)
from .utils import ConstValue, build_zhihu_obj_from_dict
from ..exception import GetDataErrorException
__all__ = ['ANONYMOUS', 'Badge', 'People']
class _Anonymous(object):
def __init__(self):
self.id = 0
self.name = '匿名用户'
def __getattr__(self, _):
# 匿名用户除了姓名和 ID 以外所有属性均为 None
return None
ANONYMOUS = _Anonymous()
"""
.. role:: py_code(code)
:language: python
统一的匿名用户对象,可以使用 :py_code:`if people is ANONYMOUS:` 判断是否是匿名用户
"""
class People(Base):
def __new__(cls, pid, cache, session):
if pid == '0':
return ANONYMOUS
else:
return super(People, cls).__new__(cls)
def __init__(self, pid, cache, session):
self._over_e = None
super(People, self).__init__(pid, cache, session)
def _build_url(self):
return PEOPLE_DETAIL_URL.format(self.id)
@property
def over(self):
"""
尝试获取用户信息,如果出错返回 True,没出错返回 False。
一般来说出错的情况只有「被知乎反屏蔽系统限制」……所以这函数起名叫 over =。=
调用结果如果是 True,则可使用 :any:`over_reason` 函数获取原因。
.. note:: 例子
.. code-block:: python
for follower in me.followers:
if follower.over:
print(follower.over_reason)
continue
print(follower.name)
# ... process follower data
.. note:: 也可以不用此函数,用 ``try...catch`` 来处理也行
.. code-block:: python
from zhihu_oauth import ZhihuClient, GetDataErrorException
for follower in me.followers:
try:
# get and process user data
except GetDataErrorException as e:
print('Get data error', e.reason)
:return: 是否被限制
:rtype: bool
"""
if self._over_e is not None:
return True
try:
self._get_data()
return False
except GetDataErrorException as e:
self._over_e = e
return True
@property
def over_reason(self):
"""
获取无法得到用户信息的原因。
.. warning::
此方法只能在 :any:`over` 方法调用结果为 True 之后才能调用。
:rtype: str
"""
if self._over_e is None:
return None
return self._over_e.reason if \
self._over_e.reason else \
str(self._over_e)
# ---------- simple info ---------
@property
@normal_attr()
def answer_count(self):
return None
@property
@normal_attr('articles_count')
def article_count(self):
return None
@property
def articles_count(self):
return self.article_count
@property
@normal_attr()
def avatar_url(self):
return None
@property
@streaming('badge', use_cache=False)
def _badge_data(self):
return []
@property
@streaming('org_detail', use_cache=False)
def _org_data(self):
return {}
@property
def badge(self):
"""
用户的徽章信息,获取到的是 :any:`Badge` 对象,使用方法请看 :any:`Badge` 类的文档。
目前包括「个人认证」,「话题优秀回答者」,「已认证机构」三种,
我也不知道有没有更多的呀,如果发现这个功能有 Bug 或者还有更多类型的徽章,
请到 Github 里提 Issue 或者直接联系我。
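典型用法示意(people 为任意 :any:`People` 对象):
.. code-block:: python
    badge = people.badge
    if badge.has_badge:
        if badge.has_identity:
            print(badge.identity)
        if badge.is_best_answerer:
            for topic in badge.topics:
                print(topic.name)
        if badge.is_organization:
            print(badge.org_name, badge.org_industry)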
:rtype: Badge
"""
return Badge(self._badge_data, self._org_data, self._session)
@property
@streaming('badge')
def _cached_badge_data(self):
return []
@property
@streaming('org_detail')
def _cached_org_data(self):
return {}
@property
def cached_badge(self):
return Badge(
self._cached_badge_data,
self._cached_org_data,
self._session
)
@property
@streaming()
def business(self):
"""
用户所在行业。
常见返回值:
.. code-block:: python
{
'introduction': '',
'id': '19619368',
'url': 'https://api.zhihu.com/topics/19619368',
'type': 'topic',
'avatar_url': 'http://pic1.zhimg.com/e82bab09c_s.jpg',
'name': '计算机软件',
'excerpt': '',
}
使用属性时必须先判断是否有效,如
.. code-block:: python
if people.business:
data = people.business.name
"""
return {}
@property
@normal_attr('favorited_count')
def collected_count(self):
"""
被收藏次数。
"""
return None
@property
@normal_attr('favorite_count')
def collection_count(self):
"""
收藏夹数量。
"""
return None
@property
@normal_attr('columns_count')
def column_count(self):
return None
@property
def columns_count(self):
return self.column_count
@property
@normal_attr()
def description(self):
return None
@property
@streaming()
def educations(self):
"""
教育信息。
常见返回值:
.. code-block:: python
[
{
'major': {
'introduction': '计算机专业。<br>大众认为会是唯一会“修电脑”的专业。',
'id': '19639658',
'url': 'https://api.zhihu.com/topics/19639658',
'type': 'topic',
'avatar_url': 'http://pic2.zhimg.com/7e2fe4615_s.jpg',
'name': '计算机科学与技术',
'excerpt': '计算机专业。大众认为会是唯一会“修电脑”的专业。',
},
'school': {
'introduction': '',
'id': '1234567',
'url': 'https://api.zhihu.com/topics/1234567',
'type': 'topic',
'avatar_url': 'http://pic4.zhimg.com/8e6y3xd47_s.jpg',
'name': 'XX 大学',
'excerpt': '',
},
},
]
使用属性时必须先判断存不存在,如:
.. code-block:: python
for education in people.educations:
if 'school' in education:
data += education.school.name
if 'major' in education:
data += education.major.name
"""
return []
@property
@streaming()
def employments(self):
"""
职业信息。
常见返回值:
.. code-block:: python
[
{
'job': {
'introduction': '',
'url': 'https://api.zhihu.com/topics/19551336',
'avatar_url': 'http://pic3.zhimg.com/4eac47b76_s.jpg',
'excerpt': '',
'type': 'topic',
'name': '测试',
'id': '19551336',
},
'company': {
'excerpt': '',
'url': '',
'avatar_url': 'http://pic1.zhimg.com/e82bab09c_s.jpg',
'introduction': '',
'type': 'topic',
'name': 'Gayhub',
'experience ': '',
'id': '',
},
},
],
使用属性时必须先判断存不存在,如:
.. code-block:: python
for employment in people.employments:
if 'company' in employment:
data += employment.company.name
if 'job' in employment:
data += employment.job.name
"""
return []
@property
def favorite_count(self):
return self.collection_count
@property
def favorited_count(self):
return self.collected_count
@property
@normal_attr()
def follower_count(self):
return None
@property
@normal_attr('following_columns_count')
def following_column_count(self):
return None
@property
@normal_attr()
def following_count(self):
return None
@property
@normal_attr()
def following_question_count(self):
return None
@property
@normal_attr()
def following_topic_count(self):
return None
@property
@normal_attr()
def gender(self):
"""
性别。
======= ==========
值(int) 说明
======= ==========
0 女
1 男
-1 未填
======= ==========
.. warning:: 关于未填性别
目前版本的知乎 API 貌似未填性别的用户这个值也会变成 1
已经向知乎反馈了 Bug,但是不知道什么时候才会修。
"""
return None
@property
@normal_attr()
def headline(self):
"""
就是那个显示在名字后面的,和签名类似的东西。
"""
return None
@property
@normal_attr()
def hosted_live_count(self):
return None
@property
@normal_attr()
def independent_articles_count(self):
return None
@property
@normal_attr()
def is_bind_sina(self):
return None
@property
@normal_attr()
def is_blocking(self):
return None
@property
@normal_attr('is_followed')
def is_follower(self):
"""
:return: 此人是否关注当前登录用户。对于当前登录用户值为 False。
:rtype: bool
"""
return None
@property
@normal_attr()
def is_following(self):
"""
:return: 当前登录用户是否已关注此人。对于当前登录用户值为 False。
:rtype: bool
"""
return None
@property
@normal_attr()
def live_count(self):
"""
包括参与的和组织的 Live。
"""
return None
@property
@streaming()
def locations(self):
"""
常见返回值。
.. code-block:: python
[
{
'introduction': '天津,简称津,地处华北平原,balabala,
'url': 'https://api.zhihu.com/topics/19577238',
'avatar_url': 'http://pic4.zhimg.com/acad405e7_s.jpg',
'excerpt': '天津,简称津,地处华北平原 balabalabala',
'type': 'topic',
'name': '天津',
'id': '19577238',
},
],
使用属性时基本不用判断存不存在,如:
.. code-block:: python
for location in people.locations:
data += location.name
"""
return []
@property
@normal_attr()
def name(self):
return None
@property
@normal_attr()
def participated_live_count(self):
return None
@property
@normal_attr('pins_count')
def pin_count(self):
return None
@property
@normal_attr()
def question_count(self):
return None
@property
@normal_attr()
def shared_count(self):
return None
@property
@normal_attr()
def sina_weibo_name(self):
return None
@property
@normal_attr()
def sina_weibo_url(self):
return None
@property
@normal_attr()
def thanked_count(self):
return None
@property
@normal_attr()
def voteup_count(self):
return None
# ---------- generators ---------
@property
@generator_of(PEOPLE_ACTIVITIES_URL, 'activity')
def activities(self):
return None
@property
@generator_of(PEOPLE_ANSWERS_URL)
def answers(self):
return None
@property
@generator_of(PEOPLE_ARTICLES_URL)
def articles(self):
return None
@property
@generator_of(PEOPLE_COLLECTIONS_URL)
def collections(self):
return None
@property
@generator_of(PEOPLE_COLUMNS_URL)
def columns(self):
return None
@property
@generator_of(PEOPLE_FOLLOWERS_URL, 'people')
def followers(self):
"""
貌似知乎 API 有个限制,只允许获取前 5020 个粉丝,这好烦阿……
"""
return None
@property
@generator_of(PEOPLE_FOLLOWING_COLUMNS_URL, 'column')
def following_columns(self):
return None
@property
@generator_of(PEOPLE_FOLLOWING_QUESTIONS_URL, 'question')
def following_questions(self):
return None
@property
@generator_of(PEOPLE_FOLLOWING_TOPICS_URL, 'topic')
def following_topics(self):
return None
@property
@generator_of(PEOPLE_FOLLOWINGS_URL, 'people')
def followings(self):
return None
@property
@generator_of(PEOPLE_LIVES_URL)
def lives(self):
"""
举办和参加的 Live
"""
return None
@property
@generator_of(PEOPLE_LIKED_LIVES_URL, 'live')
def liked_lives(self):
"""
喜爱的 Live
.. warning:: 此接口未测试,不保证可用性。
"""
return None
@property
@generator_of(PEOPLE_PINS_URL)
def pins(self):
return None
@property
@generator_of(PEOPLE_QUESTIONS_URL)
def questions(self):
return None
class Badge(object):
IDENTITY = ConstValue('identity')
BEST_ANSWERER = ConstValue('best_answerer')
ORGANIZATION = ConstValue('organization')
def __init__(self, badge, org, session):
self._badge_data = badge
self._org_data = org
self._session = session
@property
def has_badge(self):
"""
是否有徽章。
有「个人认证」,「话题最佳回答者」,「已认证机构」 徽章中的**任何一个**就返回 ``True``。
没有徽章返回 ``False``。
"""
return bool(self._badge_data or self._org_data)
@property
def has_identity(self):
"""
是否有「个人认证」徽章。
"""
return bool(self._badge_data) and any(
[x.type == self.IDENTITY for x in self._badge_data]
)
@property
def is_best_answerer(self):
"""
是否有「最佳回答者」徽章。
"""
return bool(self._badge_data) and any(
[x.type == self.BEST_ANSWERER for x in self._badge_data]
)
@property
def is_organization(self):
"""
是否为「已认证机构」。
"""
return bool(self._org_data)
@property
def identity(self):
"""
「个人认证」的描述。
如果没有个人认证返回 ``None``。
"""
for badge in self._badge_data:
if badge.type == self.IDENTITY:
return badge.description
return None
@property
def topics(self):
"""
是哪些话题的最佳回答者,返回的是话题的迭代器。
可以这样用:
.. code-block:: python
for topic in people.badge.topics:
print(topic.name)
:rtype: Iterable[Topic]
"""
from .topic import Topic
for badge in self._badge_data:
if badge.type == self.BEST_ANSWERER:
for topic_data in badge.topics:
yield build_zhihu_obj_from_dict(
topic_data.raw_data(), self._session, cls=Topic
)
break
@property
def org_name(self):
"""
认证的机构名称,绝大多数情况是公司名称。
如果不是认证机构返回 ``None``。
"""
if not self.is_organization:
return None
return self._org_data.organization_name
@property
def org_home_page(self):
"""
认证的机构主页。
如果不是认证机构返回 ``None``。
"""
if not self.is_organization:
return None
return self._org_data.home_page
@property
def org_industry(self):
"""
认证的机构所在行业。
如果不是认证机构返回 ``None``。
"""
if not self.is_organization:
return None
return self._org_data.industry | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/people.py | people.py |
from __future__ import unicode_literals
import functools
import copy
__all__ = ['StreamingJSON', 'streaming']
class StreamingJSON:
def __init__(self, json_data):
"""
通过 ``dict`` 或者 ``list`` 来创建对象。
"""
if not isinstance(json_data, (dict, list)):
raise ValueError('Need dict or list to build StreamingJSON.')
self._json = copy.deepcopy(json_data)
def raw_data(self):
"""
有可能某些用户不喜欢使用 ``.`` 操作符而偏爱用 ``[]`` 来取字典内的数据,
所以提供此方法返回未处理的数据 **的副本**,
修改此副本对此对象内部数据无影响。
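用法示意(以 :any:`People.locations` 为例):
.. code-block:: python
    locations = people.locations.raw_data()   # 得到 list 的副本
    if locations:
        print(locations[0]['name'])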
:return: 内部封装数据的副本
:rtype: dict|list
"""
return copy.deepcopy(self._json)
def __getattr__(self, item):
"""
重写 ``.`` 操作符。``item`` 参数为 ``.`` 后要取的属性。也即将 ``obj.xxx``
转换为 ``obj._json['xxx']``
重载后的 ``__getattr__`` 的流程为:
1. 判断 item 最后一个字符是不是 ``_``,若是则删去。这一步的作用是防止
item 与 Python 内置关键字冲突。 参见::any:`Question.redirection` 的
``from`` 数据以及 :ref:`说明 <tips-for-conflict-with-keyword>`。
2. 取出 ``obj = self._json[item]``,若不存在则抛出异常。
3. 如果 ``obj`` 是 ``dict`` 或者 ``list``, 返回 ``StreamingJSON(obj)``
4. 否则直接返回 ``obj``。
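针对第 1 步的简单示意(以文档中提到的 :any:`Question.redirection` 为例):
.. code-block:: python
    # from 与 Python 关键字冲突,属性名后加一个下划线即可
    data = question.redirection.from_    # 实际取的是 _json['from']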
"""
if isinstance(self._json, dict):
# 防止和 Python 内置关键字冲突
if item.endswith('_'):
item = item[:-1]
if item in self._json:
obj = self._json[item]
if isinstance(obj, (dict, list)):
return StreamingJSON(obj)
else:
return obj
else:
raise AttributeError('No attr {0} in my data {1}!'.format(
item, self._json))
else:
raise ValueError('Can\'t use XX.xxx in list-like obj {0}, '
'please use XX[num].'.format(self._json))
def __getitem__(self, item):
"""
重写 ``[]`` 操作符。item 参数为 ``[]`` 内数组下标。也即将 ``obj[0]``
转换为 ``obj._json[0]``。
如果 ``self._json`` 不是 ``list`` 型,或 ``item`` 不是 ``int`` 型,
则抛出 ``ValueError``。
如果取出的 ``obj`` 是 ``dict`` 或 ``list``,返回 ``StreamingJSON(obj)``
否则直接返回 ``obj``。
"""
if isinstance(self._json, list) and isinstance(item, int):
obj = self._json[item]
if isinstance(obj, (dict, list)):
return StreamingJSON(obj)
else:
return obj
raise ValueError('Can\'t use XX[num] in dict-like obj {0}, '
'please use XX.xxx.'.format(self._json))
def __iter__(self):
"""
重写迭代行为。如果迭代对象是 ``dict`` 或 ``list``,返回
``StreamingJSON(obj)``,否则直接返回。
"""
def _iter():
for x in self._json:
if isinstance(x, (dict, list)):
yield StreamingJSON(x)
else:
yield x
return _iter()
def __len__(self):
return len(self._json)
def __str__(self):
return str(self._json)
def __repr__(self):
return repr(self._json)
def __contains__(self, item):
return item in self._json
def __bool__(self):
return True if self._json else False
def __nonzero__(self):
return self.__bool__()
def streaming(name_in_json=None, use_cache=True):
"""
本装饰器的作用为:
1. 标识这个属性为流式 JSON 属性。
2. 自动从对象的数据中取出对应属性,构建成 :any:`StreamingJSON` 对象。
取数据流程如下:
1. 如果 ``use_cache`` 为真,转 2,否则转 3。
2. 尝试从 ``cache`` 中取需要的数据。失败转 3,成功转 5。
3. 如果 ``data`` 不存在,则调用知乎 API 获取。
4. 尝试从 ``data`` 中取需要的数据。失败则
将被装饰方法的调用结果视为取到的数据。
5. 如果取到数据是 ``dict`` 或 ``list`` 类型,则返回使用
:any:`StreamingJSON` 包装过的结果。如果不是则抛出 ``TypeError`` 异常。
.. seealso:: 关于 cache 和 data
请看 :any:`Base` 类中的\ :any:`说明 <Base.__init__>`。
:param name_in_json: 要取的数据在 JSON
中的名字。可空,默认为使用本装饰器的的方法名。
:param use_cache: 是否使用缓存的数据。默认为 ``True``。如果为
``False`` 则只使用 data。
:raise TypeError: 当最终取到的数据不是 ``dict`` 或 ``list`` 类型时。
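以 :any:`People.locations` 为例示意被装饰属性的用法(仅作演示):
.. code-block:: python
    for location in people.locations:    # 得到的是 StreamingJSON 对象
        print(location.name)             # 用 . 操作符访问内部数据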
"""
def wrappers_wrapper(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
name = name_in_json if name_in_json else func.__name__
if use_cache and self._cache and name in self._cache:
cache = self._cache[name]
else:
self._get_data()
if self._data and name in self._data:
cache = self._data[name]
else:
cache = func(self, *args, **kwargs)
if isinstance(cache, (dict, list)):
return StreamingJSON(cache)
else:
raise TypeError('Only dict and list can be StreamingJSON.')
return wrapper
return wrappers_wrapper | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/streaming.py | streaming.py |
from __future__ import unicode_literals
from .base import Base
from .generator import generator_of
from .other import other_obj
from .normal import normal_attr
from .streaming import streaming
from .utils import common_save, int_id
from .urls import (
ARTICLE_COMMENTS_URL,
ARTICLE_DETAIL_URL,
)
__all__ = ['Article']
class Article(Base):
@int_id
def __init__(self, aid, cache, session):
super(Article, self).__init__(aid, cache, session)
def _build_url(self):
return ARTICLE_DETAIL_URL.format(self.id)
# ----- simple info -----
@property
@other_obj('people')
def author(self):
return None
@property
@streaming()
def can_comment(self):
"""
.. seealso:: :any:`Answer.can_comment`
"""
return None
@property
@other_obj()
def column(self):
"""
文章所属专栏。
.. warning:: 当文章不属于任何专栏时值为 None,使用其属性前应先做检查。
"""
return None
@property
@normal_attr()
def comment_count(self):
return None
@property
@normal_attr()
def comment_permission(self):
"""
.. seealso:: :any:`Answer.comment_permission`
"""
return None
@property
@normal_attr()
def content(self):
return None
@property
@normal_attr()
def excerpt(self):
return None
@property
@normal_attr()
def image_url(self):
return None
@property
@streaming(use_cache=False)
def suggest_edit(self):
"""
.. seealso:: :any:`Answer.suggest_edit`
"""
return None
@property
@normal_attr()
def title(self):
return None
@property
@normal_attr('updated')
def updated_time(self):
return None
@property
@normal_attr()
def voteup_count(self):
return None
# ----- generators -----
@property
@generator_of(ARTICLE_COMMENTS_URL)
def comments(self):
return None
# TODO: article.voters, API 接口未知
# ----- other operate -----
def save(self, path='.', filename=None, invalid_chars=None):
"""
除了默认文件名是文章标题外,和 :any:`Answer.save` 完全一致。
.. seealso:: :any:`Answer.save`
.. note:: TIPS
建议的使用方法:
.. code-block:: python
for article in column.articles:
print(article.title)
article.save(column.title)
"""
if self._cache is None:
self._get_data()
common_save(path, filename, self.content, self.title, invalid_chars) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/article.py | article.py |
from __future__ import unicode_literals
import itertools
import warnings
from ..exception import (
MyJSONDecodeError,
UnexpectedResponseException,
CantGetTickets
)
from .base import Base
from .generator import generator_of
from .normal import normal_attr
from .other import other_obj
from .streaming import streaming
from .urls import (
LIVE_DETAIL_URL,
LIVE_ENDED_URL,
LIVE_MEMBERS_URL,
LIVE_ONGOING_URL,
LIVE_RELATED_URL,
LIVE_TICKETS_URL,
LIVE_TICKETS_ENDED_URL,
LIVE_TICKETS_QUIET_URL,
)
from .utils import build_zhihu_obj_from_dict, int_id
__all__ = ['Live', 'LiveTag', 'LiveTicket', 'LiveBadge']
class LiveBadge(Base):
@int_id # 0, 1, 2
def __init__(self, lbid, cache, session):
super(LiveBadge, self).__init__(lbid, cache, session)
def _build_url(self):
return None
@property
@normal_attr()
def avatar_url(self):
return None
@property
@normal_attr()
def name(self):
return None
class LiveTag(Base):
@int_id # 101, 102, ..., 201, 202, ..., 301, 302, ...
def __init__(self, ltid, cache, session):
super(LiveTag, self).__init__(ltid, cache, session)
def _build_url(self):
return None
@property
@normal_attr('available_num')
def available_count(self):
return None
@property
@normal_attr()
def created_at(self):
return None
@property
@normal_attr('live_num')
def live_count(self):
return None
@property
@normal_attr()
def name(self):
return None
@property
@normal_attr()
def score(self):
return None
# ----- generators -----
@property
@generator_of(LIVE_ONGOING_URL, 'LiveOfTag')
def lives_ongoing(self):
return None
@property
@generator_of(LIVE_ENDED_URL, 'LiveOfTag')
def lives_ended(self):
return None
@property
def lives(self):
from ..helpers import shield
for live in itertools.chain(
shield(self.lives_ongoing),
shield(self.lives_ended)
):
yield live
class LiveTicket(Base):
ID_FIELD_NAME = 'product_id'
@int_id # id 其实是 Live ID
def __init__(self, product_id, cache, session):
super(LiveTicket, self).__init__(product_id, cache, session)
def _build_url(self):
return None
@property
@other_obj('LiveBadge', module_filename='live')
def badge(self):
return None
@property
@normal_attr(ID_FIELD_NAME)
def id(self):
return self._id
@property
@streaming('price')
def __price(self):
return None
@property
def price(self):
return self.__price.amount
@property
def price_unit(self):
return self.__price.unit
class Live(Base):
@int_id
def __init__(self, lid, cache, session):
super(Live, self).__init__(lid, cache, session)
def _build_url(self):
return LIVE_DETAIL_URL.format(self.id)
# ----- simple info -----
@property
@normal_attr()
def alert(self):
"""
提示语,就是客户端里显示为淡蓝色背景的那一块文字。
"""
return None
@property
@normal_attr()
def can_speak(self):
return None
@property
@normal_attr()
def created_at(self):
return None
@property
@normal_attr()
def description(self):
return None
@property
@normal_attr()
def ends_at(self):
return None
@property
@normal_attr()
def ends_in(self):
"""
正数表示还剩多久结束,0 应该表示已经结束了,如果是负数表示…………表示啥呢?
"""
return None
@property
@streaming('fee')
def __fee(self):
return None
@property
def fee(self):
"""
费用(一般这是最低票价),不过数值是 x 100 的,比如 999 表示 9.99
"""
return self.__fee.amount
@property
def fee_unit(self):
"""
费用的单位,一般就是 RMB 吧…………
"""
return self.__fee.unit
@property
@normal_attr()
def feedback_score(self):
"""
反馈评分,应该是 0 - 5 吧。
"""
return None
@property
@normal_attr()
def has_feedback(self):
"""
是否有反馈?
"""
return None
@property
@normal_attr()
def is_admin(self):
return None
@property
@normal_attr()
def in_promotion(self):
"""
是否处于促销中
"""
return None
@property
@normal_attr()
def is_muted(self):
return None
@property
@normal_attr()
def liked(self):
return None
@property
def liked_count(self):
return self.liked_num
@property
@normal_attr()
def liked_num(self):
return None
@property
@normal_attr()
def note(self):
return None
@property
@normal_attr()
def purchasable(self):
"""
可否购买?
"""
return None
@property
@normal_attr()
def role(self):
"""
返回一个字符串,表示于 Live 的关系。
'visitor' 表示未参与 Live。
‘audience’ 表示参与了 Live,作为观众。
'<一个我不知道的值>' 表示是组织者,因为我没开过 Live,所以不知道是什么值。
'<另一个我不知道的值>' 表示是协作者,我也没协助过别人,所以也不知道 =。=
"""
return None
@property
@streaming('seats')
def seat(self):
"""
Live 参与情况
常见返回值:
.. code-block:: javascript
{
"max": 500, // 最多 500 人参与
"taken": 278, // 已有 278 人参与
}
做了两个 shortcut 属性 :any:`seat_max` 和 :any:`seat_taken`,可以直接使用。
"""
return None
@property
def seat_max(self):
"""
最大参与人数,其实是从 :any:`seat` 属性里取的。
"""
return self.seat.max
@property
def seat_taken(self):
"""
已参与人数,其实是从 :any:`seat` 属性里取的。
"""
return self.seat.taken
@property
@streaming('speaker')
def __speaker(self):
return None
@property
@other_obj('people', 'READ_FROM_RETURN_VALUE')
def speaker(self):
"""
演讲者,:any:`People` 对象。
"""
return self.__speaker.member.raw_data()
@property
@normal_attr()
def starts_at(self):
return None
@property
@normal_attr()
def subject(self):
"""
Live 的主题,其实就是标题,所以有同功能的属性 :any:`Live.title`
"""
return None
@property
def title(self):
"""
.. seealso:: :any:`subject`
"""
return self.subject
# ----- generators -----
@property
@streaming('cospeakers')
def __cospeakers(self):
return None
@property
def cospeakers(self):
from .people import People
# noinspection PyTypeChecker
for people in self.__cospeakers:
yield build_zhihu_obj_from_dict(
people.raw_data(), self._session, cls=People
)
@property
@generator_of(LIVE_MEMBERS_URL, 'PeopleWithLiveBadge')
def participants(self):
"""
参与 Live 的人,这个生成器用法比较奇特,请看下面的例子:
.. code-block:: python
live = client.live(789426202925346816)
for role, badge, people in live.participants:
print(role, badge.name, people.name)
其中 role 为 'audience' 表示观众,除了这个值暂时没发现别的取值。
badge 为一个 :any:`LiveBadge` 对象,一般能用到的也就 id 和 name 属性。
第三个 people 就是标准的 :any:`People` 对象了。
"""
return None
@property
@generator_of(LIVE_RELATED_URL, 'live')
def related(self):
return None
@property
@streaming('tags')
def __tags(self):
return None
@property
def tags(self):
"""
返回 :any:`LiveTag` 对象的生成器,但目前看来应该每个 Live 只有一个 Tag。
"""
# noinspection PyTypeChecker
for tag in self.__tags:
yield LiveTag(tag.id, tag.raw_data(), self._session)
def __try_ticket_request(self, url):
res = self._session.post(url)
try:
data = res.json()
return data
except MyJSONDecodeError:
raise UnexpectedResponseException(
url, res,
'a json string.',
)
@property
def tickets(self):
"""
.. warning::
此接口无法用于当前登录用户已参与的 Live。当强行调用时,
此函数将产生一个警告并且不会有任何返回。
正常情况下返回的是 :any:`LiveTicket` 对象的生成器。
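用法示意(价格字段含义可参考 :any:`Live.fee`,此处仅作演示):
.. code-block:: python
    if live.role == 'visitor':
        for ticket in live.tickets:
            print(ticket.badge.name, ticket.price, ticket.price_unit)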
"""
if self.role != 'visitor':
warnings.warn(CantGetTickets)
else:
normal_url = LIVE_TICKETS_URL.format(self.id)
quiet_url = LIVE_TICKETS_QUIET_URL.format(self.id)
ended_url = LIVE_TICKETS_ENDED_URL.format(self.id)
if self.ends_in == 0:
url = ended_url
elif self.seat_taken >= self.seat_max:
url = quiet_url
else:
url = normal_url
data = self.__try_ticket_request(url)
if 'error' in data and 'code' in data['error']:
if data['error']['code'] == 4046:
data = self.__try_ticket_request(quiet_url)
elif data['error']['code'] == 4048:
data = self.__try_ticket_request(ended_url)
try:
data = data['product_list']
except KeyError:
raise UnexpectedResponseException(
quiet_url, data,
'a json string contains [product_list] attr.',
)
for ticket in data:
yield build_zhihu_obj_from_dict(
ticket, self._session, cls=LiveTicket,
id_key=LiveTicket.ID_FIELD_NAME,
) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/live.py | live.py |
from __future__ import unicode_literals
import importlib
import functools
import os
from .urls import RE_TYPE_MAP
from ..exception import (
IdMustBeIntException,
MyJSONDecodeError,
UnexpectedResponseException,
UnimplementedException,
)
try:
# Py3
# noinspection PyCompatibility
from html.parser import HTMLParser
except ImportError:
# Py2
# noinspection PyCompatibility,PyUnresolvedReferences
from HTMLParser import HTMLParser
__all__ = [
'zhihu_obj_url_parse',
'DEFAULT_INVALID_CHARS', 'EXTRA_CHAR_FOR_FILENAME',
'remove_invalid_char', 'add_serial_number',
'SimpleHtmlFormatter',
'SimpleEnum', 'ConstValue',
]
NOT_INT_ID_CLS_NAME = {'column', 'people', 'me'}
"""
ID 不需要是数字的类名集合
"""
INT_ID_KEY = '_id_is_int'
def int_id(func):
"""
装饰器。作用于 :class:`.ZhihuClient` 中需要整型 ID 来构建对应知乎类的方法。
作用就是个强制类型检查。
:raise: :class:`.IdMustBeIntException` 当传过来的 ID 不是整型的时候
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
some_id = args[0]
except IndexError:
some_id = None
if not isinstance(some_id, int):
raise IdMustBeIntException(self.__class__)
setattr(self, INT_ID_KEY, True)
return func(self, *args, **kwargs)
return wrapper
def get_class_from_name(name, module_filename=None):
cls_name = name.capitalize() if name.islower() else name
file_name = module_filename or cls_name.lower()
try:
imported_module = importlib.import_module(
'.' + file_name,
'zhihu_oauth.zhcls'
)
return getattr(imported_module, cls_name)
except (ImportError, AttributeError):
raise UnimplementedException(
'Unknown zhihu obj type [{}]'.format(name)
)
def build_zhihu_obj_from_dict(
data, session, use_cache=True, type_name=None,
filename=None, cls=None, id_key='id', type_key='type'):
obj_cls = cls or get_class_from_name(type_name or data[type_key], filename)
obj_id = data[id_key]
if obj_cls.__name__.lower() not in NOT_INT_ID_CLS_NAME:
obj_id = int(obj_id)
data.update({id_key: obj_id})
return obj_cls(obj_id, data if use_cache else None, session)
def zhihu_obj_url_parse(url):
for pattern, obj_type in RE_TYPE_MAP.items():
match = pattern.match(url)
if match:
need_int = obj_type not in NOT_INT_ID_CLS_NAME
obj_id = match.group(1)
if need_int:
obj_id = int(obj_id)
return obj_id, obj_type
return None, None
def can_get_from(name, data):
return name in data and not isinstance(data[name], (dict, list))
DEFAULT_INVALID_CHARS = {':', '*', '?', '"', '<', '>', '|', '\r', '\n'}
EXTRA_CHAR_FOR_FILENAME = {'/', '\\'}
def remove_invalid_char(dirty, invalid_chars=None, for_path=False):
if invalid_chars is None:
invalid_chars = set(DEFAULT_INVALID_CHARS)
else:
invalid_chars = set(invalid_chars)
invalid_chars.update(DEFAULT_INVALID_CHARS)
if not for_path:
invalid_chars.update(EXTRA_CHAR_FOR_FILENAME)
return ''.join([c for c in dirty if c not in invalid_chars]).strip()
def add_serial_number(file_path, postfix):
full_path = file_path + postfix
if not os.path.isfile(full_path):
return full_path
num = 1
while os.path.isfile(full_path):
# noinspection PyUnboundLocalVariable
try:
# noinspection PyCompatibility,PyUnresolvedReferences
serial = unicode(str(num))
except NameError:
serial = str(num)
full_path = file_path + ' - ' + serial.rjust(3, '0') + postfix
num += 1
return full_path
_BASE_HTML_HEADER = """<meta name="referrer" content="no-referrer" />
<meta charset="utf-8" />
"""
class SimpleHtmlFormatter(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._level = 0
self._last = ''
self._in_code = False
self._prettified = [_BASE_HTML_HEADER]
def handle_starttag(self, tag, attrs):
if not self._in_code:
self._prettified.extend(['\t'] * self._level)
self._prettified.append('<' + tag)
for name, value in attrs:
self._prettified.append(' ' + name + '="' + value + '"')
self._prettified.append('>')
if not self._in_code:
self._prettified.append('\n')
if tag != 'br' and tag != 'img':
self._level += 1
if tag == 'code':
self._in_code = True
self._last = tag
def handle_endtag(self, tag):
if tag != 'br' and tag != 'img':
self._level -= 1
if not self._in_code:
self._prettified.extend(['\t'] * self._level)
self._prettified.append('</' + tag + '>')
if not self._in_code:
self._prettified.append('\n')
self._last = tag
if tag == 'code':
self._in_code = False
def handle_startendtag(self, tag, attrs):
if not self._in_code:
self._prettified.extend(['\t'] * self._level)
self._prettified.append('<' + tag)
for name, value in attrs:
self._prettified.append(' ' + name + '="' + value + '"')
self._prettified.append('/>')
self._last = tag
def handle_data(self, data):
if not self._in_code:
self._prettified.extend(['\t'] * self._level)
if self._last == 'img':
self._prettified.append('<br>\n')
self._prettified.extend(['\t'] * self._level)
self._prettified.append(data)
if not self._in_code:
self._prettified.append('\n')
def handle_charref(self, name):
self._prettified.append('&#' + name + ';')
def handle_entityref(self, name):
self._prettified.append('&' + name + ';')
def error(self, message):
self._prettified = ['error when parser the html file.']
def prettify(self):
return ''.join(self._prettified)
class SimpleEnum(set):
def __getattr__(self, item):
if item in self:
return item
raise AttributeError('No {0} in this enum class.'.format(item))
class ConstValue(object):
def __init__(self, value=None):
self._value = value
def __get__(self, instance, cls):
return self._value
def __set__(self, instance, value):
raise TypeError('Can\'t change value of a const var')
def get_result_or_error(url, res):
try:
json_dict = res.json()
if 'error' in json_dict:
return False, json_dict['error']['message']
elif 'success' in json_dict:
if json_dict['success']:
return True, ''
else:
return False, 'Unknown error'
else:
return True, ''
except (KeyError, MyJSONDecodeError):
raise UnexpectedResponseException(
url, res, 'a json contains voting result or error message')
def common_save(path, filename, content, default_filename, invalid_chars):
filename = filename or default_filename
filename = remove_invalid_char(filename, invalid_chars)
filename = filename or 'untitled'
path = path or '.'
path = remove_invalid_char(path, invalid_chars, True)
path = path or '.'
if not os.path.isdir(path):
os.makedirs(path)
full_path = os.path.join(path, filename)
full_path = add_serial_number(full_path, '.html')
formatter = SimpleHtmlFormatter()
formatter.feed(content)
with open(full_path, 'wb') as f:
f.write(formatter.prettify().encode('utf-8')) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/utils.py | utils.py |
from __future__ import unicode_literals
import abc
import functools
import itertools
import sys
import warnings
from .utils import build_zhihu_obj_from_dict
from ..exception import (
GetEmptyResponseWhenFetchData,
MyJSONDecodeError,
SliceBadUseWarning,
TokenError,
UnexpectedResponseException,
UnimplementedException,
UnimplementedWarning,
)
__all__ = [
'BaseGenerator', 'FilterableGenerator',
'ActivityGenerator', 'AnswerGenerator', 'ArticleGenerator',
'CollectionContentGenerator', 'CollectionGenerator',
'ColumnGenerator', 'CommentGenerator',
'FeedGenerator',
'LiveGenerator', 'LiveOfTagGenerator',
'MessageGenerator',
'PeopleGenerator', 'PeopleWithLiveBadgeGenerator', 'PinGenerator',
'QuestionGenerator',
'SearchResultGenerator',
'TopicGenerator',
'WhisperGenerator',
]
class BaseGenerator(object):
def __init__(self, url, session, **default_params):
"""
基础生成器类。
:param url: 首次请求网址。后续网址在 API 的返回数据中会给出。
:param session: 网络会话。
:param default_params: 需要加到每次请求中的 get query params
"""
self._url = url
self._session = session
self._index = 0
self._data = []
self._up = 0
self._next_url = self._url
self._need_sleep = 0.5
self._default_params = dict(default_params if default_params else {})
self._extra_params = {}
def _fetch_more(self):
"""
获取下一页数据。
内部流程:
1. 从 self._extra_params 中获取附加请求参数,并发送请求。
2. 将响应解析成 JSON,如果出错则抛出异常。
3. 如果 JSON 数据未出错(没有名为 ``error`` 的键),则转 4。
- 如果错误名是 'ERR_CONVERSATION_NOT_FOUND' 则转 7(其实这是个 dirty hack,
因为有些评论没有对话列表,而我有没有找到判断方法。)
- 将等待时间翻倍,若其值超过最长等待时间限制,转 7。
否则 sleep 当前值然后返回。
(因为这里没有改变下一页所以下次会继续请求统一页面)
4. 将等待时间重置为 0.5 s。
5. 将数据添加到对象内部数据库中。
6. 如果数据表示未达到末尾,则根据数据设置下一次请求地址,返回。
7. 将下一次请求网址设为 None,这表示所有数据均取完,返回。
:raise: :any:`UnexpectedResponseException`
"""
params = dict(self._default_params)
params.update(self._extra_params)
# `offset` params only used in first request
if self._next_url != self._url and 'offset' in params:
del params['offset']
res = self._session.get(self._next_url, params=params)
try:
json_dict = res.json()
# Empty data({}, []) as end
if not json_dict:
warnings.warn(GetEmptyResponseWhenFetchData)
self._next_url = None
return
# Server knows error happened
if 'error' in json_dict:
error = json_dict['error']
# comment conversion hack, as end
if 'name' in error:
if error['name'] == 'ERR_CONVERSATION_NOT_FOUND':
self._next_url = None
return
# token error
if 'code' in error:
if error['code'] == 100:
raise TokenError(error['message'])
# other error
raise UnexpectedResponseException(
self._next_url,
res,
"a json string, has data and paging"
)
self._up += len(json_dict['data'])
self._data.extend(json_dict['data'])
if json_dict['paging']['is_end']:
self._next_url = None
else:
self._next_url = json_dict['paging']['next']
except (MyJSONDecodeError, AttributeError):
raise UnexpectedResponseException(
self._next_url,
res,
'a json string, has data and paging'
)
@abc.abstractmethod
def _build_obj(self, data):
"""
这是个抽象方法,子类需要自己实现创建对象并返回的操作。
子类的操作很简单,下文文档中就不详细写了。
:param data: 提供的数据,为返回的 JSON 数据的 data 列表中的一个 dict。
:return: 构建出的对象。
"""
return None
def __getitem__(self, item):
"""
重载自身的 ``[int]`` 操作符。逻辑如下:
1. 如果要求的 index 小于现在对象内部数据库中对象数量,
从数据库中数据,使用 _build_obj 出构建对象并返回。
2. 如果下一页地址不为 None,则调用 :any:`_fetch_more` 请求更多数据。
否则抛出 IndexError 异常表示超出范围。
3. 因为请求过程中更新了数据库,再转 1。
结合 :any:`_fetch_more` 能更好地理解本函数。
:param int item: 索引,必须为整型。
:return: 对应的对象。
:raise IndexError: 请求完全部数据后,索引还是大于数据库内数据量。
"""
if not isinstance(item, (int, slice)):
raise TypeError('Need an int or slice as index, not {0}'
.format(type(item)))
if isinstance(item, slice):
# Get the start, stop, and step from the slice
if item.start:
warnings.warn(SliceBadUseWarning(item))
return itertools.islice(self, item.start, item.stop, item.step)
if item < 0:
raise ValueError('index must be >= 0, {} provided'.format(item))
while item >= self._up:
if self._next_url is not None:
self._fetch_more()
else:
raise IndexError('list index out of range')
return self._build_obj(self._data[item])
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
def __iter__(self):
self._reset()
return self
def __next__(self):
"""
提供迭代方式访问数据集,即 ``for xx in obj.xxxs`` 。
对象内有一个变量 ``_index`` 保存着下一次要迭代的下标。
每次用户迭代时,使用被 :any:`__getitem__ <BaseGenerator.__getitem__>`
方法重写过的 self[self._index] 操作符尝试获取对象。
如果引发了 ``IndexError`` 则表示数据获取完毕。此时提供一个
``StopIteration`` 结束迭代,并把 ``_index`` 变量置为 0 为下次迭代做准备。
如果成功获取到数据则把 ``_index + 1``,然后返回对象。
结合 :any:`__getitem__ <BaseGenerator.__getitem__>` 能更好地理解本函数。
"""
obj = None
while obj is None:
try:
obj = self[self._index]
except IndexError:
self._index = 0
raise StopIteration
self._index += 1
return obj
next = __next__
def order_by(self, what):
"""
有些 API 可以根据 GET 参数来控制数据的排序,只需流式的调用本函数即可。
目前发现支持的使用方式只有:
- ``People.answers.order_by('votenum')``,
表示按赞数排序获取某人答案。默认为按时间。
(由于 Me 类继承于 People,所以 ``me.answers`` 也可以)。
如果我发现了其他方式会更新文档。
.. warning:: 注意
使用这一函数会重置对象内部的所有数据,
再次取数据将从头开始。
其实就是个 :any:`add_params` 的封装……
:param str|unicode what: 按什么排序……
"""
return self.add_params(order_by=what)
def jump(self, n):
"""
忽略前 n 个数据,直接去获取第 n+1 个数据
:param int n: 跳过多少数据
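用法示意(跳过前 20 条数据,从第 21 条开始取,仅作演示):
.. code-block:: python
    for answer in people.answers.jump(20):
        print(answer.id)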
"""
return self.add_params(offset=int(n))
def _reset(self):
"""
重置数据。
"""
del self._data[:]
self._index = 0
self._up = 0
self._next_url = self._url
self._need_sleep = 0.5
def set_params(self, *_, **params):
"""
自定义请求时的 params,如果不了解知乎 OAuth API 的话并没有什么用。
.. warning:: 注意
使用这一函数会重置对象内部的所有数据,
再次取数据将从头开始。
使用方式:``for xxx in obj.xxxs.set_params(a='b', c='d'):``
"""
self._extra_params.clear()
return self.add_params(**params)
def add_params(self, *_, **params):
"""
添加请求时的 params,如果不了解知乎 OAuth API 的话并没有什么用。
.. note:: 注意
使用这一函数会重置对象内部的除了额外 params 外的数据,
再次取数据将从头开始。
使用方式:``for xxx in obj.xxxs.add_params(a='b').add_params(b='b'):``
"""
self._reset()
self._extra_params.update(params)
return self
class FilterableGenerator(BaseGenerator):
def __init__(self, url, session, filter_set, obj_cls, **kwargs):
self._condition = None
self._condition_is_func = False
self._filter_set = filter_set
self._obj_cls = obj_cls
super(FilterableGenerator, self).__init__(url, session, **kwargs)
def filter(self, cond):
self._condition_is_func = False
if isinstance(cond, str) and cond in self._filter_set:
cond = {cond}
elif isinstance(cond, set) \
and all([x in self._filter_set for x in cond]):
pass
elif hasattr(cond, '__call__'):
self._condition_is_func = True
else:
raise ValueError('Argument cond can only be '
'XxxType.xxx, or a set of theme, '
'or a callable like func(obj) -> Bool.')
self._condition = cond
return self
def _build_obj(self, data):
try:
obj = self._obj_cls(data, self._session)
except UnimplementedException as e:
warnings.warn(UnimplementedWarning(e))
return None
if self._condition is not None:
if self._condition_is_func:
if not self._condition(obj):
return None
elif obj.type not in self._condition:
return None
return obj
class ActivityGenerator(FilterableGenerator):
def __init__(self, url, session, **kwargs):
from .activity import Activity, ActType
super(ActivityGenerator, self).__init__(
url, session, ActType, Activity, **kwargs
)
def filter(self, cond):
"""
设置想要获取哪个或哪些类型的动态。例子:
.. code-block:: python
xxx = client.people('xxx')
for act in xxx.activities.filter(ActType.VOTEUP_ANSWER):
print(ts2str(act.created_time), act2str(act))
参数也可以是多个 :any:`ActType` 的 set:
.. code-block:: python
xxx = client.people('xxx')
filter_types = {
ActType.VOTEUP_ANSWER,
ActType.VOTEUP_ARTICLE,
ActType.FOLLOW_QUESTION,
}
for act in xxx.activities.filter(filter_types):
print(ts2str(act.created_time), act2str(act))
参数还可以是一个接收 :any:`Activity` 对象,返回值为 Bool 的函数:
.. code-block:: python
xxx = client.people('xxx')
for act in xxx.activities.filter(lambda x: '游戏' in act2str(x)):
print(ts2str(act.created_time), act2str(act))
.. warning:: 需要注意的是,使用 filter 并不会减少网络请求,使程序变快
因为知乎官方并没有提供根据动态类型获取动态的接口,
所以内部实现其实依旧获取了用户的所有动态,只是在生成器返回数据时进行了过滤。
:param ActType|{ActType}|callable cond: 过滤条件
"""
return super(ActivityGenerator, self).filter(cond)
class AnswerGenerator(BaseGenerator):
def __init__(self, url, session):
super(AnswerGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .answer import Answer
return build_zhihu_obj_from_dict(
data, self._session, cls=Answer
)
class ArticleGenerator(BaseGenerator):
def __init__(self, url, session):
super(ArticleGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .article import Article
return build_zhihu_obj_from_dict(
data, self._session, cls=Article
)
class CollectionContentGenerator(BaseGenerator):
def __init__(self, url, session):
super(CollectionContentGenerator, self).__init__(url, session)
def _build_obj(self, data):
if data['type'] in {'answer', 'article'}:
return build_zhihu_obj_from_dict(data, self._session)
else:
raise UnimplementedException(
'Unknown collection content type: {0}. '
'Please send this error message to '
'developer to get help.'.format(data['type'])
)
class CollectionGenerator(BaseGenerator):
def __init__(self, url, session):
super(CollectionGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .collection import Collection
return build_zhihu_obj_from_dict(
data, self._session, cls=Collection
)
class ColumnGenerator(BaseGenerator):
def __init__(self, url, session):
super(ColumnGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .column import Column
return build_zhihu_obj_from_dict(
data, self._session, cls=Column
)
class CommentGenerator(BaseGenerator):
def __init__(self, url, session):
super(CommentGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .comment import Comment
return build_zhihu_obj_from_dict(
data, self._session, cls=Comment
)
class FeedGenerator(FilterableGenerator):
def __init__(self, url, session, **kwargs):
from .feed import Feed, FeedType
super(FeedGenerator, self).__init__(
url, session, FeedType, Feed, **kwargs
)
def filter(self, cond):
"""
参见 :any:`ActivityGenerator.filter` 函数,使用方式基本相同
例子见 :any:`Feed`
"""
return super(FeedGenerator, self).filter(cond)
class LiveGenerator(BaseGenerator):
def __init__(self, url, session, **kwargs):
super(LiveGenerator, self).__init__(url, session, **kwargs)
def _build_obj(self, data):
from .live import Live
return build_zhihu_obj_from_dict(
data, self._session, cls=Live
)
class LiveOfTagGenerator(LiveGenerator):
def __init__(self, url, session, **kwargs):
super(LiveOfTagGenerator, self).__init__(url, session, **kwargs)
class MessageGenerator(BaseGenerator):
def __init__(self, url, session, **kwargs):
super(MessageGenerator, self).__init__(url, session, **kwargs)
def _build_obj(self, data):
from .message import Message
return build_zhihu_obj_from_dict(
data, self._session, cls=Message
)
class PeopleGenerator(BaseGenerator):
def __init__(self, url, session):
super(PeopleGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .people import People
# hack for topic.best_answerers
if data['type'] == 'best_answerers':
data = data['member']
return build_zhihu_obj_from_dict(
data, self._session, cls=People
)
class PeopleWithLiveBadgeGenerator(BaseGenerator):
def __init__(self, url, session):
super(PeopleWithLiveBadgeGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .people import People
from .live import LiveBadge
return (
data['role'],
build_zhihu_obj_from_dict(
data['badge'], self._session, cls=LiveBadge,
),
build_zhihu_obj_from_dict(
data['member'], self._session, cls=People,
),
)
class PinGenerator(BaseGenerator):
def __init__(self, url, session):
super(PinGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .pin import Pin
return build_zhihu_obj_from_dict(
data, self._session, cls=Pin
)
class QuestionGenerator(BaseGenerator):
def __init__(self, url, session):
super(QuestionGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .question import Question
return build_zhihu_obj_from_dict(
data, self._session, cls=Question
)
class SearchResultGenerator(BaseGenerator):
def __init__(self, url, session, **kwargs):
super(SearchResultGenerator, self).__init__(url, session, **kwargs)
def _build_obj(self, data):
from .search import data_to_section_or_result, SearchResult
res = data_to_section_or_result(data, self._session)
# Bypass Zhihu AD result in search results
if isinstance(res, SearchResult):
try:
_ = res.obj
except UnimplementedException as e:
if '[promotion]' in str(e):
return None
raise e
return res
class TopicGenerator(BaseGenerator):
def __init__(self, url, session):
super(TopicGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .topic import Topic
return build_zhihu_obj_from_dict(
data, self._session, cls=Topic
)
class TopicActivityGenerator(BaseGenerator):
def __init__(self, url, session):
super(TopicActivityGenerator, self).__init__(url, session)
def _build_obj(self, data):
answer_list = data.get('answers', [])
answer_count = len(answer_list)
if answer_count == 0:
# Only question
return build_zhihu_obj_from_dict(data, self._session)
elif answer_count == 1:
return build_zhihu_obj_from_dict(answer_list[0], self._session)
else:
warnings.warn(UnimplementedWarning(UnimplementedException(
    'I think question generated by Topic.activities has only '
    'one answer at most, if you see this warning, '
    'which proves that I am wrong, '
    'please open an issue in Github with the data ' + str(data)
)))
return None
class WhisperGenerator(BaseGenerator):
def __init__(self, url, session):
super(WhisperGenerator, self).__init__(url, session)
def _build_obj(self, data):
from .whisper import Whisper
return build_zhihu_obj_from_dict(
data, self._session, cls=Whisper
)
def generator_of(url_pattern, class_name=None, format_id=True, **params):
def wrappers_wrapper(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
from .people import People
cls_name = class_name or func.__name__
if cls_name.endswith('s'):
cls_name = cls_name[:-1]
if cls_name.islower():
cls_name = cls_name.capitalize()
gen_cls_name = cls_name + 'Generator'
try:
gen_cls = getattr(sys.modules[__name__], gen_cls_name)
except AttributeError:
return func(self, *args, **kwargs)
if isinstance(self, People):
self._get_data()
default_params = params
if gen_cls is MessageGenerator:
# self is whisper object,
# who attr is people object, for who i'm talking to
default_params['sender_id'] = self.who.id
elif gen_cls is ActivityGenerator:
# 获取用户动态需要加上 action_feed=true
# 如果不加某些用户动态获取中途会出错
default_params['action_feed'] = 'true'
elif gen_cls is LiveOfTagGenerator:
default_params['tags'] = self.id
if format_id:
url = url_pattern.format(self.id)
else:
url = url_pattern
return gen_cls(url, self._session, **default_params)
return wrapper
return wrappers_wrapper | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/generator.py | generator.py |
from __future__ import unicode_literals
import itertools
from .base import Base
from .generator import generator_of
from .normal import normal_attr
from .other import other_obj
from .streaming import streaming, StreamingJSON
from .urls import (
PIN_COMMENTS_URL,
PIN_DETAIL_URL,
PIN_VOTERS_URL,
ZHIHU_WEB_ROOT,
)
from .utils import (
build_zhihu_obj_from_dict,
int_id,
zhihu_obj_url_parse,
SimpleEnum,
)
from ..exception import UnimplementedException
__all__ = ['Pin', 'PinContent', 'PCType']
class Pin(Base):
@int_id
def __init__(self, pid, cache, session):
super(Pin, self).__init__(pid, cache, session)
def _build_url(self):
return PIN_DETAIL_URL.format(self.id)
# ----- simple attrs -----
@property
@other_obj('people')
def author(self):
return None
@property
@normal_attr()
def comment_count(self):
return None
@property
@normal_attr()
def comment_permission(self):
return None
@property
@normal_attr('created')
def created_time(self):
return None
@property
@normal_attr()
def excerpt_title(self):
"""
简要标题文字
"""
return None
@property
@normal_attr()
def like_count(self):
return None
@property
@normal_attr('updated')
def updated_time(self):
return None
@property
@streaming('content')
def _contents(self):
return []
@property
def contents(self):
"""
因为一个分享里含有多个内容,所以 contents 属性是迭代器,需要这样使用:
.. code-block:: Python
for act in someone.activities.filter(ActType.LIKE_PIN):
pin = act.target
for pc in pin.contents:
# do something with pc
迭代器返回 :any:`PinContent` 类型的对象,具体用法参看
:any:`_pin_content_type_map` 和 :any:`PinContent`。
"""
for data in self._contents:
yield PinContent(data, self._session)
@property
@normal_attr()
def content_html(self):
return None
# ----- generators -----
@property
@generator_of(PIN_COMMENTS_URL)
def comments(self):
return None
@property
@generator_of(PIN_VOTERS_URL, 'people')
def voters(self):
return None
_pin_content_type_map = {
'text': 'TEXT',
'image': 'IMAGE',
'link': 'LINK',
'quote': 'QUOTE',
}
"""
:any:`PinContent` 的类型,每种类型能够使用的属性都不一样,具体请看下表:
+--------+---------------+------------------------------------+
| 类型 | 属性 | 含义 |
+========+===============+====================================+
| TEXT | text | 用户对分享的评论文字 |
+--------+---------------+------------------------------------+
| | src | 图片地址 |
| IMAGE +---------------+------------------------------------+
| | width, height | 图片宽高 |
+--------+---------------+------------------------------------+
| | title | 网页的标题 |
| +---------------+------------------------------------+
| LINK | url | 网页地址 |
| +---------------+------------------------------------+
| | image_url | 网页的 ICON 地址 |
+--------+---------------+------------------------------------+
| QUOTE | quote | 引用的文字 |
+--------+---------------+------------------------------------+
| | type | 类型,可用 is PcType.TEXT 判断 |
| +---------------+------------------------------------+
| | subtype | 表示内容中包含的知乎对象类型, |
| | | 参见 :any:`_subtype_zhihu_obj_map` |
| 通用 +---------------+------------------------------------+
| | obj | 分享内容是知乎相关链接的话 |
| | | 可使用此属性获取到对应知乎类对象 |
| +---------------+------------------------------------+
| | | 获取主要的数据 |
| | content | TEXT -> text, LINK -> url |
| | | IMAGE -> src, QUOTE -> quote |
+--------+---------------+------------------------------------+
"""
_subtype_zhihu_obj_map = {
'问题': 'QUESTION',
'回答': 'ANSWER',
'专栏': 'COLUMN',
'文章': 'ARTICLE',
'收藏夹': 'COLLECTION',
'话题': 'TOPIC',
'个人主页': 'PEOPLE',
'Live': 'LIVE',
# ----- no zhihu object -----
'none': 'NONE',
}
"""
:any:`PinContent.obj_type` 的取值枚举。
使用方法大概如下:
.. code-block:: Python
for act in someone.activities.filter(ActType.LIKE_PIN):
pin = act.target
for content in pin.contents:
if content.obj_type is not PCType.NONE:
print(content.obj)
"""
_type_zhihu = 'zhihu'
PCType = SimpleEnum(
itertools.chain(
_pin_content_type_map.values(),
_subtype_zhihu_obj_map.values(),
)
)
"""
参见 :any:`_pin_content_type_map` 和 :any:`_subtype_zhihu_obj_map`。
"""
_type_content_method_map = {
PCType.TEXT: 'text',
PCType.IMAGE: 'src',
PCType.QUOTE: 'quote',
PCType.LINK: 'url'
}
"""
从 :any:`PinContent` 的类型对应到 :any:`PinContent.content` 属性需要使用的方法名
"""
class PinContent(object):
"""
PinContent 是表示分享的内容的类,因为其类型很多,所以单独成类,设计理念和
:any:`Activity` 类似。
每一个 :any:`Pin` 可能含有多个 :any:`PinContent`,
一般第一个是用户自对自己分享的东西的评论,余下的是 TA 分享的东西。
:any:`PinContent` 主要有四个类型,具体请看 :any:`_pin_content_type_map`。
使用方法如下:
.. code-block:: Python
for act in me.activities.filter(ActType.LIKE_PIN):
for content in act.target.contents:
print(content.type, content.obj_type, end=' ')
if content.type is PCType.TEXT:
print(content.text)
elif content.type is PCType.QUOTE:
print(content.quote)
elif content.type is PCType.LINK:
print(content.title, content.url, content.image_url)
elif content.type is PCType.IMAGE:
print(content.src, content.width, content.height)
print('-' * 20)
或者使用 :any:`PinContent.content` 获取对于每种类型的主要内容。
参见 :any:`_type_content_method_map`
"""
def __init__(self, data, session):
assert isinstance(data, StreamingJSON)
self._data = data
self._session = session
# ----- Text type ----
@property
def text(self):
assert self.type is PCType.TEXT
return self._data.content
# ----- Image type -----
@property
def src(self):
assert self.type is PCType.IMAGE
return self._data.url
@property
def width(self):
assert self.type is PCType.IMAGE
return self._data.width
@property
def height(self):
assert self.type is PCType.IMAGE
return self._data.height
# ----- Normal link type -----
@property
def title(self):
assert self.type is PCType.LINK
return self._data.title
@property
def url(self):
assert self.type is PCType.LINK or self.type is PCType.QUOTE
return self._data.url
@property
def image_url(self):
assert self.type is PCType.LINK
return self._data.image_url
# ----- Quote -----
@property
def quote(self):
assert self.type is PCType.QUOTE
return self._data.content
# ------ Common -----
@property
def type(self):
if self._data.type not in _pin_content_type_map:
raise UnimplementedException(
'Unknown pin content type [{}]'.format(self._data.type)
)
return getattr(PCType, _pin_content_type_map[self._data.type])
@property
def obj_type(self):
if self.type is PCType.LINK and self._data.link_type == _type_zhihu:
subtype = self._data.subtype
if subtype not in _subtype_zhihu_obj_map:
raise UnimplementedException(
'Unknown pin link obj_type [{}]'.format(subtype)
)
return getattr(PCType, _subtype_zhihu_obj_map[subtype])
if self.type is PCType.QUOTE:
obj_id, obj_type = zhihu_obj_url_parse(self._data.url)
if obj_id:
return getattr(PCType, obj_type.upper())
elif self._data.url.startswith(ZHIHU_WEB_ROOT):
raise UnimplementedException(
                    'Unknown pin quote obj_type, data [{}]'.format(self._data)
)
return PCType.NONE
@property
def content(self):
return getattr(self, _type_content_method_map[self.type])
@property
def obj(self):
assert self.obj_type is not PCType.NONE
if self.type is PCType.LINK:
obj_id = self._data.token
else:
assert self.type is PCType.QUOTE
obj_id, _ = zhihu_obj_url_parse(self.url)
return build_zhihu_obj_from_dict(
{'id': obj_id}, self._session, use_cache=False
) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/pin.py | pin.py |
from __future__ import unicode_literals
from itertools import chain
from .streaming import StreamingJSON
from .utils import SimpleEnum, build_zhihu_obj_from_dict
from ..exception import UnimplementedException
__all__ = ['Feed', 'FeedType']
_VERB_TO_FEED_TYPE_MAP = {
'COLUMN_POPULAR_ARTICLE': 'COLUMN_POPULAR_ARTICLE',
'LIVE_JOIN': 'JOIN_LIVE',
'MEMBER_ANSWER_QUESTION': 'CREATE_ANSWER',
'MEMBER_ASK_QUESTION': 'CREATE_QUESTION',
'MEMBER_COLLECT_ANSWER': 'COLLECT_ANSWER',
'MEMBER_COLLECT_ARTICLE': 'COLLECT_ARTICLE',
'MEMBER_CREATE_ARTICLE': 'CREATE_ARTICLE',
'MEMBER_CREATE_PIN': 'CREATE_PIN',
'MEMBER_FOLLOW_COLLECTION': 'FOLLOW_COLLECTION',
'MEMBER_FOLLOW_COLUMN': 'FOLLOW_COLUMN',
'MEMBER_FOLLOW_QUESTION': 'FOLLOW_QUESTION',
'MEMBER_FOLLOW_ROUNDTABLE': 'FOLLOW_ROUNDTABLE',
'MEMBER_FOLLOW_TOPIC': 'FOLLOW_TOPIC',
'MEMBER_LIKE_PIN': 'LIKE_PIN',
'MEMBER_VOTEUP_ANSWER': 'VOTEUP_ANSWER',
'MEMBER_VOTEUP_ARTICLE': 'VOTEUP_ARTICLE',
'MEMBER_VOTEUP_EBOOK': 'VOTEUP_EBOOK',
'TOPIC_ACKNOWLEDGED_ANSWER': 'ANSWER_FROM_TOPIC',
'TOPIC_ACKNOWLEDGED_ARTICLE': 'ARTICLE_FROM_TOPIC',
'TOPIC_ACKNOWLEDGED_EBOOK': 'EBOOK_FROM_TOPIC',
'TOPIC_POPULAR_QUESTION': 'QUESTION_FROM_TOPIC',
}
_TYPE_TO_FEED_TYPE_MAP = {
    'action_card': 'ACTION_CARD',
'feed_advert': 'AD',
}
FeedType = SimpleEnum(
chain(_VERB_TO_FEED_TYPE_MAP.values(), _TYPE_TO_FEED_TYPE_MAP.values())
)
"""
FeedType 是用于表示首页信息流单个 Feed 类型的枚举类,可供使用的常量有:
====================== ================== ======================
常量名 说明 `target` 属性类型
====================== ================== ======================
COLLECT_ANSWER 收藏答案 :any:`Answer`
COLLECT_ARTICLE 收藏文章 :any:`Article`
COLUMN_POPULAR_ARTICLE 热门专栏文章 :any:`Article`
CREATE_ANSWER 回答问题 :any:`Answer`
CREATE_ARTICLE 发表文章 :any:`Article`
CREATE_PIN 发表分享 :any:`Pin`
CREATE_QUESTION 提出问题 :any:`Question`
FOLLOW_COLLECTION 关注收藏夹 :any:`Collection`
FOLLOW_COLUMN 关注专栏 :any:`Column`
FOLLOW_QUESTION 关注问题 :any:`Question`
FOLLOW_ROUNDTABLE 关注圆桌 :any:`StreamingJSON`
FOLLOW_TOPIC 关注话题 :any:`Topic`
LIKE_PIN 赞了分享 :any:`Pin`
VOTEUP_ANSWER 赞同回答 :any:`Answer`
VOTEUP_ARTICLE 赞同文章 :any:`Article`
VOTEUP_EBOOK 赞了电子书 :any:`StreamingJSON`
ANSWER_FROM_TOPIC 来自话题的答案 :any:`Answer`
ARTICLE_FROM_TOPIC 来自话题的文章 :any:`Article`
EBOOK_FROM_TOPIC 来自话题的电子书 :any:`StreamingJSON`
QUESTION_FROM_TOPIC 来自话题的问题 :any:`Question`
ACTION_CARD 卡片式广告 ``None``
AD 简单广告 ``None``
====================== ================== ======================
上述 ``ACTION_CARD`` 型 Feed 的内容请参见 :any:`Feed.promotions`;``AD``
型 Feed 内容参见 :any:`Feed.ad`。
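
下面是一个简单的用法示意(假设 ``client`` 为已登录的 :any:`ZhihuClient`,
``me = client.me()``,仅供参考):

.. code-block:: Python

    for feed in me.feeds:
        if feed.type is FeedType.CREATE_QUESTION:
            print(feed.action_text, feed.target.title)
        elif feed.type is FeedType.FOLLOW_TOPIC:
            print(feed.action_text, feed.target.name)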
"""
_NON_TRIVIAL_FEED_TYPES = {FeedType.ACTION_CARD, FeedType.AD}
def _verb_to_feed_type(verb):
type_str = _VERB_TO_FEED_TYPE_MAP.get(verb, None)
if type_str is None:
raise UnimplementedException(
'Unknown feed verb: {0}'.format(verb)
)
return getattr(FeedType, type_str)
def _type_to_feed_type(t):
type_str = _TYPE_TO_FEED_TYPE_MAP.get(t, None)
if type_str is None:
raise UnimplementedException(
'Unknown feed type: {0}'.format(t)
)
return getattr(FeedType, type_str)
class Feed(object):
"""
表示用户首页信息流里的单个卡片数据。用户一般无法手动构造此对象,而需要通过
:any:`Me.feeds` 属性来获取 Feed Generator
使用示例:
.. code-block:: Python
from zhihu_oauth import ZhihuClient, FeedType
# your login code
me = client.me()
for feed in me.feeds:
if feed.type is FeedType.VOTEUP_ANSWER:
print(feed.action_text)
或者使用 Feed Generator 的 filter 函数来获取指定类型的 Feed:
.. code-block:: Python
for feed in me.feeds.filter({
FeedType.VOTEUP_ANSWER, FeedType.ANSWER_FROM_TOPIC,
FeedType.CREATE_ANSWER, FeedType.COLLECT_ANSWER,
}):
ans = feed.target
print(ans.question.title, ans.author.name, ans.voteup_count)
"""
def __init__(self, data, session):
self._data = data
self._session = session
if self._data.get('type', None) == 'feed':
self._type = _verb_to_feed_type(self._data['verb'])
else:
self._type = _type_to_feed_type(self._data['type'])
@property
def action_text(self):
"""
描述此 Feed 的文字,如 「XXX 赞同了回答」,「XXX 发表了文章」
"""
        if self.type is not FeedType.ACTION_CARD:
return self._data['action_text']
@property
def created_time(self):
"""
Feed 时间戳
"""
if self.type not in _NON_TRIVIAL_FEED_TYPES:
return self._data['created_time']
@property
def target(self):
"""
Feed 的主要内容。
`target` 在不同 :any:`Feed.type` 下有不同的类型。
参见 :any:`FeedType`
"""
if self.type not in _NON_TRIVIAL_FEED_TYPES:
if self.type in {
FeedType.FOLLOW_ROUNDTABLE, FeedType.VOTEUP_EBOOK,
FeedType.EBOOK_FROM_TOPIC,
}:
return StreamingJSON(self._data['target'])
return build_zhihu_obj_from_dict(
self._data['target'], self._session
)
@property
def type(self):
"""
Feed 的类型。
参见 :any:`FeedType`
"""
return self._type
@property
def actors(self):
"""
可以理解为 Feed 的来源(可能有多个),所以是一个 Generator。
对于 ``XXX_FROM_TOPIC`` 类型的 Feed,此属性为 :any:`Topic` 对象,
其他类型时大多为 :any:`People` 型。
卡片和广告型 Feed 没有此内容。
"""
if self.type not in _NON_TRIVIAL_FEED_TYPES:
for actor_data in self._data['actors']:
yield build_zhihu_obj_from_dict(actor_data, self._session)
@property
def promotions(self):
"""
``ACTION_CARD`` 型 Feed 专属属性,表示促销的内容列表,是一个 Generator
"""
if self.type is FeedType.ACTION_CARD:
for card in self._data['card']['promotion_list']:
yield build_zhihu_obj_from_dict(
card['elem'], self._session, type_name=card['type'],
)
@property
def ad(self):
"""
``AD`` 型 Feed 专属属性,表示广告内容。
因为应该没什么人用,所以直接返回了 JSON 内容的 :any:`StreamingJSON` 封装
"""
if self.type is FeedType.AD:
return StreamingJSON(self._data['ad']) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/feed.py | feed.py |
from __future__ import unicode_literals
from .base import Base
from .generator import generator_of
from .normal import normal_attr
from .streaming import streaming
from .urls import (
QUESTION_DETAIL_URL,
QUESTION_ANSWERS_URL,
QUESTION_COMMENTS_URL,
QUESTION_FOLLOWERS_URL,
QUESTION_TOPICS_URL,
)
from .utils import int_id
__all__ = ['Question']
class Question(Base):
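    """知乎「问题」对象的封装。

    一般不直接构造,而是通过 :any:`ZhihuClient.question` 方法,
    或其他对象的属性(例如 :any:`Answer.question`)获取。
    """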
@int_id
def __init__(self, qid, cache, session):
super(Question, self).__init__(qid, cache, session)
def _build_url(self):
return QUESTION_DETAIL_URL.format(self._id)
# ----- simple info -----
@property
@normal_attr()
def allow_delete(self):
return None
@property
@normal_attr()
def answer_count(self):
return None
@property
@normal_attr()
def comment_count(self):
return None
@property
@normal_attr('created')
def created_time(self):
return None
@property
@normal_attr('except')
def excerpt(self):
"""
知乎返回的 json 里这一项叫做 except.... 也是醉了
"""
return None
@property
@normal_attr()
def follower_count(self):
return None
@property
@normal_attr()
def detail(self):
return None
@property
@streaming()
def redirection(self):
"""
常见返回值:
.. code-block:: python
{
'to':
{
'url': 'https://api.zhihu.com/questions/19570036',
'id': 19570036,
'type': 'question',
'title': '什么是「问题重定向」?如何正确使用该功能解决重复问题?'
},
'from':
[
{
'url': 'https://api.zhihu.com/questions/19772082',
'id': 19772082,
'type': 'question',
'title': '知乎上有重复的问题吗?'
},
{
'url': 'https://api.zhihu.com/questions/20830682',
'id': 20830682,
'type': 'question',
'title': '各位知友以为同一问题重复出现,知乎应如何应对?'
}
]
}
在使用 from 属性时遇到语法错误?请看 :ref:`说明 <tips-for-conflict-with-keyword>`
"""
return None
@property
@streaming()
def status(self):
return None
@property
@streaming(use_cache=False)
def suggest_edit(self):
"""
常见返回值:
.. code-block:: python
            {'status': False, 'reason': ''}
{'status': True, 'reason': '问题表意不明'}
"""
return None
@property
@normal_attr()
def title(self):
return None
@property
@normal_attr()
def updated_time(self):
return None
# ----- generators -----
@property
@generator_of(QUESTION_ANSWERS_URL)
def answers(self):
return None
@property
@generator_of(QUESTION_COMMENTS_URL)
def comments(self):
return None
@property
@generator_of(QUESTION_FOLLOWERS_URL, 'people')
def followers(self):
return None
@property
@generator_of(QUESTION_TOPICS_URL)
def topics(self):
return None | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/question.py | question.py |
from __future__ import unicode_literals
from .base import Base
from .generator import generator_of
from .other import other_obj
from .normal import normal_attr
from .streaming import streaming
from .utils import common_save, int_id
from .urls import (
ANSWER_DETAIL_URL,
ANSWER_COLLECTIONS_URL,
ANSWER_COMMENTS_URL,
ANSWER_VOTERS_URL,
)
__all__ = ['Answer']
class Answer(Base):
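    """知乎「回答」对象的封装。

    一般不直接构造,而是通过 :any:`ZhihuClient.answer` 方法,
    或 :any:`Question.answers` 等生成器获取。
    """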
@int_id
def __init__(self, aid, cache, session):
assert isinstance(aid, int)
super(Answer, self).__init__(aid, cache, session)
def _build_url(self):
return ANSWER_DETAIL_URL.format(self.id)
# ----- simple info -----
@property
@other_obj('people')
def author(self):
return None
@property
@streaming()
def can_comment(self):
"""
        大概表示是否允许当前用户评论。
常见返回值:
.. code-block:: python
{
'status': True,
'reason': ''
}
"""
return None
@property
@normal_attr()
def comment_count(self):
return None
@property
@normal_attr()
def comment_permission(self):
"""
评论权限,现在已知有:
========== ========================
值(str) 说明
========== ========================
all 允许所有人评论
followee 允许答主关注的人评论
nobody 关闭评论
========== ========================
"""
return None
@property
@normal_attr()
def content(self):
return None
@property
@normal_attr()
def created_time(self):
return None
@property
@normal_attr()
def excerpt(self):
return None
@property
@normal_attr()
def is_copyable(self):
return None
@property
@normal_attr()
def is_mine(self):
return None
@property
@other_obj()
def question(self):
return None
@property
@streaming(use_cache=False)
def suggest_edit(self):
"""
答案是否处于「被建议修改」状态,常见返回值为:
.. code-block:: python
{'status': False, 'title': '', 'reason': '', 'tip': '', 'url': ''}
{
'status': True,
'title': '为什么回答会被建议修改',
'tip': '作者修改内容通过后,回答会重新显示。如果一周内未得到有效修改,回答会自动折叠',
'reason': '回答被建议修改:\\n不宜公开讨论的政治内容',
'url': 'zhihu://questions/24752645'
}
"""
return None
@property
@normal_attr()
def thanks_count(self):
return None
@property
@normal_attr()
def updated_time(self):
return None
@property
@normal_attr()
def voteup_count(self):
return None
# ----- generators -----
@property
@generator_of(ANSWER_COLLECTIONS_URL)
def collections(self):
return None
@property
@generator_of(ANSWER_COMMENTS_URL)
def comments(self):
return None
@property
@generator_of(ANSWER_VOTERS_URL, 'people')
def voters(self):
return None
# ----- other operate -----
def save(self, path='.', filename=None, invalid_chars=None):
"""
保存答案到当前文件夹。
:param str|unicode path: 目录名,可选。不提供的话会保存到当前目录。
:param str|unicode filename: 文件名,可选。
不提供的话会使用答主名。注意不要带后缀名
        :param list[char] invalid_chars: 非法字符列表。
目录名和文件名都会使用这个列表过滤一遍。
如果不提供则会使用内置的列表。
:return: 无返回值
.. note:: TIPS
建议的使用方法:
.. code-block:: python
# 对于保存问题的所有答案
for answer in question.answers:
print(answer.author.name)
answer.save(question.title)
# 对于保存收藏夹的所有答案
for answer in collection.answers:
name = answer.question.title + ' - ' + answer.author.name
print(name)
answer.save(collection.title, name)
因为这样会将答案保存在以问题标题(或者收藏夹名字)命名的文件夹里。
.. note:: TIPS
对于一个问题下有多个匿名用户的情况,不要担心,会被自动命名为
匿名用户 - 001.html,匿名用户 - 002.html……
.. todo:: 优化存在重复文件时的算法……
"""
if self._cache is None:
self._get_data()
common_save(path, filename, self.content,
self.author.name, invalid_chars) | zhihu-oauth | /zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/zhihu_oauth/zhcls/answer.py | answer.py |
import json
from .common import *
from .author import Author
class Me(Author):
"""封装了相关操作(如点赞,关注问题)的类。
请使用 :meth:`.ZhihuClient.me` 方法获取实例。
"""
def __init__(self, url, name, motto, photo_url, session):
super(Me, self).__init__(url, name, motto,
photo_url=photo_url, session=session)
def vote(self, something, vote='up'):
"""给答案或文章点赞或取消点赞
:param Answer/Post something: 需要点赞的答案或文章对象
:param str vote:
===== ================ ======
取值 说明 默认值
===== ================ ======
up 赞同 √
down 反对 X
clear 既不赞同也不反对 X
===== ================ ======
:return: 成功返回True,失败返回False
:rtype: bool
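
        用法示意(假设 client 为已登录的 ZhihuClient,answer 为
        :class:`.Answer` 对象,仅供参考):

        .. code-block:: python

            me = client.me()
            me.vote(answer)           # 赞同
            me.vote(answer, 'down')   # 反对
            me.vote(answer, 'clear')  # 取消赞同/反对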
"""
from .answer import Answer
        from .post import Post
if isinstance(something, Answer):
mapping = {
'up': 'vote_up',
'clear': 'vote_neutral',
'down': 'vote_down'
}
if vote not in mapping.keys():
raise ValueError('Invalid vote value: {0}'.format(vote))
if something.author.url == self.url:
return False
params = {'answer_id': str(something.aid)}
data = {
'_xsrf': something.xsrf,
'method': mapping[vote],
'params': json.dumps(params)
}
headers = dict(Default_Header)
headers['Referer'] = something.question.url[:-1]
res = self._session.post(Upvote_Answer_Url,
headers=headers, data=data)
return res.json()['r'] == 0
elif isinstance(something, Post):
mapping = {
'up': 'like',
'clear': 'none',
'down': 'dislike'
}
if vote not in mapping.keys():
raise ValueError('Invalid vote value: {0}'.format(vote))
if something.author.url == self.url:
return False
put_url = Upvote_Article_Url.format(
something.column_in_name, something.slug)
data = {'value': mapping[vote]}
headers = {
'Content-Type': 'application/json;charset=utf-8',
'Host': 'zhuanlan.zhihu.com',
'Referer': something.url[:-1],
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; '
'rv:39.0) Gecko/20100101 Firefox/39.0',
'X-XSRF-TOKEN': self._session.cookies.get('XSRF-TOKEN')
}
res = self._session.put(put_url, json.dumps(data), headers=headers)
return res.status_code == 204
else:
raise ValueError('argument something need to be '
'zhihu.Answer or zhihu.Post object.')
def thanks(self, answer, thanks=True):
"""感谢或取消感谢回答
:param Answer answer: 要感谢或取消感谢的回答
:param thanks: True-->感谢,False-->取消感谢
:return: 成功返回True,失败返回False
:rtype: bool
"""
from .answer import Answer
if isinstance(answer, Answer) is False:
raise ValueError('argument answer need to be Zhihu.Answer object.')
if answer.author.url == self.url:
return False
data = {
'_xsrf': answer.xsrf,
'aid': answer.aid
}
res = self._session.post(Thanks_Url if thanks else Cancel_Thanks_Url,
data=data)
return res.json()['r'] == 0
def follow(self, something, follow=True):
"""关注用户、问题、话题或收藏夹
:param Author/Question/Topic something: 需要关注的对象
:param bool follow: True-->关注,False-->取消关注
:return: 成功返回True,失败返回False
:rtype: bool
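
        用法示意(假设 client 已登录,question 为 :class:`.Question` 对象,仅供参考):

        .. code-block:: python

            me = client.me()
            me.follow(question)                 # 关注问题
            me.follow(question, follow=False)   # 取消关注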
"""
from .question import Question
from .topic import Topic
from .collection import Collection
if isinstance(something, Author):
if something.url == self.url:
return False
data = {
'_xsrf': something.xsrf,
                'method': 'follow_member' if follow else 'unfollow_member',
'params': json.dumps({'hash_id': something.hash_id})
}
res = self._session.post(Follow_Author_Url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Question):
data = {
'_xsrf': something.xsrf,
'method': 'follow_question' if follow else 'unfollow_question',
'params': json.dumps({'question_id': str(something.qid)})
}
res = self._session.post(Follow_Question_Url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Topic):
data = {
'_xsrf': something.xsrf,
'method': 'follow_topic' if follow else 'unfollow_topic',
'params': json.dumps({'topic_id': something.tid})
}
res = self._session.post(Follow_Topic_Url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Collection):
data = {
'_xsrf': something.xsrf,
'favlist_id': something.cid
}
res = self._session.post(
Follow_Collection_Url if follow else Unfollow_Collection_Url,
data=data)
return res.json()['r'] == 0
else:
raise ValueError('argument something need to be '
'zhihu.Author, zhihu.Question'
', Zhihu.Topic or Zhihu.Collection object.')
def add_comment(self, answer, content):
"""给指定答案添加评论
:param Answer answer: 答案对象
:param string content: 评论内容
:return: 成功返回 True,失败返回 False
:rtype: bool
"""
from .answer import Answer
if isinstance(answer, Answer) is False:
raise ValueError('argument answer need to be Zhihu.Answer object.')
if not content:
            raise ValueError('comment content cannot be empty')
data = {
'method': 'add_comment',
'params': json.dumps({'answer_id': answer.aid, 'content': content}),
'_xsrf': answer.xsrf
}
res = self._session.post(Answer_Add_Comment_URL,
data=data)
return res.json()['r'] == 0
def send_message(self, author, content):
"""发送私信给一个用户
:param Author author: 接收私信用户对象
:param string content: 发送给用户的私信内容
:return: 成功返回 True,失败返回 False
:rtype: bool
"""
if isinstance(author, Author) is False:
            raise ValueError('argument author needs to be a Zhihu.Author object.')
if not content:
            raise ValueError('message content cannot be empty')
if author.url == self.url:
return False
data = {
'member_id': author.hash_id,
'content': content,
'token': '',
'_xsrf': author.xsrf
}
res = self._session.post(Send_Message_Url,
data=data)
return res.json()['r'] == 0
def block(self, something, block=True):
"""屏蔽某个用户、话题
:param Author/Topic something:
:param block: True-->屏蔽,False-->取消屏蔽
:return: 成功返回 True,失败返回 False
:rtype: bool
"""
from .topic import Topic
if isinstance(something, Author):
if something.url == self.url:
return False
data = {
'_xsrf': something.xsrf,
'action': 'add' if block else 'cancel',
}
block_author_url = something.url + 'block'
res = self._session.post(block_author_url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Topic):
tid = something.tid
data = {
'_xsrf': something.xsrf,
'method': 'add' if block else 'del',
'tid': tid,
}
block_topic_url = 'http://www.zhihu.com/topic/ignore'
res = self._session.post(block_topic_url, data=data)
return res.status_code == 200
else:
raise ValueError('argument something need to be '
'Zhihu.Author or Zhihu.Topic object.')
def unhelpful(self, answer, unhelpful=True):
"""没有帮助或取消没有帮助回答
:param Answer answer: 要没有帮助或取消没有帮助回答
:param unhelpful: True-->没有帮助,False-->取消没有帮助
:return: 成功返回 True,失败返回 False
:rtype: bool
"""
from .answer import Answer
if isinstance(answer, Answer) is False:
raise ValueError('argument answer need to be Zhihu.Answer object.')
if answer.author.url == self.url:
return False
data = {
'_xsrf': answer.xsrf,
'aid': answer.aid
}
res = self._session.post(Unhelpful_Url if unhelpful else Cancel_Unhelpful_Url,
data=data)
return res.json()['r'] == 0 | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/me.py | me.py |
import functools
import re
import os
from requests import Session
from bs4 import BeautifulSoup as _Bs
from bs4 import Tag, NavigableString
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util import Retry
try:
__import__('lxml')
BeautifulSoup = lambda makeup: _Bs(makeup, 'lxml')
except ImportError:
BeautifulSoup = lambda makeup: _Bs(makeup, 'html.parser')
Default_Header = {'X-Requested-With': 'XMLHttpRequest',
'Referer': 'http://www.zhihu.com',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; '
'rv:39.0) Gecko/20100101 Firefox/39.0',
'Host': 'www.zhihu.com'}
Zhihu_URL = 'https://www.zhihu.com'
Login_URL = Zhihu_URL + '/login/email'
Captcha_URL = Zhihu_URL + '/captcha.gif'
Get_Profile_Card_URL = Zhihu_URL + '/node/MemberProfileCardV2'
Question_Get_More_Answer_URL = Zhihu_URL + '/node/QuestionAnswerListV2'
Answer_Add_Comment_URL = Zhihu_URL + '/node/AnswerCommentAddV2'
Answer_Comment_Box_URL = Zhihu_URL + '/node/AnswerCommentBoxV2'
Get_Answer_Comment_URL = Zhihu_URL + '/r/answers/{0}/comments'
Author_Get_More_Followers_URL = Zhihu_URL + '/node/ProfileFollowersListV2'
Author_Get_More_Followees_URL = Zhihu_URL + '/node/ProfileFolloweesListV2'
Author_Get_More_Follow_Column_URL = Zhihu_URL + \
'/node/ProfileFollowedColumnsListV2'
Author_Get_More_Follow_Topic_URL = Zhihu_URL + \
'/people/{0}/topics'
PROTOCOL = ''
Column_Url = 'http://zhuanlan.zhihu.com'
Column_API = Column_Url + '/api/columns'
Column_Data = Column_API + '/{0}'
Column_Posts_Data = Column_API + '/{0}/posts?limit=10&offset={1}'
Column_Post_Data = Column_Url + '/api/posts/{0}'
Post_Get_Upvoter = Column_Post_Data + '/likers'
Topic_Url = Zhihu_URL + '/topic'
Topic_Get_Children_Url = Topic_Url + '/{0}/organize/entire'
Topic_Get_More_Follower_Url = Topic_Url + '/{0}/followers'
Topic_Questions_Url = Topic_Url + '/{0}/questions'
Topic_Unanswered_Question_Url = Topic_Url + '/{0}/unanswered'
Topic_Top_Answers_Url = Topic_Url + '/{0}/top-answers'
Topic_Hot_Questions_Url = Topic_Url + '/{0}/hot'
Topic_Newest_Url = Topic_Url + '/{0}/newest'
Get_Me_Info_Url = Column_Url + '/api/me'
Upvote_Answer_Url = Zhihu_URL + '/node/AnswerVoteBarV2'
Upvote_Article_Url = Column_API + '/{0}/posts/{1}/rating'
Follow_Author_Url = Zhihu_URL + '/node/MemberFollowBaseV2'
Follow_Question_Url = Zhihu_URL + '/node/QuestionFollowBaseV2'
Follow_Topic_Url = Zhihu_URL + '/node/TopicFollowBaseV2'
Follow_Collection_Url = Zhihu_URL + '/collection/follow'
Unfollow_Collection_Url = Zhihu_URL + '/collection/unfollow'
Thanks_Url = Zhihu_URL + '/answer/thanks'
Cancel_Thanks_Url = Zhihu_URL + '/answer/cancel_thanks'
Send_Message_Url = Zhihu_URL + '/inbox/post'
Unhelpful_Url = Zhihu_URL + '/answer/not_helpful'
Cancel_Unhelpful_Url = Zhihu_URL + '/answer/helpful'
Get_Collection_Url = Zhihu_URL + '/node/AnswerFavlists'
re_question_url = re.compile(
r'^https?://www\.zhihu\.com/question/\d+(\?sort=created|/?)$')
re_question_url_std = re.compile(r'^https?://www\.zhihu\.com/question/\d+/?')
re_ans_url = re.compile(
r'^https?://www\.zhihu\.com/question/\d+/answer/\d+/?$')
re_author_url = re.compile(r'^https?://www\.zhihu\.com/(?:people|org)/[^/]+/?$')
re_collection_url = re.compile(r'^https?://www\.zhihu\.com/collection/\d+/?$')
re_column_url = re.compile(r'^http://zhuanlan\.zhihu\.com/([^/]+)/?$')
re_post_url = re.compile(r'^http://zhuanlan\.zhihu\.com/p/(\d+)/?$')
re_topic_url = re.compile(r'^https?://www\.zhihu\.com/topic/(\d+)/?$')
re_a2q = re.compile(r'(.*)/answer/.*')
re_collection_url_split = re.compile(r'.*(/c.*)')
re_get_number = re.compile(r'[^\d]*(\d+).*')
re_del_empty_line = re.compile(r'\n*(.*)\n*')
def check_soup(attr, soup_type='_make_soup'):
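    """装饰器工厂:为属性方法提供简单的取值缓存。

    若实例上名为 ``attr`` 的缓存属性已有值则直接返回;否则先按需调用
    ``soup_type`` 指定的方法(默认 ``_make_soup``)准备页面数据,
    再执行原方法并把返回值缓存到该属性上。
    """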
def real(func):
@functools.wraps(func)
def wrapper(self):
# noinspection PyTypeChecker
value = getattr(self, attr, None)
if value is None:
if soup_type == '_make_soup':
getattr(self, soup_type)()
elif self.soup is None:
getattr(self, soup_type)()
value = func(self)
setattr(self, attr, value)
return value
return wrapper
return real
def class_common_init(url_re, allowed_none=True, trailing_slash=True):
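    """装饰器工厂:封装各知乎类 ``__init__`` 的公共逻辑。

    校验 url 是否匹配 ``url_re``、按需补全末尾斜杠;若调用方未传入 session,
    则新建一个带自动重试的 requests 会话;最后把 ``self.soup`` 初始化为 None。
    """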
def real(func):
@functools.wraps(func)
def wrapper(self, url, *args, **kwargs):
if url is None and not allowed_none:
raise ValueError('Invalid Url: ' + url)
if url is not None:
if url_re.match(url) is None:
raise ValueError('Invalid URL: ' + url)
if not url.endswith('/') and trailing_slash:
url += '/'
            if kwargs.get('session') is None:
                kwargs['session'] = Session()
                retry_adapter = HTTPAdapter(max_retries=Retry(5))
                kwargs['session'].mount('https://', retry_adapter)
                kwargs['session'].mount('http://', retry_adapter)
self.soup = None
return func(self, url, *args, **kwargs)
return wrapper
return real
def remove_invalid_char(text):
"""去除字符串中的无效字符,一般用于保存文件时保证文件名的有效性.
:param str text: 待处理的字符串
:return: 处理后的字符串
:rtype: str
"""
invalid_char_list = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n']
res = ''
for char in text:
if char not in invalid_char_list:
res += char
return res
def parser_author_from_tag(author):
author_link = author.find('a', class_='author-link')
if author_link is None:
return None, '匿名用户', '', ''
else:
author_name = author_link.text
motto_span = author.find('span', class_='bio')
author_motto = motto_span['title'] \
if motto_span is not None else ''
author_url = Zhihu_URL + author_link['href']
avatar_link = author.find('a', class_='avatar-link')
photo_url = PROTOCOL + avatar_link.img['src'].replace('_s', '_r')
return author_url, author_name, author_motto, photo_url
def parser_author_from_comment(author):
author_avatar = author.find('a', class_='zm-item-link-avatar')
if author_avatar is None:
return None, '匿名用户', ''
else:
author_link = author.find('a', class_='zg-link')
author_name = author_link.text
author_url = author_link['href']
avatar_link = author.find('img', class_='zm-item-img-avatar')
photo_url = PROTOCOL + avatar_link['src'].replace('_s', '_r')
return author_url, author_name, photo_url
def answer_content_process(content):
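    """清理答案正文 HTML,返回可独立保存的完整 HTML 字符串。

    会移除 noscript 标签和外链小图标,把懒加载图片替换成指向原图的 ``img``,
    并包上 html/head/body 结构后 prettify 输出。
    """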
content = clone_bs4_elem(content)
del content['class']
soup = BeautifulSoup(
'<html><head><meta charset="utf-8"></head><body></body></html>')
soup.body.append(content)
no_script_list = soup.find_all("noscript")
for no_script in no_script_list:
no_script.extract()
img_list = soup.find_all(
"img", class_=["origin_image", "content_image"])
for img in img_list:
if "content_image" in img['class']:
img['data-original'] = img['data-actualsrc']
new_img = soup.new_tag('img', src=PROTOCOL + img['data-original'])
img.replace_with(new_img)
if img.next_sibling is None:
new_img.insert_after(soup.new_tag('br'))
useless_list = soup.find_all("i", class_="icon-external")
for useless in useless_list:
useless.extract()
return soup.prettify()
def get_path(path, filename, mode, default_path, default_name):
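    """计算保存文件时使用的完整路径。

    path/filename 为空时退回到过滤了非法字符的默认值,目录不存在则自动创建;
    若同名文件已存在,会在文件名后追加数字避免覆盖,返回值带扩展名 ``mode``。
    """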
if path is None:
path = os.path.join(
os.getcwd(), remove_invalid_char(default_path))
if filename is None:
filename = remove_invalid_char(default_name)
if os.path.isdir(path) is False:
os.makedirs(path)
temp = filename
i = 0
while os.path.isfile(os.path.join(path, temp) + '.' + mode):
i += 1
temp = filename + str(i)
return os.path.join(path, temp) + '.' + mode
def common_follower(url, xsrf, session):
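    """按每页 20 条分页抓取关注者列表,逐个生成 :class:`.Author` 对象。

    无法解析或匿名的条目会以 ``ANONYMOUS`` 代替。
    """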
from .author import Author, ANONYMOUS
headers = dict(Default_Header)
headers['Referer'] = url
data = {'offset': 0, '_xsrf': xsrf}
gotten_data_num = 20
offset = 0
while gotten_data_num == 20:
data['offset'] = offset
res = session.post(url, data=data, headers=headers)
json_data = res.json()['msg']
gotten_data_num = json_data[0]
offset += gotten_data_num
soup = BeautifulSoup(json_data[1])
follower_divs = soup.find_all('div', class_='zm-profile-card')
for div in follower_divs:
if div.a is not None:
author_name = div.a['title']
author_url = Zhihu_URL + div.a['href']
author_motto = div.find('span', class_='bio').text
author_photo = PROTOCOL + div.img['src'].replace('_m', '_r')
numbers = [re_get_number.match(a.text).group(1)
for a in div.find_all('a', target='_blank')]
try:
yield Author(author_url, author_name, author_motto,
*numbers, photo_url=author_photo,
session=session)
except ValueError: # invalid url
yield ANONYMOUS
else:
yield ANONYMOUS
def clone_bs4_elem(el):
"""Clone a bs4 tag before modifying it.
Code from `http://stackoverflow.com/questions/23057631/clone-element-with
-beautifulsoup`
"""
if isinstance(el, NavigableString):
return type(el)(el)
copy = Tag(None, el.builder, el.name, el.namespace, el.nsprefix)
# work around bug where there is no builder set
# https://bugs.launchpad.net/beautifulsoup/+bug/1307471
copy.attrs = dict(el.attrs)
for attr in ('can_be_empty_element', 'hidden'):
setattr(copy, attr, getattr(el, attr))
for child in el.contents:
copy.append(clone_bs4_elem(child))
return copy | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/common.py | common.py |
import json
from .base import BaseZhihu
from .common import *
class BanException(Exception):
"""当尝试获取被反屏蔽系统限制的用户资料时,将会引发此异常"""
pass
class Author(BaseZhihu):
"""用户类,请使用``ZhihuClient.answer``方法构造对象."""
@class_common_init(re_author_url, True)
def __init__(self, url, name=None, motto=None, follower_num=None,
question_num=None, answer_num=None, upvote_num=None,
thank_num=None, photo_url=None, session=None):
"""创建用户类实例.
:param str url: 用户主页url,形如 http://www.zhihu.com/people/7sdream
:param str name: 用户名字,可选
:param str motto: 用户简介,可选
:param int follower_num: 用户粉丝数,可选
:param int question_num: 用户提问数,可选
:param int answer_num: 用户答案数,可选
:param int upvote_num: 用户获得赞同数,可选
:param int thank_num: 用户获得感谢数,可选
:param str photo_url: 用户头像地址,可选
:param Session session: 使用的网络会话,为空则使用新会话。
:return: 用户对象
:rtype: Author
"""
self.url = url
self._session = session
self.card = None
self._nav_list = None
self._name = name
self._motto = motto
self._follower_num = follower_num
self._question_num = question_num
self._answer_num = answer_num
self._upvote_num = upvote_num
self._thank_num = thank_num
self._photo_url = photo_url
def _gen_soup(self, content):
self.soup = BeautifulSoup(content)
ban_title = self.soup.find("div", class_="ProfileBan-title")
if ban_title is not None:
raise BanException(ban_title.text)
self._nav_list = self.soup.find(
'div', class_='profile-navbar').find_all('a')
def _make_card(self):
if self.card is None and self.url is not None:
params = {'url_token': self.id}
real_params = {'params': json.dumps(params)}
r = self._session.get(Get_Profile_Card_URL, params=real_params)
self.card = BeautifulSoup(r.content)
@property
def id(self):
"""获取用户id,就是网址最后那一部分.
:return: 用户id
:rtype: str
"""
return re.match(r'^.*/([^/]+)/$', self.url).group(1) \
if self.url is not None else ''
@property
@check_soup('_xsrf')
def xsrf(self):
"""获取知乎的反xsrf参数(用不到就忽视吧~)
:return: xsrf参数
:rtype: str
"""
return self.soup.find('input', attrs={'name': '_xsrf'})['value']
@property
@check_soup('_hash_id')
def hash_id(self):
"""获取作者的内部hash id(用不到就忽视吧~)
:return: 用户hash id
:rtype: str
"""
div = self.soup.find('div', class_='zm-profile-header-op-btns')
if div is not None:
return div.button['data-id']
else:
ga = self.soup.find('script', attrs={'data-name': 'ga_vars'})
return json.loads(ga.text)['user_hash']
@property
@check_soup('_name', '_make_card')
def name(self):
"""获取用户名字.
:return: 用户名字
:rtype: str
"""
if self.url is None:
return '匿名用户'
if self.soup is not None:
return self.soup.find('div', class_='title-section').span.text
else:
assert self.card is not None
return self.card.find('span', class_='name').text
@property
@check_soup('_motto', '_make_card')
def motto(self):
"""获取用户自我介绍,由于历史原因,我还是把这个属性叫做motto吧.
:return: 用户自我介绍
:rtype: str
"""
if self.url is None:
return ''
else:
if self.soup is not None:
bar = self.soup.find(
'div', class_='title-section')
if len(bar.contents) < 4:
return ''
else:
return bar.contents[3].text
else:
assert self.card is not None
motto = self.card.find('div', class_='tagline')
return motto.text if motto is not None else ''
@property
@check_soup('_photo_url', '_make_card')
def photo_url(self):
"""获取用户头像图片地址.
:return: 用户头像url
:rtype: str
"""
if self.url is not None:
if self.soup is not None:
img = self.soup.find('img', class_='Avatar Avatar--l')['src']
return img.replace('_l', '_r')
else:
assert (self.card is not None)
return PROTOCOL + self.card.img['src'].replace('_xs', '_r')
else:
return 'http://pic1.zhimg.com/da8e974dc_r.jpg'
@property
@check_soup('_followee_num')
def followee_num(self):
"""获取关注了多少人.
:return: 关注的人数
:rtype: int
"""
if self.url is None:
return 0
else:
number = int(self.soup.find(
'div', class_='zm-profile-side-following').a.strong.text)
return number
@property
@check_soup('_follower_num')
def follower_num(self):
"""获取追随者数量,就是关注此人的人数.
:return: 追随者数量
:rtype: int
"""
if self.url is None:
return 0
else:
number = int(self.soup.find(
'div', class_='zm-profile-side-following zg-clear').find_all(
'a')[1].strong.text)
return number
@property
@check_soup('_upvote_num')
def upvote_num(self):
"""获取收到的的赞同数量.
:return: 收到的的赞同数量
:rtype: int
"""
if self.url is None:
return 0
else:
number = int(self.soup.find(
'span', class_='zm-profile-header-user-agree').strong.text)
return number
@property
@check_soup('_thank_num')
def thank_num(self):
"""获取收到的感谢数量.
:return: 收到的感谢数量
:rtype: int
"""
if self.url is None:
return 0
else:
number = int(self.soup.find(
'span', class_='zm-profile-header-user-thanks').strong.text)
return number
@property
@check_soup('_weibo_url')
def weibo_url(self):
"""获取用户微博链接.
:return: 微博链接地址,如没有则返回 ‘unknown’
:rtype: str
"""
if self.url is None:
return None
else:
tmp = self.soup.find(
'a', class_='zm-profile-header-user-weibo')
return tmp['href'] if tmp is not None else 'unknown'
@property
def business(self):
"""用户的行业.
:return: 用户的行业,如没有则返回 ‘unknown’
:rtype: str
"""
return self._find_user_profile('business')
@property
def location(self):
"""用户的所在地.
:return: 用户的所在地,如没有则返回 ‘unknown’
:rtype: str
"""
return self._find_user_profile('location')
@property
def education(self):
"""用户的教育状况.
:return: 用户的教育状况,如没有则返回 ‘unknown’
:rtype: str
"""
return self._find_user_profile('education')
def _find_user_profile(self, t):
self._make_soup()
if self.url is None:
return 'unknown'
else:
res = self.soup.find(
'span', class_=t)
if res and res.has_attr('title'):
return res['title']
else:
return 'unknown'
@property
@check_soup('_gender')
def gender(self):
"""用户的性别.
:return: 用户的性别(male/female/unknown)
:rtype: str
"""
if self.url is None:
return 'unknown'
else:
return 'female' \
if self.soup.find('i', class_='icon-profile-female') \
else 'male'
@property
@check_soup('_question_num')
def question_num(self):
"""获取提问数量.
:return: 提问数量
:rtype: int
"""
if self.url is None:
return 0
else:
return int(self._nav_list[1].span.text)
@property
@check_soup('_answer_num')
def answer_num(self):
"""获取答案数量.
:return: 答案数量
:rtype: int
"""
if self.url is None:
return 0
else:
return int(self._nav_list[2].span.text)
@property
@check_soup('_post_num')
def post_num(self):
"""获取专栏文章数量.
:return: 专栏文章数量
:rtype: int
"""
if self.url is None:
return 0
else:
return int(self._nav_list[3].span.text)
@property
@check_soup('_collection_num')
def collection_num(self):
"""获取收藏夹数量.
:return: 收藏夹数量
:rtype: int
"""
if self.url is None:
return 0
else:
return int(self._nav_list[4].span.text)
@property
@check_soup('_followed_column_num')
def followed_column_num(self):
"""获取用户关注的专栏数
:return: 关注的专栏数
:rtype: int
"""
if self.url is not None:
tag = self.soup.find('div', class_='zm-profile-side-columns')
if tag is not None:
return int(re_get_number.match(
tag.parent.strong.text).group(1))
return 0
@property
@check_soup('_followed_topic_num')
def followed_topic_num(self):
"""获取用户关注的话题数
:return: 关注的话题数
:rtype: int
"""
if self.url is not None:
tag = self.soup.find('div', class_='zm-profile-side-topics')
if tag is not None:
return int(re_get_number.match(
tag.parent.strong.text).group(1))
return 0
@property
def questions(self):
"""获取用户的所有问题.
:return: 用户的所有问题,返回生成器.
:rtype: Question.Iterable
"""
from .question import Question
if self.url is None or self.question_num == 0:
return
for page_index in range(1, (self.question_num - 1) // 20 + 2):
html = self._session.get(
self.url + 'asks?page=' + str(page_index)).text
soup = BeautifulSoup(html)
question_links = soup.find_all('a', class_='question_link')
question_datas = soup.find_all(
'div', class_='zm-profile-section-main')
for link, data in zip(question_links, question_datas):
url = Zhihu_URL + link['href']
title = link.text.strip()
answer_num = int(
re_get_number.match(data.div.contents[4]).group(1))
follower_num = int(
re_get_number.match(data.div.contents[6]).group(1))
q = Question(url, title, follower_num, answer_num,
session=self._session)
yield q
@property
def answers(self):
"""获取用户的所有答案.
:return: 用户所有答案,返回生成器.
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
if self.url is None or self.answer_num == 0:
return
for page_index in range(1, (self.answer_num - 1) // 20 + 2):
html = self._session.get(
self.url + 'answers?page=' + str(page_index)).text
soup = BeautifulSoup(html)
questions = soup.find_all('a', class_='question_link')
upvotes = soup.find_all('a', class_='zm-item-vote-count')
for q, upvote in zip(questions, upvotes):
answer_url = Zhihu_URL + q['href']
question_url = Zhihu_URL + re_a2q.match(q['href']).group(1)
question_title = q.text
upvote_num = upvote.text
if upvote_num.isdigit():
upvote_num = int(upvote_num)
else:
upvote_num = None
question = Question(question_url, question_title,
session=self._session)
yield Answer(answer_url, question, self, upvote_num,
session=self._session)
@property
def followers(self):
"""获取关注此用户的人.
:return: 关注此用户的人,返回生成器
:rtype: Author.Iterable
"""
for x in self._follow_ee_ers('er'):
yield x
@property
def followees(self):
"""获取用户关注的人.
:return: 用户关注的人的,返回生成器
:rtype: Author.Iterable
"""
for x in self._follow_ee_ers('ee'):
yield x
def followers_skip(self, skip):
"""获取关注此用户的人,跳过前 skip 个用户。
:return: 关注此用户的人,返回生成器
:rtype: Author.Iterable
"""
for x in self._follow_ee_ers('er', skip):
yield x
def followees_skip(self, skip):
"""获取用户关注的人,跳过前 skip 个用户。
:return: 用户关注的人的,返回生成器
:rtype: Author.Iterable
"""
for x in self._follow_ee_ers('ee', skip):
yield x
def _follow_ee_ers(self, t, skip=0):
if self.url is None:
return
if t == 'er':
request_url = Author_Get_More_Followers_URL
else:
request_url = Author_Get_More_Followees_URL
self._make_card()
if self.hash_id is None:
self._make_soup()
headers = dict(Default_Header)
headers['Referer'] = self.url + 'follow' + t + 's'
params = {"order_by": "created", "offset": 0, "hash_id": self.hash_id}
data = {'_xsrf': self.xsrf, 'method': 'next', 'params': ''}
gotten_date_num = 20
offset = skip
while gotten_date_num == 20:
params['offset'] = offset
data['params'] = json.dumps(params)
res = self._session.post(request_url, data=data, headers=headers)
json_data = res.json()
gotten_date_num = len(json_data['msg'])
offset += gotten_date_num
for html in json_data['msg']:
soup = BeautifulSoup(html)
h2 = soup.find('h2')
author_name = h2.a.text
author_url = h2.a['href']
author_motto = soup.find('span', class_='bio').text
author_photo = PROTOCOL + soup.a.img['src'].replace('_m', '_r')
numbers = [
int(re_get_number.match(x.text).group(1))
for x in soup.find_all('a', class_="zg-link-gray-normal")
]
try:
yield Author(author_url, author_name, author_motto,
*numbers,
photo_url=author_photo, session=self._session)
except ValueError: # invalid url
yield ANONYMOUS
@property
def collections(self):
"""获取用户收藏夹.
:return: 用户收藏夹,返回生成器
:rtype: Collection.Iterable
"""
from .collection import Collection
if self.url is None or self.collection_num == 0:
return
else:
collection_num = self.collection_num
for page_index in range(1, (collection_num - 1) // 20 + 2):
html = self._session.get(
self.url + 'collections?page=' + str(page_index)).text
soup = BeautifulSoup(html)
collections_names = soup.find_all(
'a', class_='zm-profile-fav-item-title')
collection_follower_nums = soup.find_all(
'div', class_='zm-profile-fav-bio')
for c, f in zip(collections_names, collection_follower_nums):
c_url = Zhihu_URL + c['href']
c_name = c.text
c_fn = int(re_get_number.match(f.contents[2]).group(1))
yield Collection(c_url, self, c_name, c_fn,
session=self._session)
@property
def columns(self):
"""获取用户专栏.
:return: 用户专栏,返回生成器
:rtype: Column.Iterable
"""
from .column import Column
if self.url is None or self.post_num == 0:
return
soup = BeautifulSoup(self._session.get(self.url + 'posts').text)
column_list = soup.find('div', class_='column-list')
column_tags = column_list.find_all('div', class_='item')
for column_tag in column_tags:
name = column_tag['title']
url = column_tag['data-href']
numbers = column_tag.find('span', class_='des').text.split('•')
follower_num = int(re_get_number.match(numbers[0]).group(1))
if len(numbers) == 1:
post_num = 0
else:
post_num = int(
re_get_number.match(numbers[1]).group(1))
yield Column(url, name, follower_num, post_num,
session=self._session)
@property
def followed_columns(self):
"""获取用户关注的专栏.
:return: 用户关注的专栏,返回生成器
:rtype: Column.Iterable
"""
from .column import Column
if self.url is None:
return
if self.followed_column_num > 0:
tag = self.soup.find('div', class_='zm-profile-side-columns')
if tag is not None:
for a in tag.find_all('a'):
yield Column(a['href'], a.img['alt'],
session=self._session)
if self.followed_column_num > 7:
offset = 7
gotten_data_num = 20
while gotten_data_num == 20:
params = {
'hash_id': self.hash_id,
'limit': 20,
'offset': offset
}
data = {
'method': 'next',
'_xsrf': self.xsrf,
'params': json.dumps(params)
}
j = self._session.post(Author_Get_More_Follow_Column_URL,
data=data).json()
gotten_data_num = len(j['msg'])
offset += gotten_data_num
for msg in map(BeautifulSoup, j['msg']):
name = msg.strong.text
url = msg.a['href']
post_num = int(re_get_number.match(
msg.span.text).group(1))
yield Column(url, name, post_num=post_num,
session=self._session)
@property
def followed_topics(self):
"""获取用户关注的话题.
:return: 用户关注的话题,返回生成器
:rtype: Topic.Iterable
"""
from .topic import Topic
if self.url is None:
return
if self.followed_topic_num > 0:
tag = self.soup.find('div', class_='zm-profile-side-topics')
if tag is not None:
for a in tag.find_all('a'):
yield Topic(Zhihu_URL + a['href'], a.img['alt'],
session=self._session)
if self.followed_topic_num > 7:
offset = 7
gotten_data_num = 20
while gotten_data_num == 20:
data = {'start': 0, 'offset': offset, '_xsrf': self.xsrf}
j = self._session.post(
Author_Get_More_Follow_Topic_URL.format(self.id),
data=data).json()
gotten_data_num = j['msg'][0]
offset += gotten_data_num
topic_item = BeautifulSoup(j['msg'][1]).find_all(
'div', class_='zm-profile-section-item')
for div in topic_item:
name = div.strong.text
url = Zhihu_URL + div.a['href']
yield Topic(url, name, session=self._session)
@property
def activities(self):
"""获取用户的最近动态.
:return: 最近动态,返回生成器,具体说明见 :class:`.Activity`
:rtype: Activity.Iterable
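
        用法示意(假设 author 为 :class:`.Author` 对象,仅供参考):

        .. code-block:: python

            from zhihu.acttype import ActType

            for act in author.activities:
                if act.type is ActType.ANSWER_QUESTION:
                    print(act.time, act.content.question.title)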
"""
from .activity import Activity
if self.url is None:
return
gotten_feed_num = 20
start = '0'
api_url = self.url + 'activities'
while gotten_feed_num == 20:
data = {'_xsrf': self.xsrf, 'start': start}
res = self._session.post(api_url, data=data)
gotten_feed_num = res.json()['msg'][0]
soup = BeautifulSoup(res.json()['msg'][1])
acts = soup.find_all(
'div', class_='zm-profile-section-item zm-item clearfix')
start = acts[-1]['data-time'] if len(acts) > 0 else 0
for act in acts:
# --- ignore Round Table temporarily ---
if act.attrs['data-type-detail'] == "member_follow_roundtable":
continue
# --- --- --- --- -- --- --- --- --- ---
yield Activity(act, self._session, self)
@property
def last_activity_time(self):
"""获取用户最后一次活动的时间
:return: 用户最后一次活动的时间,返回值为 unix 时间戳
:rtype: int
"""
self._make_soup()
act = self.soup.find(
'div', class_='zm-profile-section-item zm-item clearfix')
return int(act['data-time']) if act is not None else -1
def is_zero_user(self):
"""返回当前用户是否为三零用户,其实是四零: 赞同0,感谢0,提问0,回答0.
:return: 是否是三零用户
:rtype: bool
"""
return self.upvote_num + self.thank_num + \
self.question_num + self.answer_num == 0
class _Anonymous:
def __init__(self):
self.name = "匿名用户"
self.url = ''
ANONYMOUS = _Anonymous()
"""匿名用户常量,通过 ``zhihu.ANONYMOUS`` 访问。
提问者、回答者、点赞者、问题关注者、评论者都可能是 ``ANONYMOUS``
""" | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/author.py | author.py |
from .common import *
from .base import BaseZhihu
class Collection(BaseZhihu):
"""收藏夹,请使用``ZhihuClient.collection``方法构造对象."""
@class_common_init(re_collection_url)
def __init__(self, url, owner=None, name=None, follower_num=None,
session=None):
"""创建收藏夹类实例.
:param str url: 收藏夹主页url,必须
:param Author owner: 收藏夹拥有者,可选
:param str name: 收藏夹标题,可选
:param int follower_num: 收藏夹关注人数,可选
:param Session session: 使用的网络会话,为空则使用新会话。
:return: 收藏夹对象
:rtype: Collection
"""
self.url = url
self._session = session
self.soup = None
self._name = name
self._owner = owner
self._follower_num = follower_num
self._id = int(re.match(r'.*/(\d+)', self.url).group(1))
@property
def id(self):
"""获取收藏夹id(网址最后的部分).
:return: 收藏夹id
:rtype: int
"""
return self._id
@property
@check_soup('_cid')
def cid(self):
"""获取收藏夹内部Id(用不到忽视就好)
:return: 内部Id
:rtype: int
"""
return int(re_get_number.match(
self.soup.find('a', attrs={'name': 'focus'})['id']).group(1))
@property
@check_soup('_xsrf')
def xsrf(self):
"""获取知乎的反xsrf参数(用不到就忽视吧~)
:return: xsrf参数
:rtype: str
"""
return self.soup.find(
'input', attrs={'name': '_xsrf'})['value']
@property
@check_soup('_name')
def name(self):
"""获取收藏夹名字.
:return: 收藏夹名字
:rtype: str
"""
return re_del_empty_line.match(
self.soup.find('h2', id='zh-fav-head-title').text).group(1)
@property
@check_soup('_owner')
def owner(self):
"""获取收藏夹拥有者,返回Author对象.
:return: 收藏夹拥有者
:rtype: Author
"""
from .author import Author
a = self.soup.find('h2', class_='zm-list-content-title').a
name = a.text
url = Zhihu_URL + a['href']
motto = self.soup.find(
'div', id='zh-single-answer-author-info').div.text
photo_url = PROTOCOL + self.soup.find(
'img', class_='zm-list-avatar-medium')['src'].replace('_m', '_r')
return Author(url, name, motto, photo_url=photo_url,
session=self._session)
@property
@check_soup('_follower_num')
def follower_num(self):
"""获取关注此收藏夹的人数.
:return: 关注此收藏夹的人数
:rtype: int
"""
href = re_collection_url_split.match(self.url).group(1)
return int(self.soup.find('a', href=href + 'followers').text)
@property
def followers(self):
"""获取关注此收藏夹的用户
:return: 关注此收藏夹的用户
:rtype: Author.Iterable
"""
self._make_soup()
followers_url = self.url + 'followers'
for x in common_follower(followers_url, self.xsrf, self._session):
yield x
@property
def questions(self):
"""获取收藏夹内所有问题对象.
:return: 收藏夹内所有问题,返回生成器
:rtype: Question.Iterable
"""
self._make_soup()
# noinspection PyTypeChecker
for question in self._page_get_questions(self.soup):
yield question
i = 2
while True:
soup = BeautifulSoup(self._session.get(
self.url[:-1] + '?page=' + str(i)).text)
for question in self._page_get_questions(soup):
if question == 0:
return
yield question
i += 1
@property
def answers(self):
"""获取收藏夹内所有答案对象.
:return: 收藏夹内所有答案,返回生成器
:rtype: Answer.Iterable
"""
self._make_soup()
# noinspection PyTypeChecker
for answer in self._page_get_answers(self.soup):
yield answer
i = 2
while True:
soup = BeautifulSoup(self._session.get(
self.url[:-1] + '?page=' + str(i)).text)
for answer in self._page_get_answers(soup):
if answer == 0:
return
yield answer
i += 1
@property
def logs(self):
"""获取收藏夹日志
:return: 收藏夹日志中的操作,返回生成器
:rtype: CollectActivity.Iterable
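
        用法示意(假设 collection 为 :class:`.Collection` 对象,仅供参考):

        .. code-block:: python

            from zhihu.acttype import CollectActType

            for log in collection.logs:
                if log.type is CollectActType.INSERT_ANSWER:
                    print(log.time, log.answer.question.title)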
"""
import time
from datetime import datetime
from .answer import Answer
from .question import Question
from .acttype import CollectActType
self._make_soup()
gotten_feed_num = 20
offset = 0
data = {
'start': 0,
'_xsrf': self.xsrf
}
api_url = self.url + 'log'
while gotten_feed_num == 20:
data['offset'] = offset
res = self._session.post(url=api_url, data=data)
gotten_feed_num = res.json()['msg'][0]
soup = BeautifulSoup(res.json()['msg'][1])
offset += gotten_feed_num
zm_items = soup.find_all('div', class_='zm-item')
for zm_item in zm_items:
act_time = datetime.strptime(zm_item.find('time').text, "%Y-%m-%d %H:%M:%S")
if zm_item.find('ins'):
link = zm_item.find('ins').a
act_type = CollectActType.INSERT_ANSWER
elif zm_item.find('del'):
link = zm_item.find('del').a
act_type = CollectActType.DELETE_ANSWER
else:
continue
try:
answer_url = Zhihu_URL + link['href']
question_url = re_a2q.match(answer_url).group(1)
question = Question(question_url, link.text)
answer = Answer(
answer_url, question, session=self._session)
yield CollectActivity(
act_type, act_time, self.owner, self, answer)
except AttributeError:
act_type = CollectActType.CREATE_COLLECTION
yield CollectActivity(
act_type, act_time, self.owner, self)
data['start'] = zm_items[-1]['id'][8:]
time.sleep(0.5)
def _page_get_questions(self, soup):
from .question import Question
question_tags = soup.find_all("div", class_="zm-item")
if len(question_tags) == 0:
yield 0
return
else:
for question_tag in question_tags:
if question_tag.h2 is not None:
question_title = question_tag.h2.a.text
question_url = Zhihu_URL + question_tag.h2.a['href']
yield Question(question_url, question_title,
session=self._session)
def _page_get_answers(self, soup):
from .question import Question
from .author import Author, ANONYMOUS
from .answer import Answer
answer_tags = soup.find_all("div", class_="zm-item")
if len(answer_tags) == 0:
yield 0
return
else:
question = None
for tag in answer_tags:
# 判断是否是'建议修改的回答'等情况
url_tag = tag.find('a', class_='answer-date-link')
if url_tag is None:
reason = tag.find('div', id='answer-status').p.text
print("pass a answer, reason %s ." % reason)
continue
if tag.h2 is not None:
question_title = tag.h2.a.text
question_url = Zhihu_URL + tag.h2.a['href']
question = Question(question_url, question_title,
session=self._session)
answer_url = Zhihu_URL + url_tag['href']
div = tag.find('div', class_='zm-item-answer-author-info')
author_link = div.find('a', class_='author-link')
if author_link is not None:
author_url = Zhihu_URL + author_link['href']
author_name = author_link.text
motto_span = div.find('span', class_='bio')
author_motto = motto_span['title'] if motto_span else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
else:
author = ANONYMOUS
upvote_num = tag.find('a', class_='zm-item-vote-count').text
if upvote_num.isdigit():
upvote_num = int(upvote_num)
else:
upvote_num = None
answer = Answer(answer_url, question, author,
upvote_num, session=self._session)
yield answer
class CollectActivity:
"""收藏夹操作, 请使用``Collection.logs``构造对象."""
def __init__(self, type, time, owner, collection, answer=None):
"""创建收藏夹操作类实例
:param acttype.CollectActType type: 操作类型
:param datetime.datetime time: 进行操作的时间
:param Author owner: 收藏夹的拥有者
:param Collection collection: 所属收藏夹
:param Answer answer: 收藏的答案,可选
:return: CollectActivity
"""
self._type = type
self._time = time
self._owner = owner
self._collection = collection
self._answer = answer
@property
def type(self):
"""
:return: 收藏夹操作类型, 具体参见 :class:`.CollectActType`
:rtype: :class:`.CollectActType`
"""
return self._type
@property
def answer(self):
"""
:return: 添加或删除收藏的答案, 若是创建收藏夹操作返回 None
:rtype: Answer or None
"""
return self._answer
@property
def time(self):
"""
:return: 进行操作的时间
:rtype: datetime.datetime
"""
return self._time
@property
def owner(self):
"""
:return: 收藏夹的拥有者
:rtype: Author
"""
return self._owner
@property
def collection(self):
"""
:return: 所属收藏夹
:rtype: Collection
"""
return self._collection | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/collection.py | collection.py |
from .common import *
from .base import BaseZhihu, JsonAsSoupMixin
class Column(JsonAsSoupMixin, BaseZhihu):
"""专栏类,请使用``ZhihuClient.column``方法构造对象."""
@class_common_init(re_column_url)
def __init__(self, url, name=None, follower_num=None,
post_num=None, session=None):
"""创建专栏类实例.
:param str url: 专栏url
:param str name: 专栏名,可选
:param int follower_num: 关注者数量,可选
:param int post_num: 文章数量,可选
:param Session session: 使用的网络会话,为空则使用新会话。
:return: 专栏对象
:rtype: Column
"""
self._in_name = re_column_url.match(url).group(1)
self.url = url
self._session = session
self._name = name
self._follower_num = follower_num
self._post_num = post_num
def _make_soup(self):
if self.soup is None:
json = self._get_content()
self._gen_soup(json)
def _get_content(self):
origin_host = self._session.headers.get('Host')
self._session.headers.update(Host='zhuanlan.zhihu.com')
res = self._session.get(Column_Data.format(self._in_name))
self._session.headers.update(Host=origin_host)
return res.json()
@property
@check_soup('_name')
def name(self):
"""获取专栏名称.
:return: 专栏名称
:rtype: str
"""
return self.soup['name']
@property
@check_soup('_follower_num')
def follower_num(self):
"""获取关注人数.
:return: 关注人数
:rtype: int
"""
return int(self.soup['followersCount'])
@property
@check_soup('_post_num')
def post_num(self):
"""获取专栏文章数.
:return: 专栏文章数
:rtype: int
"""
return int(self.soup['postsCount'])
@property
def posts(self):
"""获取专栏的所有文章.
:return: 专栏所有文章,返回生成器
:rtype: Post.Iterable
"""
origin_host = self._session.headers.get('Host')
for offset in range(0, (self.post_num - 1) // 10 + 1):
self._session.headers.update(Host='zhuanlan.zhihu.com')
res = self._session.get(
Column_Posts_Data.format(self._in_name, offset * 10))
soup = res.json()
self._session.headers.update(Host=origin_host)
for post in soup:
yield self._parse_post_data(post)
def _parse_post_data(self, post):
from .author import Author
from .post import Post
url = Column_Url + post['url']
template = post['author']['avatar']['template']
photo_id = post['author']['avatar']['id']
photo_url = template.format(id=photo_id, size='r')
author = Author(post['author']['profileUrl'],
post['author']['name'], post['author']['bio'],
photo_url=photo_url, session=self._session)
title = post['title']
upvote_num = post['likesCount']
comment_num = post['commentsCount']
return Post(url, self, author, title, upvote_num, comment_num,
session=self._session) | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/column.py | column.py |
from datetime import datetime
from .acttype import ActType
from .answer import Answer
from .author import Author, ANONYMOUS
from .collection import Collection
from .column import Column
from .common import *
from .post import Post
from .question import Question
from .topic import Topic
class Activity:
"""用户动态类,请使用Author.activities获取."""
def __init__(self, act, session, author):
"""创建用户动态类实例.
:param bs4.element.Tag act: 表示用户动态的页面元素
:param Session session: 使用的网络会话
:param Author author: Activity 所属的用户对象
:return: 用户动态对象
:rtype: Activity
:说明:
根据Activity.type不同可以获取不同属性,具体请看 :class:`.ActType`
"""
self._session = session
self._author = author
self._type = ActType.from_str(act.attrs['data-type-detail'])
useless_tag = act.div.find('a', class_='zg-link')
if useless_tag is not None:
useless_tag.extract()
attribute = self._get_assemble_method(self.type)(act)
self._attr = attribute.__class__.__name__.lower()
setattr(self, self._attr, attribute)
self._time = datetime.fromtimestamp(int(act['data-time']))
@property
def type(self):
"""
:return: 用户动态类型, 具体参见 :class:`.ActType`
:rtype: class:`.ActType`
"""
return self._type
@property
def content(self):
"""获取此对象中能提供的那个属性,对应表请查看 :class:`.ActType` 类.
:return: 对象提供的对象
:rtype: Author or Question or Answer or Topic or Column or Post
"""
return getattr(self, self._attr)
@property
def time(self):
"""
:return: 返回用户执行 Activity 操作的时间
:rtype: datetime.datetime
"""
return self._time
def __find_post(self, act):
try:
column_url = act.find('a', class_='column_link')['href']
column_name = act.find('a', class_='column_link').text
column = Column(column_url, column_name, session=self._session)
except TypeError:
column = None
try:
author_tag = act.find('div', class_='author-info')
author_url = Zhihu_URL + author_tag.a['href']
author_name = author_tag.a.text
author_motto = author_tag.span.text if author_tag.span else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
except TypeError:
author = ANONYMOUS
post_url = act.find('a', class_='post-link')['href']
post_title = act.find('a', class_='post-link').text
post_comment_num, post_upvote_num = self._parse_un_cn(act)
return Post(post_url, column, author, post_title,
post_upvote_num, post_comment_num,
session=self._session)
def _assemble_create_post(self, act):
return self.__find_post(act)
def _assemble_voteup_post(self, act):
return self.__find_post(act)
def _assemble_follow_column(self, act):
return Column(act.div.a['href'], act.div.a.text, session=self._session)
def _assemble_follow_topic(self, act):
topic_url = Zhihu_URL + act.div.a['href']
topic_name = act.div.a['title']
return Topic(topic_url, topic_name, session=self._session)
def _assemble_answer_question(self, act):
question_url = Zhihu_URL + re_a2q.match(
act.div.find_all('a')[-1]['href']).group(1)
question_title = act.div.find_all('a')[-1].text.strip()
question = Question(question_url, question_title, session=self._session)
answer_url = Zhihu_URL + act.div.find_all('a')[-1]['href']
answer_comment_num, answer_upvote_num = self._parse_un_cn(act)
return Answer(answer_url, question, self._author, answer_upvote_num,
session=self._session)
def _assemble_voteup_answer(self, act):
question_url = Zhihu_URL + re_a2q.match(act.div.a['href']).group(1)
question_title = act.div.a.text.strip()
question = Question(question_url, question_title, session=self._session)
try_find_author = act.find_all('a', class_='author-link',
href=re.compile('^/people/[^/]*$'))
if len(try_find_author) == 0:
author_url = None
author_name = '匿名用户'
author_motto = ''
else:
try_find_author = try_find_author[-1]
author_url = Zhihu_URL + try_find_author['href']
author_name = try_find_author.text
try_find_motto = act.find('span', class_='bio')
if try_find_motto is None:
author_motto = ''
else:
author_motto = try_find_motto['title']
author = Author(author_url, author_name, author_motto,
session=self._session)
answer_url = Zhihu_URL + act.div.a['href']
answer_comment_num, answer_upvote_num = self._parse_un_cn(act)
return Answer(answer_url, question, author, answer_upvote_num,
session=self._session)
def _assemble_ask_question(self, act):
a = act.find("a", class_="question_link")
url = Zhihu_URL + a['href']
title = a.text.strip(' \n')
return Question(url, title, session=self._session)
def _assemble_follow_question(self, act):
return Question(Zhihu_URL + act.div.a['href'], act.div.a.text.strip(),
session=self._session)
def _assemble_follow_collection(self, act):
url = act.div.a['href']
if not url.startswith('http'):
url = Zhihu_URL + url
return Collection(url, session=self._session)
def _get_assemble_method(self, act_type):
assemble_methods = {
ActType.UPVOTE_POST: self._assemble_voteup_post,
ActType.FOLLOW_COLUMN: self._assemble_follow_column,
ActType.UPVOTE_ANSWER: self._assemble_voteup_answer,
ActType.ANSWER_QUESTION: self._assemble_answer_question,
ActType.ASK_QUESTION: self._assemble_ask_question,
ActType.FOLLOW_QUESTION: self._assemble_follow_question,
ActType.FOLLOW_TOPIC: self._assemble_follow_topic,
ActType.PUBLISH_POST: self._assemble_create_post,
ActType.FOLLOW_COLLECTION: self._assemble_follow_collection
}
if act_type in assemble_methods:
return assemble_methods[act_type]
else:
raise ValueError('invalid activity type')
@staticmethod
def _parse_un_cn(act):
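        # Parse the upvote and comment counts from an activity tag. The upvote
        # count becomes None when Zhihu renders it as non-numeric text (e.g. "1K"),
        # and a missing comment count falls back to 0.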
upvote_num = act.find('a', class_='zm-item-vote-count').text
if upvote_num.isdigit():
upvote_num = int(upvote_num)
else:
upvote_num = None
comment = act.find('a', class_='toggle-comment')
comment_text = next(comment.stripped_strings)
comment_num_match = re_get_number.match(comment_text)
comment_num = int(
comment_num_match.group(1)) if comment_num_match is not None else 0
return comment_num, upvote_num | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/activity.py | activity.py |
import time
from datetime import datetime
from .common import *
from .base import BaseZhihu
class Topic(BaseZhihu):
"""答案类,请使用``ZhihuClient.topic``方法构造对象."""
@class_common_init(re_topic_url)
def __init__(self, url, name=None, session=None):
"""创建话题类实例.
:param url: 话题url
:param name: 话题名称,可选
:return: Topic
"""
self.url = url
self._session = session
self._name = name
self._id = int(re_topic_url.match(self.url).group(1))
@property
def id(self):
"""获取话题Id(网址最后那串数字)
:return: 话题Id
:rtype: int
"""
return self._id
@property
@check_soup('_xsrf')
def xsrf(self):
"""获取知乎的反xsrf参数(用不到就忽视吧~)
:return: xsrf参数
:rtype: str
"""
return self.soup.find('input', attrs={'name': '_xsrf'})['value']
@property
@check_soup('_tid')
def tid(self):
"""话题内部Id,有时候要用到
:return: 话题内部Id
:rtype: int
"""
return int(self.soup.find(
'div', id='zh-topic-desc')['data-resourceid'])
@property
@check_soup('_name')
def name(self):
"""获取话题名称.
:return: 话题名称
:rtype: str
"""
return self.soup.find('h1').text
@property
def parents(self):
"""获取此话题的父话题。
注意:由于没找到有很多父话题的话题来测试,
        所以本方法可能在某些时候出现问题,请不吝反馈。
:return: 此话题的父话题,返回生成器
:rtype: Topic.Iterable
"""
self._make_soup()
parent_topic_tag = self.soup.find('div', class_='parent-topic')
        if parent_topic_tag is None:
            return
else:
for topic_tag in parent_topic_tag.find_all('a'):
yield Topic(Zhihu_URL + topic_tag['href'],
topic_tag.text.strip(),
session=self._session)
@property
def children(self):
"""获取此话题的子话题
:return: 此话题的子话题, 返回生成器
:rtype: Topic.Iterable
"""
self._make_soup()
child_topic_tag = self.soup.find('div', class_='child-topic')
if child_topic_tag is None:
return []
elif '共有' not in child_topic_tag.contents[-2].text:
for topic_tag in child_topic_tag.div.find_all('a'):
yield Topic(Zhihu_URL + topic_tag['href'],
topic_tag.text.strip(),
session=self._session)
else:
flag = 'load'
child = ''
data = {'_xsrf': self.xsrf}
params = {
'parent': self.id
}
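            # Each batch's last element doubles as a pagination marker: its first
            # field is 'load' when more children remain (its id is then reused as
            # the ``child`` cursor), or 'topic' when it is itself the final topic.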
while flag == 'load':
params['child'] = child
res = self._session.post(Topic_Get_Children_Url,
params=params, data=data)
j = map(lambda x: x[0], res.json()['msg'][1])
*topics, last = j
for topic in topics:
yield Topic(Zhihu_URL + '/topic/' + topic[2], topic[1],
session=self._session)
flag = last[0]
child = last[2]
if flag == 'topic':
yield Topic(Zhihu_URL + '/topic/' + last[2], last[1],
session=self._session)
@property
@check_soup('_follower_num')
def follower_num(self):
"""获取话题关注人数.
:return: 关注人数
:rtype: int
"""
follower_num_block = self.soup.find(
'div', class_='zm-topic-side-followers-info')
# 无人关注时 找不到对应block,直接返回0 (感谢知乎用户 段晓晨 提出此问题)
if follower_num_block.strong is None:
return 0
return int(follower_num_block.strong.text)
@property
def followers(self):
"""获取话题关注者
:return: 话题关注者,返回生成器
:rtype: Author.Iterable
"""
from .author import Author, ANONYMOUS
self._make_soup()
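        # The follower endpoint returns at most 20 rendered profile cards per
        # request; paging continues until a batch of fewer than 20 arrives, using
        # the id of the last card as the ``start`` cursor for the next request.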
gotten_data_num = 20
data = {
'_xsrf': self.xsrf,
'start': '',
'offset': 0
}
while gotten_data_num == 20:
res = self._session.post(
Topic_Get_More_Follower_Url.format(self.id), data=data)
j = res.json()['msg']
gotten_data_num = j[0]
data['offset'] += gotten_data_num
soup = BeautifulSoup(j[1])
divs = soup.find_all('div', class_='zm-person-item')
for div in divs:
h2 = div.h2
url = Zhihu_URL + h2.a['href']
name = h2.a.text
motto = h2.parent.div.text.strip()
try:
yield Author(url, name, motto, session=self._session)
except ValueError: # invalid url
yield ANONYMOUS
data['start'] = int(re_get_number.match(divs[-1]['id']).group(1))
@property
@check_soup('_photo_url')
def photo_url(self):
"""获取话题头像图片地址.
:return: 话题头像url
:rtype: str
"""
img = self.soup.find('a', id='zh-avartar-edit-form').img['src']
return img.replace('_m', '_r')
@property
@check_soup('_description')
def description(self):
"""获取话题描述信息.
:return: 话题描述信息
:rtype: str
"""
desc = self.soup.find('div', class_='zm-editable-content').text
return desc
@property
def top_authors(self):
"""获取最佳回答者
:return: 此话题下最佳回答者,一般来说是5个,要不就没有,返回生成器
:rtype: Author.Iterable
"""
from .author import Author, ANONYMOUS
self._make_soup()
t = self.soup.find('div', id='zh-topic-top-answerer')
if t is None:
return
for d in t.find_all('div', class_='zm-topic-side-person-item-content'):
url = Zhihu_URL + d.a['href']
name = d.a.text
motto = d.find('span', class_='bio')['title']
try:
yield Author(url, name, motto, session=self._session)
except ValueError: # invalid url
yield ANONYMOUS
@property
def top_answers(self):
"""获取话题下的精华答案.
:return: 话题下的精华答案,返回生成器.
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
from .author import Author, ANONYMOUS
top_answers_url = Topic_Top_Answers_Url.format(self.id)
params = {'page': 1}
while True:
# 超出50页直接返回
if params['page'] > 50:
return
res = self._session.get(top_answers_url, params=params)
params['page'] += 1
soup = BeautifulSoup(res.content)
# 不够50页,来到错误页面 返回
if soup.find('div', class_='error') is not None:
return
questions = soup.find_all('a', class_='question_link')
answers = soup.find_all('a', class_='answer-date-link')
authors = soup.find_all('div', class_='zm-item-answer-author-info')
upvotes = soup.find_all('a', class_='zm-item-vote-count')
for ans, up, q, au in zip(answers, upvotes, questions, authors):
answer_url = Zhihu_URL + ans['href']
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
upvote = up.text
if upvote.isdigit():
upvote = int(upvote)
else:
upvote = None
question = Question(question_url, question_title,
session=self._session)
if au.a is None:
author = ANONYMOUS
else:
author_url = Zhihu_URL + au.a['href']
author_name = au.a.text
author_motto = au.strong['title'] if au.strong else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
yield Answer(answer_url, question, author, upvote,
session=self._session)
@property
def questions(self):
"""获取话题下的所有问题(按时间降序排列)
:return: 话题下所有问题,返回生成器
:rtype: Question.Iterable
"""
from .question import Question
question_url = Topic_Questions_Url.format(self.id)
params = {'page': 1}
older_time_stamp = int(time.time()) * 1000
while True:
res = self._session.get(question_url, params=params)
soup = BeautifulSoup(res.content)
if soup.find('div', class_='error') is not None:
return
questions = soup.find_all('div', class_='question-item')
questions = list(filter(
lambda x: int(x.h2.span['data-timestamp']) < older_time_stamp,
questions))
for qu_div in questions:
url = Zhihu_URL + qu_div.h2.a['href']
title = qu_div.h2.a.text.strip()
creation_time = datetime.fromtimestamp(
int(qu_div.h2.span['data-timestamp']) // 1000)
yield Question(url, title, creation_time=creation_time,
session=self._session)
older_time_stamp = int(questions[-1].h2.span['data-timestamp'])
params['page'] += 1
@property
def unanswered_questions(self):
"""获取话题下的等待回答的问题
什么是「等待回答」的问题:https://www.zhihu.com/question/40470324
:return: 话题下等待回答的问题,返回生成器
:rtype: Question.Iterable
"""
from .question import Question
question_url = Topic_Unanswered_Question_Url.format(self.id)
params = {'page': 1}
while True:
res = self._session.get(question_url, params=params)
soup = BeautifulSoup(res.content)
if soup.find('div', class_='error') is not None:
return
questions = soup.find_all('div', class_='question-item')
for qu_div in questions:
url = Zhihu_URL + qu_div.h2.a['href']
title = qu_div.h2.a.text.strip()
yield Question(url, title, session=self._session)
params['page'] += 1
@property
def answers(self):
"""获取话题下所有答案(按时间降序排列)
:return: 话题下所有答案,返回生成器
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
from .author import Author, ANONYMOUS
newest_url = Topic_Newest_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(newest_url)
soup = BeautifulSoup(res.content)
while True:
divs = soup.find_all('div', class_='folding')
# 如果话题下无答案,则直接返回
if len(divs) == 0:
return
last_score = divs[-1]['data-score']
for div in divs:
q = div.find('a', class_="question_link")
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
question = Question(question_url, question_title,
session=self._session)
ans = div.find('a', class_='answer-date-link')
answer_url = Zhihu_URL + ans['href']
upvote = div.find('a', class_='zm-item-vote-count').text
if upvote.isdigit():
upvote = int(upvote)
else:
upvote = None
au = div.find('div', class_='zm-item-answer-author-info')
if au.a is None:
author = ANONYMOUS
else:
author_url = Zhihu_URL + au.a['href']
author_name = au.a.text
author_motto = au.strong['title'] if au.strong else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
yield Answer(answer_url, question, author, upvote,
session=self._session)
params['offset'] = last_score
res = self._session.post(newest_url, data=params)
gotten_feed_num = res.json()['msg'][0]
# 如果得到内容数量为0则返回
if gotten_feed_num == 0:
return
soup = BeautifulSoup(res.json()['msg'][1])
@property
def hot_questions(self):
"""获取话题下热门的问题
:return: 话题下的热门动态中的问题,按热门度顺序返回生成器
:rtype: Question.Iterable
"""
from .question import Question
hot_questions_url = Topic_Hot_Questions_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(hot_questions_url)
soup = BeautifulSoup(res.content)
while True:
questions_duplicate = soup.find_all('a', class_='question_link')
# 如果话题下无问题,则直接返回
if len(questions_duplicate) == 0:
return
# 去除重复的问题
questions = list(set(questions_duplicate))
questions.sort(key=self._get_score, reverse=True)
last_score = soup.find_all(
'div', class_='feed-item')[-1]['data-score']
for q in questions:
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
question = Question(question_url, question_title,
session=self._session)
yield question
params['offset'] = last_score
res = self._session.post(hot_questions_url, data=params)
gotten_feed_num = res.json()['msg'][0]
# 如果得到问题数量为0则返回
if gotten_feed_num == 0:
return
soup = BeautifulSoup(res.json()['msg'][1])
@property
def hot_answers(self):
"""获取话题下热门的回答
:return: 话题下的热门动态中的回答,按热门度顺序返回生成器
        :rtype: Answer.Iterable
"""
from .question import Question
from .author import Author
from .answer import Answer
hot_questions_url = Topic_Hot_Questions_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(hot_questions_url)
soup = BeautifulSoup(res.content)
while True:
answers_div = soup.find_all('div', class_='feed-item')
last_score = answers_div[-1]['data-score']
for div in answers_div:
# 没有 text area 的情况是:答案被和谐。
if not div.textarea:
continue
question_url = Zhihu_URL + div.h2.a['href']
question_title = div.h2.a.text.strip()
question = Question(question_url, question_title,
session=self._session)
author_link = div.find('a', class_='author-link')
if not author_link:
author_url = None
author_name = '匿名用户'
author_motto = ''
else:
author_url = Zhihu_URL + author_link['href']
author_name = author_link.text
author_motto_span = div.find('span', class_='bio')
author_motto = author_motto_span['title'] \
if author_motto_span else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
body = div.find('div', class_='zm-item-rich-text')
answer_url = Zhihu_URL + body['data-entry-url']
upvote_num = int(div.find(
'div', class_='zm-item-vote-info')['data-votecount'])
yield Answer(answer_url, question, author, upvote_num,
session=self._session)
params['offset'] = last_score
res = self._session.post(hot_questions_url, data=params)
gotten_feed_num = res.json()['msg'][0]
# 如果得到问题数量为0则返回
if gotten_feed_num == 0:
return
soup = BeautifulSoup(res.json()['msg'][1])
@staticmethod
def _get_score(tag):
h2 = tag.parent
div = h2.parent
try:
_ = h2['class']
return div['data-score']
except KeyError:
return div.parent.parent['data-score'] | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/topic.py | topic.py |
import getpass
import importlib
import json
import time
from urllib.parse import urlencode
import requests
from .common import *
class ZhihuClient:
"""知乎客户端类,内部维护了自己专用的网络会话,可用cookies或账号密码登录."""
def __init__(self, cookies=None):
"""创建客户端类实例.
:param str cookies: 见 :meth:`.login_with_cookies` 中 ``cookies`` 参数
:return: 知乎客户端对象
:rtype: ZhihuClient
"""
self._session = requests.Session()
self._session.headers.update(Default_Header)
self.proxies = None
if cookies is not None:
assert isinstance(cookies, str)
self.login_with_cookies(cookies)
# ===== login staff =====
@staticmethod
def _get_captcha_url():
params = {
'r': str(int(time.time() * 1000)),
'type': 'login',
}
return Captcha_URL + '?' + urlencode(params)
def get_captcha(self):
"""获取验证码数据。
:return: 验证码图片数据。
:rtype: bytes
"""
self._session.get(Zhihu_URL)
r = self._session.get(self._get_captcha_url())
return r.content
def login(self, email, password, captcha=None):
"""登陆知乎.
:param str email: 邮箱
:param str password: 密码
:param str captcha: 验证码, 默认为None,表示不提交验证码
:return:
======== ======== ============== ====================
元素序号 元素类型 意义 说明
======== ======== ============== ====================
0 int 是否成功 0为成功,1为失败
1 str 失败原因 登录成功则为空字符串
2 str cookies字符串 登录失败则为空字符串
======== ======== ============== ====================
:rtype: (int, str, str)
"""
data = {'email': email, 'password': password,
'remember_me': 'true'}
if captcha is not None:
data['captcha'] = captcha
r = self._session.post(Login_URL, data=data)
j = r.json()
code = int(j['r'])
message = j['msg']
cookies_str = json.dumps(self._session.cookies.get_dict()) \
if code == 0 else ''
return code, message, cookies_str
def login_with_cookies(self, cookies):
"""使用cookies文件或字符串登录知乎
:param str cookies:
============== ===========================
参数形式 作用
============== ===========================
文件名 将文件内容作为cookies字符串
cookies 字符串 直接提供cookies字符串
============== ===========================
:return: 无
:rtype: None
"""
if os.path.isfile(cookies):
with open(cookies) as f:
cookies = f.read()
cookies_dict = json.loads(cookies)
self._session.cookies.update(cookies_dict)
def login_in_terminal(self, need_captcha=False, use_getpass=True):
"""不使用cookies,在终端中根据提示登陆知乎
:param bool need_captcha: 是否要求输入验证码,如果登录失败请设为 True
:param bool use_getpass: 是否使用安全模式输入密码,默认为 True,
如果在某些 Windows IDE 中无法正常输入密码,请把此参数设置为 False 试试
:return: 如果成功返回cookies字符串
:rtype: str
"""
print('====== zhihu login =====')
email = input('email: ')
if use_getpass:
password = getpass.getpass('password: ')
else:
password = input("password: ")
if need_captcha:
captcha_data = self.get_captcha()
with open('captcha.gif', 'wb') as f:
f.write(captcha_data)
print('please check captcha.gif for captcha')
captcha = input('captcha: ')
os.remove('captcha.gif')
else:
captcha = None
print('====== logging.... =====')
code, msg, cookies = self.login(email, password, captcha)
if code == 0:
print('login successfully')
else:
print('login failed, reason: {0}'.format(msg))
return cookies
def create_cookies(self, file, need_captcha=False, use_getpass=True):
"""在终端中执行登录流程,将 cookies 存放在文件中以便后续使用
:param str file: 文件名
:param bool need_captcha: 登录过程中是否使用验证码, 默认为 False
:param bool use_getpass: 是否使用安全模式输入密码,默认为 True,
如果在某些 Windows IDE 中无法正常输入密码,请把此参数设置为 False 试试
:return:
"""
cookies_str = self.login_in_terminal(need_captcha, use_getpass)
if cookies_str:
with open(file, 'w') as f:
f.write(cookies_str)
print('cookies file created.')
else:
print('can\'t create cookies.')
# ===== network staff =====
def set_proxy(self, proxy):
"""设置代理
:param str proxy: 使用 "http://example.com:port" 的形式
:return: 无
:rtype: None
:说明:
由于一个 :class:`.ZhihuClient` 对象和它创建出来的其他知乎对象共用
一个Session,所以调用这个方法也会将所有生成出的知乎类设置上代理。
"""
self._session.proxies.update({'http': proxy})
def set_proxy_pool(self, proxies, auth=None, https=True):
"""设置代理池
:param proxies: proxy列表, 形如 ``["ip1:port1", "ip2:port2"]``
        :param auth: 如果代理需要验证身份, 通过这个参数提供, 用法见下方示例代码
:param https: 默认为 True, 传入 False 则不设置 https 代理
.. code-block:: python
from requests.auth import HTTPProxyAuth
auth = HTTPProxyAuth('laike9m', '123')
:说明:
每次 GET/POST 请求会随机选择列表中的代理
"""
from random import choice
if https:
self.proxies = [{'http': p, 'https': p} for p in proxies]
else:
self.proxies = [{'http': p} for p in proxies]
def get_with_random_proxy(url, **kwargs):
proxy = choice(self.proxies)
kwargs['proxies'] = proxy
if auth:
kwargs['auth'] = auth
return self._session.original_get(url, **kwargs)
def post_with_random_proxy(url, *args, **kwargs):
proxy = choice(self.proxies)
kwargs['proxies'] = proxy
if auth:
kwargs['auth'] = auth
return self._session.original_post(url, *args, **kwargs)
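        # Monkey-patch the session so that every later GET/POST picks a random
        # proxy from the pool; the original methods are kept on the session so
        # that ``remove_proxy_pool`` can restore them.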
self._session.original_get = self._session.get
self._session.get = get_with_random_proxy
self._session.original_post = self._session.post
self._session.post = post_with_random_proxy
def remove_proxy_pool(self):
"""
移除代理池
"""
self.proxies = None
self._session.get = self._session.original_get
self._session.post = self._session.original_post
del self._session.original_get
del self._session.original_post
# ===== getter staff ======
def me(self):
"""获取使用特定 cookies 的 Me 实例
:return: cookies对应的Me对象
:rtype: Me
"""
from .me import Me
headers = dict(Default_Header)
headers['Host'] = 'zhuanlan.zhihu.com'
res = self._session.get(Get_Me_Info_Url, headers=headers)
json_data = res.json()
url = json_data['profileUrl']
name = json_data['name']
motto = json_data['bio']
photo = json_data['avatar']['template'].format(
id=json_data['avatar']['id'], size='r')
return Me(url, name, motto, photo, session=self._session)
def __getattr__(self, item: str):
"""本函数用于获取各种类,如 `Answer` `Question` 等.
:支持的形式有:
1. client.answer()
2. client.author()
3. client.collection()
4. client.column()
5. client.post()
6. client.question()
7. client.topic()
参数均为对应页面的url,返回对应的类的实例。
"""
def getter(url):
return getattr(module, item.capitalize())(url,
session=self._session)
attr_list = ['answer', 'author', 'collection',
'column', 'post', 'question', 'topic']
if item.lower() in attr_list:
module = importlib.import_module('.'+item.lower(), 'zhihu')
return getter | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/client.py | client.py |
import enum
match = {
'ANSWER_QUESTION': 'member_answer_question',
'UPVOTE_ANSWER': 'member_voteup_answer',
'ASK_QUESTION': 'member_ask_question',
'FOLLOW_QUESTION': 'member_follow_question',
'UPVOTE_POST': 'member_voteup_article',
'FOLLOW_COLUMN': 'member_follow_column',
'FOLLOW_TOPIC': 'member_follow_topic',
'PUBLISH_POST': 'member_create_article',
'FOLLOW_COLLECTION': 'member_follow_favlist'
}
reverse_match = {v: k for k, v in match.items()}
class ActType(enum.Enum):
"""用于表示用户动态的类型.
:常量说明:
================= ================ ============ =====================
常量名 说明 提供属性 属性类型
================= ================ ============ =====================
ANSWER_QUESTION 回答了一个问题 answer :class:`.Answer`
UPVOTE_ANSWER 赞同了一个回答 answer :class:`.Answer`
ASK_QUESTION 提出了一个问题 question :class:`.Question`
FOLLOW_QUESTION 关注了一个问题 question :class:`.Question`
UPVOTE_POST 赞同了一篇文章 post :class:`.Post`
FOLLOW_COLUMN 关注了一个专栏 column :class:`.Column`
FOLLOW_TOPIC 关注了一个话题 topic :class:`.Topic`
PUBLISH_POST 发表了一篇文章 post :class:`.Post`
FOLLOW_COLLECTION 关注了一个收藏夹 collection :class:`.Collection`
================= ================ ============ =====================
"""
ANSWER_QUESTION = 1
UPVOTE_ANSWER = 2
ASK_QUESTION = 4
FOLLOW_QUESTION = 8
UPVOTE_POST = 16
FOLLOW_COLUMN = 32
FOLLOW_TOPIC = 64
PUBLISH_POST = 128
FOLLOW_COLLECTION = 256
@classmethod
def from_str(cls, div_class):
        return cls[reverse_match[div_class]]
def __str__(self):
return match[self.name]
class CollectActType(enum.Enum):
"""用于表示收藏夹操作的类型.
:常量说明:
================= ==============
常量名 说明
================= ==============
INSERT_ANSWER 在收藏夹中增加一个回答
DELETE_ANSWER 在收藏夹中删除一个回答
CREATE_COLLECTION 创建收藏夹
================= ==============
"""
INSERT_ANSWER = 1
DELETE_ANSWER = 2
CREATE_COLLECTION = 3 | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/acttype.py | acttype.py |
from .common import *
from .base import BaseZhihu, JsonAsSoupMixin
class Post(JsonAsSoupMixin, BaseZhihu):
"""专栏文章类,请使用``ZhihuClient.post``方法构造对象."""
@class_common_init(re_post_url)
def __init__(self, url, column=None, author=None, title=None,
upvote_num=None, comment_num=None, session=None):
"""创建专栏文章类实例.
:param str url: 文章url
:param Column column: 文章所属专栏,可选
:param Author author: 文章作者,可选
:param str title: 文章标题,可选
:param int upvote_num: 文章赞同数,可选
:param int comment_num: 文章评论数,可选
:param Session session: 使用的网络会话,为空则使用新会话
:return: 专栏文章对象
:rtype: Post
"""
match = re_post_url.match(url)
self.url = url
self._session = session
self._column = column
self._author = author
self._title = title
self._upvote_num = upvote_num
self._comment_num = comment_num
self._slug = int(match.group(1)) # 文章编号
def _make_soup(self):
if self.soup is None:
json = self._get_content()
self._gen_soup(json)
def _get_content(self):
origin_host = self._session.headers.get('Host')
self._session.headers.update(Host='zhuanlan.zhihu.com')
json = self._session.get(Column_Post_Data.format(self.slug)).json()
self._session.headers.update(Host=origin_host)
return json
@property
def column_in_name(self):
"""获取文章所在专栏的内部名称(用不到就忽视吧~)
:return: 专栏的内部名称
:rtype: str
"""
self._make_soup()
if 'column' in self.soup:
return self.soup['column']['slug']
else:
return None
@property
def slug(self):
"""获取文章的编号(用不到就忽视吧~)
:return: 文章编号
:rtype: int
"""
return self._slug
@property
@check_soup('_column')
def column(self):
"""获取文章所在专栏.
:return: 文章所在专栏
:rtype: Column
"""
from .column import Column
if 'column' in self.soup:
url = Column_Url + '/' + self.soup['column']['slug']
name = self.soup['column']['name']
return Column(url, name, session=self._session)
else:
return None
@property
@check_soup('_author')
def author(self):
"""获取文章作者.
:return: 文章作者
:rtype: Author
"""
from .author import Author
url = self.soup['author']['profileUrl']
name = self.soup['author']['name']
motto = self.soup['author']['bio']
template = self.soup['author']['avatar']['template']
photo_id = self.soup['author']['avatar']['id']
photo_url = template.format(id=photo_id, size='r')
return Author(url, name, motto, photo_url=photo_url,
session=self._session)
@property
@check_soup('_title')
def title(self):
"""获取文章标题.
:return: 文章标题
:rtype: str
"""
return self.soup['title']
@property
@check_soup('_upvote_num')
def upvote_num(self):
"""获取文章赞同数量.
:return: 文章赞同数
:rtype: int
"""
return int(self.soup['likesCount'])
@property
@check_soup('_comment_num')
def comment_num(self):
"""获取评论数量.
:return: 评论数量
:rtype: int
"""
return self.soup['commentsCount']
def save(self, filepath=None, filename=None, mode="md"):
"""保存答案为 Html 文档或 markdown 文档.
:param str filepath: 要保存的文件所在的目录,
不填为当前目录下以专栏标题命名的目录, 设为"."则为当前目录。
:param str filename: 要保存的文件名,
不填则默认为 所在文章标题 - 作者名.html/md。
如果文件已存在,自动在后面加上数字区分。
**自定义文件名时请不要输入后缀 .html 或 .md。**
:param str mode: 保存类型,可选 `html` 、 `markdown` 、 `md` 。
:return: 无
:rtype: None
"""
if mode not in ["html", "md", "markdown"]:
raise ValueError("`mode` must be 'html', 'markdown' or 'md',"
" got {0}".format(mode))
self._make_soup()
file = get_path(filepath, filename, mode, self.column.name,
self.title + '-' + self.author.name)
with open(file, 'wb') as f:
if mode == "html":
f.write(self.soup['content'].encode('utf-8'))
else:
import html2text
h2t = html2text.HTML2Text()
h2t.body_width = 0
f.write(h2t.handle(self.soup['content']).encode('utf-8'))
@property
def upvoters(self):
"""获取文章的点赞用户
:return: 文章的点赞用户,返回生成器。
"""
from .author import Author, ANONYMOUS
self._make_soup()
headers = dict(Default_Header)
headers['Host'] = 'zhuanlan.zhihu.com'
json = self._session.get(
Post_Get_Upvoter.format(self.slug),
headers=headers
).json()
for au in json:
try:
yield Author(
au['profileUrl'],
au['name'],
au['bio'],
photo_url=au['avatar']['template'].format(
id=au['avatar']['id'], size='r'),
session=self._session
)
except ValueError: # invalid url
yield ANONYMOUS | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/post.py | post.py |
import json
import time
from datetime import datetime
from .common import *
from .base import BaseZhihu
class Question(BaseZhihu):
"""问题类,请使用``ZhihuClient.question``方法构造对象."""
@class_common_init(re_question_url, trailing_slash=False)
def __init__(self, url, title=None, followers_num=None,
answer_num=None, creation_time=None, author=None,
session=None):
"""创建问题类实例.
:param str url: 问题url. 现在支持两种 url
1. https://www.zhihu.com/question/qid
2. https://www.zhihu.com/question/qid?sort=created
区别在于,使用第一种,调用 ``question.answers`` 的时候会按投票排序返回答案;
使用第二种, 会按时间排序返回答案, 后提交的答案先返回
:param str title: 问题标题,可选,
:param int followers_num: 问题关注人数,可选
:param int answer_num: 问题答案数,可选
:param datetime.datetime creation_time: 问题创建时间,可选
:param Author author: 提问者,可选
:return: 问题对象
:rtype: Question
"""
self._session = session
self._url = url
self._title = title
self._answer_num = answer_num
self._followers_num = followers_num
self._id = int(re.match(r'.*/(\d+)', self.url).group(1))
self._author = author
self._creation_time = creation_time
self._logs = None
self._deleted = None
@property
def url(self):
# always return url like https://www.zhihu.com/question/1234/
url = re.match(re_question_url_std, self._url).group()
return url if url.endswith('/') else url + '/'
@property
def id(self):
"""获取问题id(网址最后的部分).
:return: 问题id
:rtype: int
"""
return self._id
@property
@check_soup('_qid')
def qid(self):
"""获取问题内部id(用不到就忽视吧)
:return: 问题内部id
:rtype: int
"""
return int(self.soup.find(
'div', id='zh-question-detail')['data-resourceid'])
@property
@check_soup('_xsrf')
def xsrf(self):
"""获取知乎的反xsrf参数(用不到就忽视吧~)
:return: xsrf参数
:rtype: str
"""
return self.soup.find('input', attrs={'name': '_xsrf'})['value']
@property
@check_soup('_html')
def html(self):
"""获取页面源码.
:return: 页面源码
:rtype: str
"""
return self.soup.prettify()
@property
@check_soup('_title')
def title(self):
"""获取问题标题.
:return: 问题标题
:rtype: str
"""
return self.soup.find('h2', class_='zm-item-title') \
.text.replace('\n', '')
@property
@check_soup('_details')
def details(self):
"""获取问题详细描述,目前实现方法只是直接获取文本,效果不满意……等更新.
:return: 问题详细描述
:rtype: str
"""
return self.soup.find("div", id="zh-question-detail").div.text
@property
@check_soup('_answer_num')
def answer_num(self):
"""获取问题答案数量.
:return: 问题答案数量
:rtype: int
"""
answer_num_block = self.soup.find('h3', id='zh-question-answer-num')
# 当0人回答或1回答时,都会找不到 answer_num_block,
# 通过找答案的赞同数block来判断到底有没有答案。
# (感谢知乎用户 段晓晨 提出此问题)
if answer_num_block is None:
if self.soup.find('span', class_='count') is not None:
return 1
else:
return 0
return int(answer_num_block['data-num'])
@property
@check_soup('_follower_num')
def follower_num(self):
"""获取问题关注人数.
:return: 问题关注人数
:rtype: int
"""
follower_num_block = self.soup.find('div', class_='zg-gray-normal')
# 无人关注时 找不到对应block,直接返回0 (感谢知乎用户 段晓晨 提出此问题)
if follower_num_block is None or follower_num_block.strong is None:
return 0
return int(follower_num_block.strong.text)
@property
@check_soup('_topics')
def topics(self):
"""获取问题所属话题.
:return: 问题所属话题
:rtype: Topic.Iterable
"""
from .topic import Topic
for topic in self.soup.find_all('a', class_='zm-item-tag'):
yield Topic(Zhihu_URL + topic['href'], topic.text.replace('\n', ''),
session=self._session)
@property
def followers(self):
"""获取关注此问题的用户
:return: 关注此问题的用户
:rtype: Author.Iterable
:问题: 要注意若执行过程中另外有人关注,可能造成重复获取到某些用户
"""
self._make_soup()
followers_url = self.url + 'followers'
for x in common_follower(followers_url, self.xsrf, self._session):
yield x
@property
def answers(self):
"""获取问题的所有答案.
:return: 问题的所有答案,返回生成器
:rtype: Answer.Iterable
"""
from .author import Author
from .answer import Answer
self._make_soup()
# TODO: 统一逻辑. 完全可以都用 _parse_answer_html 的逻辑替换
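        # Two code paths: URLs ending in ``sort=created`` page through the rendered
        # HTML answer pages, while the default (vote-sorted) view parses the first
        # page and then fetches further answers in batches of 10 from the ajax
        # endpoint.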
if self._url.endswith('sort=created'):
pager = self.soup.find('div', class_='zm-invite-pager')
if pager is None:
max_page = 1
else:
max_page = int(pager.find_all('span')[-2].a.text)
for page in range(1, max_page + 1):
if page == 1:
soup = self.soup
else:
url = self._url + '&page=%d' % page
soup = BeautifulSoup(self._session.get(url).content)
error_answers = soup.find_all('div', id='answer-status')
for each in error_answers:
each['class'] = 'zm-editable-content'
answers_wrap = soup.find('div', id='zh-question-answer-wrap')
# 正式处理
authors = answers_wrap.find_all(
'div', class_='zm-item-answer-author-info')
urls = answers_wrap.find_all('a', class_='answer-date-link')
up_num = answers_wrap.find_all('div',
class_='zm-item-vote-info')
contents = answers_wrap.find_all(
'div', class_='zm-editable-content')
assert len(authors) == len(urls) == len(up_num) == len(
contents)
for author, url, up_num, content in \
zip(authors, urls, up_num, contents):
a_url, name, motto, photo = parser_author_from_tag(author)
author_obj = Author(a_url, name, motto, photo_url=photo,
session=self._session)
url = Zhihu_URL + url['href']
up_num = int(up_num['data-votecount'])
content = answer_content_process(content)
yield Answer(url, self, author_obj, up_num, content,
session=self._session)
else:
pagesize = 10
new_header = dict(Default_Header)
new_header['Referer'] = self.url
params = {"url_token": self.id,
'pagesize': pagesize,
'offset': 0}
data = {'_xsrf': self.xsrf,
'method': 'next',
'params': ''}
for i in range(0, (self.answer_num - 1) // pagesize + 1):
if i == 0:
# 修正各种建议修改的回答……
error_answers = self.soup.find_all('div',
id='answer-status')
for each in error_answers:
each['class'] = 'zm-editable-content'
answers_wrap = self.soup.find('div',
id='zh-question-answer-wrap')
# 正式处理
authors = answers_wrap.find_all(
'div', class_='zm-item-answer-author-info')
urls = answers_wrap.find_all('a', class_='answer-date-link')
up_num = answers_wrap.find_all('div',
class_='zm-item-vote-info')
contents = answers_wrap.find_all(
'div', class_='zm-editable-content')
assert len(authors) == len(urls) == len(up_num) == len(
contents)
for author, url, up_num, content in \
zip(authors, urls, up_num, contents):
a_url, name, motto, photo = parser_author_from_tag(
author)
author_obj = Author(a_url, name, motto, photo_url=photo,
session=self._session)
url = Zhihu_URL + url['href']
up_num = int(up_num['data-votecount'])
content = answer_content_process(content)
yield Answer(url, self, author_obj, up_num, content,
session=self._session)
else:
params['offset'] = i * pagesize
data['params'] = json.dumps(params)
r = self._session.post(Question_Get_More_Answer_URL,
data=data,
headers=new_header)
answer_list = r.json()['msg']
for answer_html in answer_list:
yield self._parse_answer_html(answer_html)
@property
def top_answer(self):
"""获取排名第一的答案.
:return: 排名第一的答案
:rtype: Answer
"""
for a in self.answers:
return a
def top_i_answer(self, i):
"""获取排名某一位的答案.
:param int i: 要获取的答案的排名
:return: 答案对象,能直接获取的属性参见answers方法
:rtype: Answer
"""
for j, a in enumerate(self.answers):
if j == i - 1:
return a
def top_i_answers(self, i):
"""获取排名在前几位的答案.
:param int i: 获取前几个
:return: 答案对象,返回生成器
:rtype: Answer.Iterable
"""
for j, a in enumerate(self.answers):
if j <= i - 1:
yield a
else:
return
@property
@check_soup('_author')
def author(self):
"""获取问题的提问者.
:return: 提问者
:rtype: Author or zhihu.ANONYMOUS
"""
from .author import Author, ANONYMOUS
logs = self._query_logs()
author_a = logs[-1].find_all('div')[0].a
if author_a.text == '匿名用户':
return ANONYMOUS
else:
url = Zhihu_URL + author_a['href']
return Author(url, name=author_a.text, session=self._session)
@property
@check_soup('_creation_time')
def creation_time(self):
"""
:return: 问题创建时间
:rtype: datetime.datetime
"""
logs = self._query_logs()
time_string = logs[-1].find('div', class_='zm-item-meta').time[
'datetime']
return datetime.strptime(time_string, "%Y-%m-%d %H:%M:%S")
@property
@check_soup('_last_edit_time')
def last_edit_time(self):
"""
:return: 问题最后编辑时间
:rtype: datetime.datetime
"""
data = {'_xsrf': self.xsrf, 'offset': '1'}
res = self._session.post(self.url + 'log', data=data)
_, content = res.json()['msg']
soup = BeautifulSoup(content)
time_string = soup.find_all('time')[0]['datetime']
return datetime.strptime(time_string, "%Y-%m-%d %H:%M:%S")
def _query_logs(self):
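        # Page through the question's edit log 20 entries at a time; after the
        # loop only the last (oldest) batch is kept, whose final entry is the
        # creation record that ``author`` and ``creation_time`` read from.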
if self._logs is None:
gotten_feed_num = 20
start = '0'
offset = 0
api_url = self.url + 'log'
logs = None
while gotten_feed_num == 20:
data = {'_xsrf': self.xsrf, 'offset': offset, 'start': start}
res = self._session.post(api_url, data=data)
gotten_feed_num, content = res.json()['msg']
offset += gotten_feed_num
soup = BeautifulSoup(content)
logs = soup.find_all('div', class_='zm-item')
start = logs[-1]['id'][8:] if len(logs) > 0 else '0'
time.sleep(0.2) # prevent from posting too quickly
self._logs = logs
return self._logs
# noinspection PyAttributeOutsideInit
def refresh(self):
"""刷新 Question object 的属性.
例如回答数增加了, 先调用 ``refresh()``
再访问 answer_num 属性, 可获得更新后的答案数量.
:return: None
"""
super().refresh()
self._html = None
self._title = None
self._details = None
self._answer_num = None
self._follower_num = None
self._topics = None
self._last_edit_time = None
self._logs = None
@property
@check_soup('_deleted')
def deleted(self):
"""问题是否被删除, 被删除了返回 True, 未被删除返回 False
:return: True or False
"""
return self._deleted
def _parse_answer_html(self, answer_html):
from .author import Author
from .answer import Answer
soup = BeautifulSoup(answer_html)
# 修正各种建议修改的回答……
error_answers = soup.find_all('div', id='answer-status')
for each in error_answers:
each['class'] = 'zm-editable-content'
answer_url = self.url + 'answer/' + soup.div['data-atoken']
author = soup.find('div', class_='zm-item-answer-author-info')
upvote_num = int(soup.find(
'div', class_='zm-item-vote-info')['data-votecount'])
content = soup.find('div', class_='zm-editable-content')
content = answer_content_process(content)
a_url, name, motto, photo = parser_author_from_tag(author)
author = Author(a_url, name, motto, photo_url=photo,
session=self._session)
return Answer(answer_url, self, author, upvote_num, content,
session=self._session) | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/question.py | question.py |
import json
from datetime import datetime
from .common import *
from .base import BaseZhihu
from .collection import Collection
from .author import Author, ANONYMOUS
class Answer(BaseZhihu):
"""答案类,请使用``ZhihuClient.answer``方法构造对象."""
@class_common_init(re_ans_url)
def __init__(self, url, question=None, author=None,
upvote_num=None, content=None, session=None):
"""创建答案类实例.
:param str url: 答案url
:param Question question: 答案所在的问题对象,可选
:param Author author: 答案回答者对象,可选
:param int upvote_num: 答案赞同数量,可选
:param str content: 答案内容,可选
:param Session session: 使用的网络会话,为空则使用新会话
:return: 答案对象
:rtype: Answer
"""
self.url = url
self._session = session
self._question = question
self._author = author
self._upvote_num = upvote_num
self._content = content
self._deleted = None
@property
def id(self):
"""答案的id
:return: 答案id
:rtype: int
"""
return int(re.match(r'.*/(\d+)/$', self.url).group(1))
@property
@check_soup('_xsrf')
def xsrf(self):
"""获取知乎的反xsrf参数(用不到就忽视吧~)
:return: xsrf参数
:rtype: str
"""
return self.soup.find('input', attrs={'name': '_xsrf'})['value']
@property
@check_soup('_aid')
def aid(self):
"""获取答案的内部id,某些POST操作需要此参数
:return: 答案内部id
:rtype: str
"""
return int(self.soup.find('div', class_='zm-item-answer')['data-aid'])
@property
@check_soup('_html')
def html(self):
"""获取网页源码
:return: 网页源码
:rtype: str
"""
return self.soup.prettify()
@property
@check_soup('_author')
def author(self):
"""获取答案作者.
:return: 答案作者
:rtype: Author
"""
from .author import Author
author = self.soup.find('div', class_='zm-item-answer-author-info')
url, name, motto, photo = parser_author_from_tag(author)
if name == '匿名用户':
return ANONYMOUS
else:
return Author(url, name, motto, photo_url=photo,
session=self._session)
@property
@check_soup('_question')
def question(self):
"""获取答案所在问题.
:return: 答案所在问题
:rtype: Question
"""
from .question import Question
question_link = self.soup.find(
"h2", class_="zm-item-title").a
url = Zhihu_URL + question_link["href"]
title = question_link.text.strip()
followers_num = int(self.soup.find(
'div', class_='zh-question-followers-sidebar').div.a.strong.text)
answers_num = int(re_get_number.match(self.soup.find(
'div', class_='zh-answers-title').h3.a.text).group(1))
return Question(url, title, followers_num, answers_num,
session=self._session)
@property
@check_soup('_upvote_num')
def upvote_num(self):
"""获取答案赞同数量.
:return: 答案赞同数量
:rtype: int
"""
return int(self.soup.find(
'div', class_='zm-item-vote-info')['data-votecount'])
@property
def upvoters(self):
"""获取答案点赞用户,返回生成器.
:return: 点赞用户
:rtype: Author.Iterable
"""
self._make_soup()
next_req = '/answer/' + str(self.aid) + '/voters_profile'
while next_req != '':
data = self._session.get(Zhihu_URL + next_req).json()
next_req = data['paging']['next']
for html in data['payload']:
soup = BeautifulSoup(html)
yield self._parse_author_soup(soup)
@property
@check_soup('_content')
def content(self):
"""以处理过的Html代码形式返回答案内容.
:return: 答案内容
:rtype: str
"""
answer_wrap = self.soup.find('div', id='zh-question-answer-wrap')
content = answer_wrap.find('div', class_='zm-editable-content')
content = answer_content_process(content)
return content
@property
@check_soup('_creation_time')
def creation_time(self):
"""获取答案创建时间
:return: 答案创建时间
:rtype: datetime.datetime
"""
return datetime.fromtimestamp(int(self.soup.find(
'div', class_='zm-item-answer')['data-created']))
@property
@check_soup('_collect_num')
def collect_num(self):
"""获取答案收藏数
:return: 答案收藏数量
:rtype: int
"""
element = self.soup.find("a", {
"data-za-a": "click_answer_collected_count"
})
if element is None:
return 0
else:
return int(element.get_text())
@property
def collections(self):
"""获取包含该答案的收藏夹
:return: 包含该答案的收藏夹
:rtype: Collection.Iterable
collect_num 未必等于 len(collections),比如:
https://www.zhihu.com/question/20064699/answer/13855720
显示被收藏 38 次,但只有 30 个收藏夹
"""
import time
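        # The collections endpoint returns up to 20 entries per call; keep paging
        # until a batch comes back with fewer than 10 items, which is treated as
        # the last page.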
gotten_feed_num = 20
offset = 0
data = {
'method':'next',
'_xsrf': self.xsrf
}
while gotten_feed_num >= 10:
data['params'] = "{\"answer_url\": %d,\"offset\": %d}" % (self.id, offset)
res = self._session.post(url=Get_Collection_Url, data=data)
gotten_feed_num = len(res.json()['msg'])
offset += gotten_feed_num
soup = BeautifulSoup(''.join(res.json()['msg']))
for zm_item in soup.find_all('div', class_='zm-item'):
url = Zhihu_URL + zm_item.h2.a['href']
name = zm_item.h2.a.text
links = zm_item.div.find_all('a')
owner = Author(links[0]['href'], session=self._session)
follower_num = int(links[1].text.split()[0])
yield Collection(url, owner=owner, name=name,
follower_num=follower_num,
session=self._session)
time.sleep(0.2) # prevent from posting too quickly
def save(self, filepath=None, filename=None, mode="html"):
"""保存答案为Html文档或markdown文档.
:param str filepath: 要保存的文件所在的目录,
不填为当前目录下以问题标题命名的目录, 设为"."则为当前目录。
:param str filename: 要保存的文件名,
不填则默认为 所在问题标题 - 答主名.html/md。
如果文件已存在,自动在后面加上数字区分。
**自定义文件名时请不要输入后缀 .html 或 .md。**
:param str mode: 保存类型,可选 `html` 、 `markdown` 、 `md` 。
:return: 无
:rtype: None
"""
if mode not in ["html", "md", "markdown"]:
raise ValueError("`mode` must be 'html', 'markdown' or 'md',"
" got {0}".format(mode))
file = get_path(filepath, filename, mode, self.question.title,
self.question.title + '-' + self.author.name)
with open(file, 'wb') as f:
if mode == "html":
f.write(self.content.encode('utf-8'))
else:
import html2text
h2t = html2text.HTML2Text()
h2t.body_width = 0
f.write(h2t.handle(self.content).encode('utf-8'))
def _parse_author_soup(self, soup):
from .author import Author, ANONYMOUS
author_tag = soup.find('div', class_='body')
if author_tag.string is None:
author_name = author_tag.div.a['title']
author_url = author_tag.div.a['href']
author_motto = author_tag.div.span.text
photo_url = PROTOCOL + soup.a.img['src'].replace('_m', '_r')
numbers_tag = soup.find_all('li')
numbers = [int(re_get_number.match(x.get_text()).group(1))
for x in numbers_tag]
# noinspection PyTypeChecker
return Author(author_url, author_name, author_motto, None,
numbers[2], numbers[3], numbers[0], numbers[1],
photo_url, session=self._session)
else:
return ANONYMOUS
@property
@check_soup('_comment_num')
def comment_num(self):
"""
:return: 答案下评论的数量
:rtype: int
"""
comment = self.soup.select_one("div.answer-actions a.toggle-comment")
comment_num_string = comment.text
number = comment_num_string.split()[0]
return int(number) if number.isdigit() else 0
@property
def comments(self):
"""获取答案下的所有评论.
:return: 答案下的所有评论,返回生成器
:rtype: Comments.Iterable
"""
import math
from .author import Author, ANONYMOUS
from .comment import Comment
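        # Comments are served 30 per page; the first response's paging.totalCount
        # determines how many pages need to be walked.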
api_url = Get_Answer_Comment_URL.format(self.aid)
page = pages = 1
while page <= pages:
res = self._session.get(api_url + '?page=' + str(page))
if page == 1:
total = int(res.json()['paging']['totalCount'])
if total == 0:
return
pages = math.ceil(total / 30)
page += 1
comment_items = res.json()['data']
for comment_item in comment_items:
comment_id = comment_item['id']
content = comment_item['content']
upvote_num = comment_item['likesCount']
time_string = comment_item['createdTime'][:19]
time = datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
if comment_item['author'].get('url') is not None:
a_url = comment_item['author']['url']
a_name = comment_item['author']['name']
photo_url_tmp = comment_item['author']['avatar']['template']
photo_url_id = comment_item['author']['avatar']['id']
a_photo_url = photo_url_tmp.replace(
'{id}', photo_url_id).replace('_{size}', '')
author_obj = Author(a_url, a_name, photo_url=a_photo_url,
session=self._session)
else:
author_obj = ANONYMOUS
yield Comment(comment_id, self, author_obj, upvote_num, content, time)
@property
def latest_comments(self):
"""获取答案下的所有评论。较新的评论先返回。
使用该方法比 ``reversed(list(answer.comments))`` 效率高
因为现在靠后的热门评论会被挪到前面,所以返回的评论未必严格满足时间先后关系
:return: 答案下的所有评论,返回生成器
:rtype: Comments.Iterable
"""
import math
from .author import Author, ANONYMOUS
from .comment import Comment
if self.comment_num == 0:
return
pages = math.ceil(self.comment_num / 30)
api_url = Get_Answer_Comment_URL.format(self.aid)
for page in range(pages, 0, -1):
res = self._session.get(api_url + '?page=' + str(page))
comment_items = res.json()['data']
for comment_item in reversed(comment_items):
comment_id = comment_item['id']
content = comment_item['content']
upvote_num = comment_item['likesCount']
time_string = comment_item['createdTime'][:19]
time = datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
                if comment_item['author'].get('url') is not None:
a_url = comment_item['author']['url']
a_name = comment_item['author']['name']
photo_url_tmp = comment_item['author']['avatar']['template']
photo_url_id = comment_item['author']['avatar']['id']
a_photo_url = photo_url_tmp.replace(
'{id}', photo_url_id).replace('_{size}', '')
author_obj = Author(a_url, a_name, photo_url=a_photo_url,
session=self._session)
else:
author_obj = ANONYMOUS
yield Comment(comment_id, self, author_obj, upvote_num, content, time)
def refresh(self):
"""刷新 Answer object 的属性.
例如赞同数增加了, 先调用 ``refresh()``
再访问 upvote_num属性, 可获得更新后的赞同数.
:return: None
"""
super().refresh()
self._html = None
self._upvote_num = None
self._content = None
self._collect_num = None
self._comment_num = None
@property
@check_soup('_deleted')
def deleted(self):
"""答案是否被删除, 被删除了返回 True, 为被删除返回 False
:return: True or False
"""
return self._deleted | zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu/answer.py | answer.py |
zhihu-py3: An Unofficial Zhihu API Library for Python 3
============================================================
|Author| |Build| |DocumentationStatus| |PypiVersion| |License| |PypiDownloadStatus|
Notice
------
Zhihu's front end keeps changing, and every change forces an update here, which has become rather tiresome...
So I started a new project, `Zhihu-OAuth <https://github.com/7sDream/zhihu-oauth>`__.
The new project uses a few tricks under the hood and should be more stable and faster. **It even supports Python 2!**
I have not measured stability, but there is a
`speed comparison <https://github.com/7sDream/zhihu-oauth/blob/master/compare.md>`__.
If you are about to start a new project, I strongly recommend taking a look at the new one.
If you have already written code with zhihu-py3, I will soon publish a short guide for migrating from zhihu-py3 to
Zhihu-OAuth, so stay tuned.
After all, if there is a better option, why not give it a try?
Features
--------
Zhihu has no public API. Inspired by the `zhihu-python <https://github.com/egrcc/zhihu-python>`__ project, this module re-implements Zhihu data parsing for Python 3.
In one sentence: you build objects of the corresponding classes from Zhihu URLs, and then read the data you need from them.
A quick example:
.. code:: python
from zhihu import ZhihuClient
Cookies_File = 'cookies.json'
client = ZhihuClient(Cookies_File)
url = 'http://www.zhihu.com/question/24825703'
question = client.question(url)
print(question.title)
print(question.answer_num)
print(question.follower_num)
print(question.topics)
for answer in question.answers:
print(answer.author.name, answer.upvote_num)
The output of this snippet is:
::
关系亲密的人之间要说「谢谢」吗?
627
4322
['心理学', '恋爱', '社会', '礼仪', '亲密关系']
龙晓航 50
小不点儿 198
芝士就是力量 89
欧阳忆希 425
...
There are also ``Author``, ``Answer``, ``Collection``, ``Column``, ``Post`` and ``Topic`` classes. ``Answer`` and ``Post`` provide a ``save`` method that writes the answer or article to HTML or Markdown; see the documentation or ``zhihu-test.py`` for details.
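For instance, saving the top answer of the question above as Markdown only takes a few lines (an illustrative snippet that assumes the logged-in ``client`` from the example above):
.. code:: python
    question = client.question('http://www.zhihu.com/question/24825703')
    answer = question.top_answer
    print(answer.author.name, answer.upvote_num)
    answer.save(mode="md")  # writes "<question title> - <author name>.md"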
Installation
------------
.. class:: bold
This project depends on `requests <https://pypi.python.org/pypi/requests/2.7.0>`__, `BeautifulSoup4 <http://www.crummy.com/software/BeautifulSoup>`__ and `html2text <https://github.com/aaronsw/html2text>`__.
The package is published on PyPI; install it with:
.. code:: bash
(sudo) pip(3) install (--upgrade) zhihu-py3
To enable lxml, use:
.. code:: bash
(sudo) pip(3) install (--upgrade) zhihu-py3[lxml]
lxml parses HTML faster and is more tolerant of broken markup. Where Zhihu uses ``<br>``, the built-in html.parser turns it into ``<br>...</br>`` while lxml produces the more standard and cleaner ``<br/>``, so the second command is recommended.
The module also works without lxml; in that case html.parser is used automatically.
PS: if installing lxml fails, install libxml and libxslt and try again:
.. code:: bash
sudo apt-get install libxml2 libxml2-dev libxslt1.1 libxslt1-dev
Getting started
---------------
For first-time use we recommend running the following code to generate a cookies file:
.. code:: python
from zhihu import ZhihuClient
ZhihuClient().create_cookies('cookies.json')
The run looks like this:
::
====== zhihu login =====
email: <your-email>
password: <your-password>
please check captcha.gif for captcha
captcha: <captcha-code>
====== logging.... =====
login successfully
cookies file created.
On success, a ``cookies.json`` file is created in the current directory.
All examples below assume you are logged in.
We recommend running ``zhihu-test.py`` before using the library for real work.
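Later runs can then reuse the saved file directly (illustrative snippet):
.. code:: python
    from zhihu import ZhihuClient
    client = ZhihuClient('cookies.json')  # loads the cookies file, no interactive login needed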
Usage examples
--------------
To keep the README short, this part has been moved into the documentation.
See the "Usage examples" section of the docs.
Login methods overview
----------------------
To keep the README short, this part has been moved into the documentation.
See the "Login methods overview" section of the docs.
Documentation
-------------
I finally wrestled the docs into shape, although I still have not really figured Sphinx out T^T
For now they live here:
`Docs for the master branch <http://zhihu-py3.readthedocs.org/zh_CN/latest>`__
`Docs for the dev branch <http://zhihu-py3.readthedocs.org/zh_CN/dev>`__
Misc
----
**If you run into a problem, please open an issue; if there is no response within a few hours, you can ask in the QQ group listed at the end.**
Related projects:
- `zhihurss <https://github.com/SimplyY/zhihu-rss>`__: a cross-platform Zhihu RSS client (for any user) built on top of zhihu-py3.
TODO List
---------
- [x] Fetch a user's followees and followers
- [x] Fetch the users who upvoted an answer
- [x] Fetch user avatar URLs
- [x] Package as a standard Python module
- [x] Refactor and add a ``ZhihuClient`` class so a custom cookies file can be used
- [x] Followers of collections, followers of questions, and so on
- [x] Add user actions to ``ZhihuClient`` (e.g. upvoting an answer)
- [ ] Unittest (tricky, since Zhihu may change at any time)
- [x] Fetch the number of columns a user follows and the followed columns
- [x] Fetch the number of topics a user follows and the followed topics
- [x] Put the Comment class on the agenda as well
Contact
-------
Github: `@7sDream <https://github.com/7sDream>`__
Zhihu: `@7sDream <http://www.zhihu.com/people/7sdream>`__
Sina Weibo: `@Dilover <http://weibo.com/didilover>`__
Email: `mail me <mailto:[email protected]>`__
QQ group for programming chat: 478786205
.. |Author| image:: https://img.shields.io/badge/Author-7sDream-blue.svg
:target: https://github.com/7sDream
.. |DocumentationStatus| image:: https://readthedocs.org/projects/zhihu-py3/badge/?version=latest
:target: https://readthedocs.org/projects/zhihu-py3/?badge=latest
.. |PypiVersion| image:: https://img.shields.io/pypi/v/zhihu-py3.svg
:target: https://pypi.python.org/pypi/zhihu-py3
.. |PypiDownloadStatus| image:: https://img.shields.io/pypi/dd/zhihu-py3.svg
:target: https://pypi.python.org/pypi/zhihu-py3
.. |License| image:: https://img.shields.io/pypi/l/zhihu-py3.svg
:target: https://github.com/7sDream/zhihu-py3/blob/master/LICENSE
.. |Build| image:: https://travis-ci.org/7sDream/zhihu-py3.svg?branch=dev
:target: https://travis-ci.org/7sDream/zhihu-py3
| zhihu-py3 | /zhihu_py3-0.3.23-py3-none-any.whl/zhihu_py3-0.3.23.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
## About
Zhihu-API aims to provide a clean, elegant, Pythonic set of API interfaces for anyone interested in Zhihu data. It can be used for data analysis, data mining, growth hacking, and for automating Zhihu actions from a program.
Note: only Python 3 is supported.
## Installation
```python
pip install -U zhihu
# or install the latest package
pip install git+git://github.com/lzjun567/zhihu-api --upgrade
```
## Quick Start
```python
from zhihu import Zhihu
zhihu = Zhihu()
# Get a user's basic profile
profile = zhihu.profile(user_slug="xiaoxiaodouzi")
print(profile)
>>>
{
'name': '我是x',
'headline': '程序员',
'gender': -1,
'user_type': 'people',
'is_advertiser': False,
'url_token': 'xiaoxiaodouzi',
'id': '1da75b85900e00adb072e91c56fd9149',
'is_org': False
}
# Send a private message
>>> zhihu.send_message(content="私信测试", user_slug="xiaoxiaodouzi")
<Response [200]>
# Follow a user
>>> zhihu.follow(user_slug="xiaoxiaodouzi")
{'follower_count': 12, 'followed': True}
# Unfollow the user
>>> zhihu.unfollow(user_slug="xiaoxiaodouzi")
{'follower_count': 11, 'followed': False}
>>> from zhihu import Answer
>>> answer = Answer(url="https://www.zhihu.com/question/62569341/answer/205327777")
# Upvote the answer
>>> answer.vote_up()
{'voting': 1, 'voteup_count': 260}
# Downvote the answer
>>> answer.vote_down()
{'voting': -1, 'voteup_count': 259}
# Reset the vote to neutral
>>> answer.vote_neutral()
{'voting': 0, 'voteup_count': 260}
# Thank the author of the answer
>>> answer.thank()
{'is_thanked': True}
# Cancel the thanks
>>> answer.thank_cancel()
{'is_thanked': False}
```
## Contributors
PRs are welcome. All contributors are listed here, in no particular order.
* [@BigBorg](https://github.com/BigBorg)
* [@xiaowenlong100](https://github.com/xiaowenlong100)
* [@chenghengchao](https://github.com/chenghengchao)
* [@MaxPoon](https://github.com/MaxPoon)
* [@Oopswc](https://github.com/Oopswc)
## Contact
The QQ group can no longer accept new members directly; add WeChat lzjun567 first and you will be invited in.

| zhihu | /zhihu-0.2.6.tar.gz/zhihu-0.2.6/README.md | README.md |
zhihu API
=========================
**UNOFFICIAL** API for `zhihu <https://www.zhihu.com>`_. This package supports only Python 3.x.
A `Node.js implementation <https://github.com/syaning/zhihu-api>`_ is also available.
Installation
------------
.. code-block:: bash
$ pip install zhihuapi
Quick Start
-----------
.. code-block:: python
import zhihuapi as api
with open('cookie') as f:
api.cookie(f.read())
data = api.user('zhihuadmin').profile()
print(data)
The result is:
.. code-block:: js
{
"url_token": "zhihuadmin",
"avatar_url": "https://pic3.zhimg.com/34bf96bf5584ac4b5264bd7ed4fdbc5a_is.jpg",
"avatar_url_template": "https://pic3.zhimg.com/34bf96bf5584ac4b5264bd7ed4fdbc5a_{size}.jpg",
"type": "people",
"name": "知乎小管家",
"headline": "欢迎反馈问题和建议!",
"is_org": false,
"url": "https://www.zhihu.com/people/zhihuadmin",
"badge": [
{
"type": "identity",
"description": "知乎官方帐号"
}
],
"user_type": "people",
"is_advertiser": false,
"id": "3d198a56310c02c4a83efb9f4a4c027e"
}
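Since ``profile()`` returns a plain ``dict``, ordinary Python is enough to pick fields out of the result above (illustrative snippet reusing ``data`` from the quick start):
.. code-block:: python
    print(data['name'], '-', data['headline'])
    for badge in data['badge']:
        print('badge:', badge['description'])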
License
-------
MIT | zhihuapi | /zhihuapi-0.6.0.tar.gz/zhihuapi-0.6.0/README.rst | README.rst |
import requests
import os
import pickle
import zxing
import pyqrcode
import time
cookies = requests.cookies.RequestsCookieJar()
def login():
load_cookies()
status = check_login_status()
if status is False:
print('cookies过期 请重新登陆')
return qr_login()
else:
print('成功从文件加载cookies')
return True
def qr_login():
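    # Overall flow: request a login QR token, download the QR image, decode its
    # payload with zxing and re-render it in the terminal via pyqrcode, then poll
    # the scan_info endpoint until the user confirms on the phone, and finally
    # persist the resulting cookies with save_cookies().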
headers = {
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64)',
}
status = True
s = requests.session()
try:
while status is True:
result = get_qrcode()
imgurl = result[0]
token = result[1]
res = s.get(imgurl, headers=headers, cookies=cookies)
content = res.content
f = open('image.png', 'wb')
f.write(content)
f.close()
reader = zxing.BarCodeReader()
raw = reader.decode("image.png").raw
url = pyqrcode.create(raw)
print(url.terminal(quiet_zone=1))
start = time.time()
while True:
if time.time() - start < 120:
url = 'https://www.zhihu.com/api/v3/account/api/login/qrcode/{}/scan_info'.format(token)
res = s.get(url, headers=headers)
if res.status_code == 200:
content = res.json()
if 'status' in content:
if content['status'] == 1:
print('扫码成功 请确认登陆')
continue
else:
print('成功确认登陆')
cookies_dict = res.cookies.get_dict()
for key in cookies_dict:
cookies.set(key, cookies_dict[key])
q_c0 = content['cookie']['q_c0']
z_c0 = content['cookie']['z_c0']
cookies.set('q_c0', q_c0)
cookies.set('z_c0', z_c0)
save_cookies()
os.system('cls')
return True
else:
print('二维码已经过期 重新加载二维码')
status = False
time.sleep(5)
os.system('cls')
break
qr_login()
except Exception:
return False
def get_qrcode():
global cookies
s = requests.session()
headers = {
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64)',
}
url = 'https://www.zhihu.com/signin?next=%2F'
res = s.get(url=url, headers=headers)
cookies_dict = res.cookies.get_dict()
for key in cookies_dict:
cookies.set(key, cookies_dict[key])
url = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'
res = s.get(url, headers=headers)
cookies_dict = res.cookies.get_dict()
for key in cookies_dict:
cookies.set(key, cookies_dict[key])
url = 'https://www.zhihu.com/udid'
res = s.post(url=url, headers=headers)
cookies_dict = res.cookies.get_dict()
for key in cookies_dict:
cookies.set(key, cookies_dict[key])
url = 'https://www.zhihu.com/api/v3/account/api/login/qrcode'
res = s.post(url, headers=headers, cookies=cookies, data="")
token = res.json()['token']
imgurl = 'https://www.zhihu.com/api/v3/account/api/login/qrcode/{}/image'.format(token)
result = [imgurl, token]
return result
def save_cookies():
f = open('cookies', 'wb')
pickle.dump(cookies, f)
f.close()
def load_cookies():
global cookies
try:
        f = open('cookies', 'rb')
data = pickle.load(f)
cookies = data
except FileNotFoundError:
print('cookie文件不存在')
def check_login_status():
s = requests.session()
url = 'https://www.zhihu.com/api/v4/me'
headers = {
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64)',
}
res = s.get(url, headers=headers, cookies=cookies)
content = res.json()
if 'error'in content:
return False
else:
return True | zhihutool | /zhihutool-1.0.6.tar.gz/zhihutool-1.0.6/ZhihuTool/login.py | login.py |
<div align="center">
<a href="http://zhijian.readthedocs.io"><img width="450px" height="auto" src="https://github.com/zhangyikaii/LAMDA-ZhiJian/raw/main/assests/logo.png?raw=true"></a>
</div>
<div align="center">
<img src="https://img.shields.io/badge/License-MIT-<COLOR>.svg?style=for-the-badge" alt="Generic badge", height="21">
<img src="https://img.shields.io/github/actions/workflow/status/zhangyikaii/LAMDA-ZhiJian/tests.yml?branch=main&style=for-the-badge" alt="GitHub Workflow Status (branch)", height="21">
<img src="https://img.shields.io/readthedocs/smp?style=for-the-badge&logo=readthedocs&logoColor=white" alt="Read the Docs", height="21">
<br>
<img src="https://img.shields.io/pypi/v/ZhiJian?color=blue&style=for-the-badge&logo=pypi&logoColor=white" alt="PyPI", height="21">
<img src="https://img.shields.io/pypi/dm/ZhiJian?style=for-the-badge&color=blue" alt="PyPI - Downloads", height="21">
<br>
<img src="https://img.shields.io/badge/PYTORCH-1.4+-red?style=for-the-badge&logo=pytorch" alt="PyTorch - Version", height="21">
<img src="https://img.shields.io/badge/PYTHON-3.7+-red?style=for-the-badge&logo=python&logoColor=white" alt="Python - Version", height="21">
</div>
<h4 align="center">
<p>
A Unifying and Rapidly Deployable Toolbox for Pre-trained Model Reuse
<p>
<p>
<a href="https://arxiv.org/abs/2308.09158">[Paper]</a> [<b>Code</b>] <a href="https://zhijian.readthedocs.io/en/latest/#">[Docs]</a>
<p>
<p>
<b>English</b> |
<a href="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/README_zh.md">中文</a>
<p>
</h4>
**ZhiJian** ([**执简**驭繁](https://baike.baidu.com/item/%E6%89%A7%E7%AE%80%E9%A9%AD%E7%B9%81)) is a *comprehensive* and *user-friendly* `PyTorch`-based **Model Reuse toolbox** for leveraging **foundation pre-trained models** and their **fine-tuned counterparts** to *extract* knowledge and *expedite* learning in real-world tasks.
**The rapid progress** in deep learning has led to the emergence of **numerous open-source Pre-Trained Models (PTMs)** on platforms like PyTorch, TensorFlow, and HuggingFace Transformers. Tailoring these PTMs to specific tasks yields models that handle those objectives effectively, creating valuable resources for the machine-learning community. **Reusing PTMs is vital for enhancing target models' capabilities and efficiency**, achieved by adapting the architecture, customizing learning on target data, or devising optimized inference strategies that leverage PTM knowledge.

🔥 **To facilitate a holistic consideration of various model reuse strategies**, ZhiJian categorizes model reuse methods into *three* sequential modules: **Architect**, **Tuner**, and **Merger**, aligning with the stages of **model preparation**, **model learning**, and **model inference** on the target task, respectively. **The provided interface methods include**:
<details>
<summary style="margin-left: 2px;"><b>A</b>rchitect Module [<em>Click to Expand</em>]<p style="margin-left: 12px;">The Architect module involves <b>modifying the pre-trained model to fit the target task</b>, and reusing certain parts of the pre-trained model while introducing new learnable parameters with specialized structures.</p></summary>
<details>
<summary style="margin-left: 12px;"><strong> Linear Probing</strong> & <strong>Partial-k</strong>, <em>How transferable are features in deep neural networks?</em> In: NeurIPS'14. <a href="https://arxiv.org/pdf/1411.1792.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/linear_probing.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Adapter</strong>, <em>Parameter-Efficient Transfer Learning for NLP.</em> In: ICML'19. <a href="https://arxiv.org/pdf/1902.00751.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/adapter.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Diff Pruning</strong>, <em>Parameter-Efficient Transfer Learning with Diff Pruning.</em> In: ACL'21. <a href="https://arxiv.org/pdf/2012.07463.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/diff_pruning.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> LoRA</strong>, <em>LoRA: Low-Rank Adaptation of Large Language Models.</em> In: ICLR'22. <a href="https://arxiv.org/pdf/2106.09685.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/lora.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Visual Prompt Tuning / Prefix</strong>, <em>Visual Prompt Tuning.</em> In: ECCV'22. <a href="https://arxiv.org/pdf/2203.12119.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/visual_prompt_tuning.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Scaling & Shifting</strong>, <em>Scaling & Shifting Your Features: A New Baseline for Efficient Model Tuning.</em> In: NeurIPS'22. <a href="https://arxiv.org/pdf/2210.08823.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/scaling_and_shifting.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> AdaptFormer</strong>, <em>AdaptFormer: Adapting Vision Transformers for Scalable Visual Recognition.</em> In: NeurIPS'22. <a href="https://arxiv.org/pdf/2205.13535.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/adapterformer.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> BitFit</strong>, <em>BitFit: Simple Parameter-efficient Fine-tuning for Transformer-based Masked Language-models.</em> In: ACL'22. <a href="https://arxiv.org/pdf/2106.10199.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/bitfit.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Convpass</strong>, <em>Convolutional Bypasses Are Better Vision Transformer Adapters.</em> In: Tech Report 07-2022. <a href="https://arxiv.org/pdf/2207.07039.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/convpass.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Fact-Tuning</strong>, <em>FacT: Factor-Tuning for Lightweight Adaptation on Vision Transformer.</em> In: AAAI'23. <a href="https://arxiv.org/pdf/2212.03145.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/fact_tuning.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
</details>
<details>
<summary style="margin-left: 2px;"><b>T</b>uner Module [<em>Click to Expand</em>]<p style="margin-left: 12px;">The Tuner module focuses on <b>training the target model with guidance from pre-trained model knowledge</b> to expedite the optimization process, <em>e.g.</em>, via adjusting objectives, optimizers, or regularizers.</p></summary>
<details>
<summary style="margin-left: 12px;"><strong> Knowledge Transfer</strong>, <em>NeC4.5: neural ensemble based C4.5.</em> In: IEEE Trans. Knowl. Data Eng. 2004. <a href="https://ieeexplore.ieee.org/document/1294896">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/knowledge_transfer.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> FitNet</strong>, <em>FitNets: Hints for Thin Deep Nets.</em> In: ICLR'15. <a href="https://arxiv.org/pdf/1412.6550.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/fitnet.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> LwF</strong>, <em>Learning without Forgetting.</em> In: CVPR'19. <a href="https://arxiv.org/pdf/1606.09282.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/learning_without_forgetting.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> FSP</strong>, <em>A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning.</em> In: CVPR'17. <a href="https://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/fsp.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> NST</strong>, <em>Like What You Like: Knowledge Distill via Neuron Selectivity Transfer.</em> In: CVPR'17. <a href="https://arxiv.org/pdf/1707.01219.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/nst.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> RKD</strong>, <em>Relational Knowledge Distillation.</em> In: CVPR'19. <a href="https://arxiv.org/pdf/1904.05068.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/rkd.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> SPKD</strong>, <em>Similarity-Preserving Knowledge Distillation.</em> In: CVPR'19. <a href="https://arxiv.org/pdf/1907.09682.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/spkd.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> CRD</strong>, <em>Contrastive Representation Distillation.</em> In: ICLR'20. <a href="https://arxiv.org/pdf/1910.10699.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/crd.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> REFILLED</strong>, <em>Distilling Cross-Task Knowledge via Relationship Matching.</em> In: CVPR'20. <a href="http://www.lamda.nju.edu.cn/lus/files/CVPR20_ReFilled.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/refilled.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> WiSE-FT</strong>, <em>Robust fine-tuning of zero-shot models.</em> In: CVPR'22. <a href="https://arxiv.org/pdf/2109.01903.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/wise_tune.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> L<sup>2</sup> penalty / L<sup>2</sup>-SP</strong>, <em>Explicit Inductive Bias for Transfer Learning with Convolutional Networks.</em> In: ICML'18. <a href="https://arxiv.org/pdf/1802.01483.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/l_2_penalty.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Spectral Norm</strong>, <em>Spectral Normalization for Generative Adversarial Networks.</em> In: ICLR'18. <a href="https://arxiv.org/pdf/1802.05957.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/spectral_norm.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> BSS</strong>, <em>Catastrophic Forgetting Meets Negative Transfer: Batch Spectral Shrinkage for Safe Transfer Learning.</em> In: NeurIPS'19. <a href="https://proceedings.neurips.cc/paper_files/paper/2019/file/c6bff625bdb0393992c9d4db0c6bbe45-Paper.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/bss.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> DELTA</strong>, <em>DELTA: DEep Learning Transfer using Feature Map with Attention for Convolutional Networks.</em> In: ICLR'19. <a href="https://arxiv.org/pdf/1901.09229.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/delta.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> DeiT</strong>, <em>Training data-efficient image transformers & distillation through attention.</em> In: ICML'21. <a href="https://arxiv.org/pdf/2012.12877.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/deit.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> DIST</strong>, <em>Knowledge Distillation from A Stronger Teacher.</em> In: NeurIPS'22. <a href="https://arxiv.org/pdf/2205.10536.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/dist.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
</details>
<details>
<summary style="margin-left: 2px;"><b>M</b>erger Module [<em>Click to Expand</em>]<p style="margin-left: 12px;">The Merger module influences <b>the inference phase</b> by either reusing pre-trained features or incorporating adapted logits from the pre-trained model.</p></summary>
<details>
<summary style="margin-left: 12px;"><strong> Nearest Class Mean</strong>, <em>Generalizing to new classes at near-zero cost.</em> In: TPAMI'13. <a href="https://ieeexplore.ieee.org/document/6517188">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/ncm.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> SimpleShot</strong>, <em>SimpleShot: Revisiting Nearest-Neighbor Classification for Few-Shot Learning.</em> In: CVPR'19. <a href="https://arxiv.org/pdf/1911.04623.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/simpleshot.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Head2Toe</strong>, <em>Head2Toe: Utilizing Intermediate Representations for Better Transfer Learning.</em> In: ICML'22. <a href="https://arxiv.org/pdf/2201.03529.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/head2toe.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> VQT</strong>, <em>Visual Query Tuning: Towards Effective Usage of Intermediate Representations for Parameter and Memory Efficient Transfer Learning.</em> In: CVPR'23. <a href="https://arxiv.org/pdf/2212.03220.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/vqt.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> via Optimal Transport</strong>, <em>Model Fusion via Optimal Transport.</em> In: NeurIPS'20. <a href="https://arxiv.org/pdf/1910.05653.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/otfusion.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Model Soup</strong> <em>Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time.</em> In: ICML'22. <a href="https://arxiv.org/pdf/2203.05482.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/modelsoup.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Fisher Merging</strong> <em>Merging Models with Fisher-Weighted Averaging.</em> In: NeurIPS'22. <a href="https://arxiv.org/pdf/2111.09832.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/fishermerging.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Deep Model Reassembly</strong> <em>Deep Model Reassembly.</em> In: NeurIPS'22. <a href="https://arxiv.org/pdf/2210.17409.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/dmr.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> REPAIR</strong> <em>REPAIR: REnormalizing Permuted Activations for Interpolation Repair.</em> In: ICLR'23. <a href="https://arxiv.org/pdf/2211.08403.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/repair.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> Git Re-Basin</strong> <em>Git Re-Basin: Merging Models modulo Permutation Symmetries.</em> In: ICLR'23. <a href="https://arxiv.org/pdf/2209.04836.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/gitrebasin.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
<details>
<summary style="margin-left: 12px;"><strong> ZipIt</strong> <em>ZipIt! Merging Models from Different Tasks without Training.</em> In: ICLR'23. <a href="https://arxiv.org/pdf/2305.03053.pdf">[Paper]</a> <a href="https://github.com">[Code]</a></summary>
<div style="text-align: center;">
<img src="https://github.com/zhangyikaii/LAMDA-ZhiJian/blob/main/assests/zipit.png?raw=true" alt="WSFG" width="auto" height="300px" />
</div>
</details>
</details>
<!-- -->
💡 **ZhiJian** also has the following **highlights**:
+ **Support** reuse of various **pre-trained model zoos**, including:
+ PyTorch [Torchvision](https://pytorch.org/vision/stable/models.html); OpenAI [CLIP](https://github.com/openai/CLIP); 🤗Hugging Face [PyTorch Image Models (timm)](https://github.com/huggingface/pytorch-image-models), [Transformers](https://github.com/huggingface/transformers)
+ Other popular projects, *e.g.*, [vit-pytorch](https://github.com/lucidrains/vit-pytorch) (stars [14k](https://github.com/lucidrains/vit-pytorch/stargazers)).
  + Large Language Models, including [baichuan](https://huggingface.co/baichuan-inc/baichuan-7B), [LLaMA](https://github.com/facebookresearch/llama), and [BLOOM](https://huggingface.co/bigscience/bloom).
+ **Extremely easy** to get started and **customize**
+ Get started with a 10 minute blitz [](https://colab.research.google.com/drive/1Ho1R6h5FEg6zXBJVauXcBnSpBrfi6JmN?usp=sharing)
+ Customize datasets and pre-trained models with step-by-step instructions [](https://colab.research.google.com/drive/1PKy1U7DyAy5AJYIBv5VEoHWEDJ6NCwTZ?usp=sharing)
+ Feel free to create a novel approach for reusing pre-trained model [](https://colab.research.google.com/drive/1vHQjlaAGhoeiTVAwOrSQCraAlDvWOlh9?usp=sharing)
+ **Concise** code that does **big** things
  + Only ~5,000 lines of base code, with new methods incorporated like building *LEGO* blocks
  + **State-of-the-art** results on the VTAB Multi-Reuse-Tasks Challenge, backed by approximately **10k** experiments [[here]](https://github.com/zhangyikaii/LAMDA-ZhiJian/tree/main/results)
  + Friendly guidelines and comprehensive documentation for customizing datasets and pre-trained models [[here]](https://zhijian.readthedocs.io/en/latest/tutorials/get_started.html)
> "ZhiJian" in Chinese means handling complexity with concise and efficient methods. Given the variations in pre-trained models and the deployment overhead of full parameter fine-tuning, ZhiJian represents a solution that is easily reusable, maintains high accuracy, and maximizes the potential of pre-trained models.
>
> “执简驭繁”的意思是用简洁高效的方法驾驭纷繁复杂的事物。“繁”表示现有预训练模型和复用方法种类多、差异大、部署难,所以取名"执简"的意思是通过该工具包,能轻松地驾驭模型复用方法,易上手、快复用、稳精度,最大限度地唤醒预训练模型的知识。
## 🕹️ Quick Start
1. An environment with Python 3.7+ from [conda](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html "conda-env"), [venv](https://docs.python.org/3/library/venv.html), or [virtualenv](https://virtualenv.pypa.io/en/latest/).
2. Install ZhiJian using pip:
```bash
$ pip install zhijian
```
+ [Option] Install with the newest version through GitHub:
```bash
$ pip install git+https://github.com/ZhangYikaii/LAMDA-ZhiJian.git@main --upgrade
```
3. Open your python console and type
```python
import zhijian
print(zhijian.__version__)
```
If no error occurs, you have successfully installed ZhiJian.
4. Try a demo that reuses a pre-trained ViT-B/16 on the target CIFAR-100 dataset with LoRA:
```python
from zhijian.trainers.base import get_args, prepare_trainer
args = get_args(
dataset='VTAB-1k.CIFAR-100', # dataset
dataset_dir='your/dataset/directory', # dataset directory
model='timm.vit_base_patch16_224_in21k', # backbone network
config_blitz='(LoRA.adapt): ...->(blocks[0:12].attn.qkv){inout1}->...', # addin blitz configuration
training_mode='finetune', # training mode
optimizer='adam', # optimizer
lr=1e-2, # learning rate
wd=1e-5, # weight decay
gpu='0', # gpu id
verbose=True # control the verbosity of the output
)
import torch, os
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
torch.cuda.set_device(int(args.gpu))
# Pre-trained Model
from zhijian.trainers.finetune import get_model
model, model_args, device = get_model(args)
# Target Dataset
from zhijian.data.base import prepare_vision_dataloader
train_loader, val_loader, num_classes = prepare_vision_dataloader(args, model_args)
# Optimizer
import torch.optim as optim
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.max_epoch, eta_min=args.eta_min)
criterion = torch.nn.CrossEntropyLoss()
# Trainer
trainer = prepare_trainer(
args,
model=model, model_args=model_args, device=device,
train_loader=train_loader, val_loader=val_loader, num_classes=num_classes,
optimizer=optimizer, lr_scheduler=lr_scheduler, criterion=criterion
)
trainer.fit()
trainer.test()
```
For more information, please click the [tutorials](https://zhijian.readthedocs.io/en/latest/tutorials/get_started.html).
## Documentation
📚 The tutorials and API documentation are hosted on [ZhiJian.readthedocs.io](https://zhijian.readthedocs.io/)
## Why ZhiJian?

<table>
<tr>
<td colspan="9" style="border-bottom: 2px solid black;"></td>
</tr>
<tr>
<td><b>Related Library</b></td>
<td><b>GitHub Stars</b></td>
<td><b># of Alg.<sup>(1)</sup></b></td>
<td><b># of Model<sup>(1)</sup></b></td>
<td><b># of Dataset<sup>(1)</sup></b></td>
<td><b># of Fields<sup>(2)</sup></b></td>
<td><b>LLM Supp.</b></td>
<td><b>Docs.</b></td>
<td><b>Last Update</b></td>
</tr>
<tr>
<td><a href="https://github.com/huggingface/peft">PEFT</a></td>
<td><a href="https://github.com/huggingface/peft/stargazers">
<img src="https://img.shields.io/github/stars/huggingface/peft" alt="GitHub stars">
</a></td>
<td>6</td>
<td>~15</td>
<td>➖<sup>(3)</sup></td>
<td>1<sup>(a)</sup></td>
<td>✔️</td>
<td>✔️</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/huggingface/peft?label=last%20update">
</a>
</td>
</tr>
<tr>
<td><a href="https://github.com/adapter-hub/adapter-transformers">adapter-transformers</a></td>
<td><a href="https://github.com/adapter-hub/adapter-transformers/stargazers">
<img src="https://img.shields.io/github/stars/adapter-hub/adapter-transformers" alt="GitHub stars">
</a></td>
<td>10</td>
<td>~15</td>
<td>➖<sup>(3)</sup></td>
<td>1<sup>(a)</sup></td>
<td>❌</td>
<td>✔️</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/adapter-hub/adapter-transformers?label=last%20update">
</a>
</td>
</tr>
<tr>
<td><a href="https://github.com/hiyouga/LLaMA-Efficient-Tuning">LLaMA-Efficient-Tuning</a></td>
<td><a href="https://github.com/hiyouga/LLaMA-Efficient-Tuning/stargazers">
<img src="https://img.shields.io/github/stars/hiyouga/LLaMA-Efficient-Tuning" alt="GitHub stars">
</a></td>
<td>4</td>
<td>5</td>
<td>~20</td>
<td>1<sup>(a)</sup></td>
<td>✔️</td>
<td>❌</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/hiyouga/LLaMA-Efficient-Tuning?label=last%20update">
</a></td>
</tr>
<tr>
<td><a href="https://github.com/AberHu/Knowledge-Distillation-Zoo">Knowledge-Distillation-Zoo</a></td>
<td><a href="https://github.com/AberHu/Knowledge-Distillation-Zoo/stargazers">
<img src="https://img.shields.io/github/stars/AberHu/Knowledge-Distillation-Zoo" alt="GitHub stars">
</a></td>
<td>20</td>
<td>2</td>
<td>2</td>
<td>1<sup>(b)</sup></td>
<td>❌</td>
<td>❌</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/AberHu/Knowledge-Distillation-Zoo?label=last%20update">
</a></td>
</tr>
<tr>
<td><a href="https://github.com/sicara/easy-few-shot-learning">Easy Few-Shot Learning</a></td>
<td><a href="https://github.com/sicara/easy-few-shot-learning/stargazers">
<img src="https://img.shields.io/github/stars/sicara/easy-few-shot-learning" alt="GitHub stars">
</a></td>
<td>10</td>
<td>3</td>
<td>2</td>
<td>1<sup>(b)</sup></td>
<td>❌</td>
<td>❌</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/sicara/easy-few-shot-learning?label=last%20update">
</a></td>
</tr>
<tr>
<td><a href="https://github.com/mlfoundations/model-soups">Model soups</a></td>
<td><a href="https://github.com/mlfoundations/model-soups/stargazers">
<img src="https://img.shields.io/github/stars/mlfoundations/model-soups" alt="GitHub stars">
</a></td>
<td>3</td>
<td>3</td>
<td>5</td>
<td>1<sup>(c)</sup></td>
<td>❌</td>
<td>❌</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/mlfoundations/model-soups?label=last%20update">
</a></td>
</tr>
<tr>
<td><a href="https://github.com/samuela/git-re-basin">Git Re-Basin</a></td>
<td><a href="https://github.com/samuela/git-re-basin/stargazers">
<img src="https://img.shields.io/github/stars/samuela/git-re-basin" alt="GitHub stars">
</a></td>
<td>3</td>
<td>5</td>
<td>4</td>
<td>1<sup>(c)</sup></td>
<td>❌</td>
<td>❌</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/samuela/git-re-basin?label=last%20update">
</a></td>
</tr>
<tr>
<td colspan="9" style="border-bottom: 2px solid grey;"></td>
</tr>
<tr>
<td><b>ZhiJian</b></td>
<!-- <td><a href="https://github.com/adapter-hub/adapter-transformers/stargazers">
<img src="https://img.shields.io/github/stars/zhangyikaii/LAMDA-ZhiJian" alt="GitHub stars">
</a></td> -->
<td>🙌</td>
<td>30+</td>
<td>~50</td>
<td>19</td>
<td>3<sup>(a,b,c)</sup></td>
<td>✔️</td>
<td>✔️</td>
<td>
<a>
<img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/zhangyikaii/LAMDA-ZhiJian?label=last%20update">
</a></td>
</tr>
</table>
<sup><b>(1)</b>: access date: 2023-08-05</sup>
<sup><b>(2)</b>: fields for (a) Architect; (b) Tuner; (c) Merger;</sup>
### 📦 Reproducible SoTA Results
**ZhiJian** fixes the random seed to ensure reproducibility of the results, with only minor variations expected across different devices.
#### VTAB of Multi-Reuse-Tasks Challenge
We develop a robust classification challenge called **VTAB-M (Visual Task Adaptation Benchmark for Multi-Reuse-Tasks)**, building upon VTAB. **This challenge involves tackling a diverse set of 18 visual tasks concurrently, while harnessing the power of pre-trained knowledge.** The primary objective is to equip models with versatile capabilities that span natural, specialized, and structured visual domains.
The challenge incorporates the datasets *CIFAR-100, CLEVR-Count, CLEVR-Distance, Caltech101, DTD, Diabetic-Retinopathy, Dmlab, EuroSAT, KITTI, Oxford-Flowers-102, Oxford-IIIT-Pet, PatchCamelyon, RESISC45, SVHN, dSprites-Location, dSprites-Orientation, smallNORB-Azimuth, and smallNORB-Elevation.* Following the VTAB-1k standard, we sample a training set of 1,000 examples from each dataset, and models are evaluated on the full test set of each dataset. VTAB-M serves as a comprehensive evaluation framework that assesses a model's **generalization** and **adaptation** across diverse visual tasks, pushing pre-trained models to become more **versatile** and **proficient** through reuse methods.
[More results](https://github.com/zhangyikaii/LAMDA-ZhiJian/tree/main/results) will be released gradually in upcoming updates. Please stay tuned for more information.
| Method | Tuned Params | Mixed Mean | Caltech101 | CIFAR-100 | CLEVR-Count | CLEVR-Distance | Diabetic-Retinopathy | Dmlab | dSprites-Location | dSprites-Orientation | DTD | EuroSAT | KITTI | Oxford-Flowers-102 | Oxford-IIIT-Pet | PatchCamelyon | RESISC45 | smallNORB-Azimuth | smallNORB-Elevation | SVHN |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| <div style="white-space: nowrap">**Adapter** </div> | 0.73/86.53(M) | 57.14 | <a href="./results/configs/ViT-B-16-VTAB-M-Caltech101-Adapter.json">84.16</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CIFAR-100-Adapter.json">66.74</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Count-Adapter.json">30.43</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Distance-Adapter.json">22.97</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Diabetic-Retinopathy-Adapter.json">75.92</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Dmlab-Adapter.json">46.29</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Location-Adapter.json">3.76</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Orientation-Adapter.json">26.47</a> | <a href="./results/configs/ViT-B-16-VTAB-M-DTD-Adapter.json">68.03</a> | <a href="./results/configs/ViT-B-16-VTAB-M-EuroSAT-Adapter.json">95.13</a> | <a href="./results/configs/ViT-B-16-VTAB-M-KITTI-Adapter.json">49.09</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-Flowers-102-Adapter.json">98.63</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-IIIT-Pet-Adapter.json">91.47</a> | <a href="./results/configs/ViT-B-16-VTAB-M-PatchCamelyon-Adapter.json">79.21</a> | <a href="./results/configs/ViT-B-16-VTAB-M-RESISC45-Adapter.json">82.25</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Azimuth-Adapter.json">7.99</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Elevation-Adapter.json">23.20</a> | <a href="./results/configs/ViT-B-16-VTAB-M-SVHN-Adapter.json">76.71</a> |
| <div style="white-space: nowrap">**LoRA** </div> | 0.71/86.51(M) | 57.61 | <a href="./results/configs/ViT-B-16-VTAB-M-Caltech101-LoRA.json">84.75</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CIFAR-100-LoRA.json">63.92</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Count-LoRA.json">33.25</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Distance-LoRA.json">27.85</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Diabetic-Retinopathy-LoRA.json">76.37</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Dmlab-LoRA.json">44.90</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Location-LoRA.json">4.54</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Orientation-LoRA.json">24.72</a> | <a href="./results/configs/ViT-B-16-VTAB-M-DTD-LoRA.json">68.56</a> | <a href="./results/configs/ViT-B-16-VTAB-M-EuroSAT-LoRA.json">94.33</a> | <a href="./results/configs/ViT-B-16-VTAB-M-KITTI-LoRA.json">50.91</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-Flowers-102-LoRA.json">98.80</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-IIIT-Pet-LoRA.json">91.66</a> | <a href="./results/configs/ViT-B-16-VTAB-M-PatchCamelyon-LoRA.json">82.57</a> | <a href="./results/configs/ViT-B-16-VTAB-M-RESISC45-LoRA.json">82.71</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Azimuth-LoRA.json">5.92</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Elevation-LoRA.json">27.00</a> | <a href="./results/configs/ViT-B-16-VTAB-M-SVHN-LoRA.json">74.30</a> |
| <div style="white-space: nowrap">**VPT / Deep**</div> | 0.45/86.24(M) | 53.12 | <a href="./results/configs/ViT-B-16-VTAB-M-Caltech101-VPT-Deep.json">83.15</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CIFAR-100-VPT-Deep.json">52.39</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Count-VPT-Deep.json">23.49</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Distance-VPT-Deep.json">20.67</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Diabetic-Retinopathy-VPT-Deep.json">75.13</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Dmlab-VPT-Deep.json">39.37</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Location-VPT-Deep.json">2.84</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Orientation-VPT-Deep.json">23.06</a> | <a href="./results/configs/ViT-B-16-VTAB-M-DTD-VPT-Deep.json">66.12</a> | <a href="./results/configs/ViT-B-16-VTAB-M-EuroSAT-VPT-Deep.json">93.13</a> | <a href="./results/configs/ViT-B-16-VTAB-M-KITTI-VPT-Deep.json">42.33</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-Flowers-102-VPT-Deep.json">97.82</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-IIIT-Pet-VPT-Deep.json">90.00</a> | <a href="./results/configs/ViT-B-16-VTAB-M-PatchCamelyon-VPT-Deep.json">77.45</a> | <a href="./results/configs/ViT-B-16-VTAB-M-RESISC45-VPT-Deep.json">79.75</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Azimuth-VPT-Deep.json">7.65</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Elevation-VPT-Deep.json">18.02</a> | <a href="./results/configs/ViT-B-16-VTAB-M-SVHN-VPT-Deep.json">63.87</a> |
| <div style="white-space: nowrap">**Linear Probing** </div> | 0.42/86.22(M) | 48.59 | <a href="./results/configs/ViT-B-16-VTAB-M-Caltech101-Linear-Probing.json">80.93</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CIFAR-100-Linear-Probing.json">37.15</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Count-Linear-Probing.json">14.07</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Distance-Linear-Probing.json">22.27</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Diabetic-Retinopathy-Linear-Probing.json">74.68</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Dmlab-Linear-Probing.json">35.32</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Location-Linear-Probing.json">3.29</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Orientation-Linear-Probing.json">18.51</a> | <a href="./results/configs/ViT-B-16-VTAB-M-DTD-Linear-Probing.json">60.69</a> | <a href="./results/configs/ViT-B-16-VTAB-M-EuroSAT-Linear-Probing.json">88.72</a> | <a href="./results/configs/ViT-B-16-VTAB-M-KITTI-Linear-Probing.json">40.08</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-Flowers-102-Linear-Probing.json">97.59</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-IIIT-Pet-Linear-Probing.json">88.09</a> | <a href="./results/configs/ViT-B-16-VTAB-M-PatchCamelyon-Linear-Probing.json">79.36</a> | <a href="./results/configs/ViT-B-16-VTAB-M-RESISC45-Linear-Probing.json">72.98</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Azimuth-Linear-Probing.json">7.42</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Elevation-Linear-Probing.json">15.09</a> | <a href="./results/configs/ViT-B-16-VTAB-M-SVHN-Linear-Probing.json">38.34</a> |
| <div style="white-space: nowrap">**Partial-1** </div> | 7.51/86.22(M) | 51.60 | <a href="./results/configs/ViT-B-16-VTAB-M-Caltech101-Partial-1.json">81.87</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CIFAR-100-Partial-1.json">42.01</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Count-Partial-1.json">25.50</a> | <a href="./results/configs/ViT-B-16-VTAB-M-CLEVR-Distance-Partial-1.json">24.34</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Diabetic-Retinopathy-Partial-1.json">75.20</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Dmlab-Partial-1.json">39.39</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Location-Partial-1.json">2.08</a> | <a href="./results/configs/ViT-B-16-VTAB-M-dSprites-Orientation-Partial-1.json">24.29</a> | <a href="./results/configs/ViT-B-16-VTAB-M-DTD-Partial-1.json">63.94</a> | <a href="./results/configs/ViT-B-16-VTAB-M-EuroSAT-Partial-1.json">91.37</a> | <a href="./results/configs/ViT-B-16-VTAB-M-KITTI-Partial-1.json">34.60</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-Flowers-102-Partial-1.json">97.82</a> | <a href="./results/configs/ViT-B-16-VTAB-M-Oxford-IIIT-Pet-Partial-1.json">89.48</a> | <a href="./results/configs/ViT-B-16-VTAB-M-PatchCamelyon-Partial-1.json">79.50</a> | <a href="./results/configs/ViT-B-16-VTAB-M-RESISC45-Partial-1.json">77.57</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Azimuth-Partial-1.json">7.65</a> | <a href="./results/configs/ViT-B-16-VTAB-M-smallNORB-Elevation-Partial-1.json">21.85</a> | <a href="./results/configs/ViT-B-16-VTAB-M-SVHN-Partial-1.json">50.35</a> |
## Contributing
**ZhiJian** is currently in active development, and we warmly welcome any contributions aimed at enhancing capabilities. Whether you have insights to share regarding pre-trained models, data, or innovative reuse methods, we eagerly invite you to join us in making **ZhiJian** even better. If you want to submit your valuable contributions, please click [here](https://zhijian.readthedocs.io/en/latest/contributing.html).
## Citing ZhiJian
```latex
@misc{zhang2023zhijian,
title={ZhiJian: A Unifying and Rapidly Deployable Toolbox for Pre-trained Model Reuse},
author={Yi-Kai Zhang and Lu Ren and Chao Yi and Qi-Wei Wang and De-Chuan Zhan and Han-Jia Ye},
year={2023},
eprint={2308.09158},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
@misc{zhijian2023,
author = {ZhiJian Contributors},
title = {LAMDA-ZhiJian},
year = {2023},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/zhangyikaii/LAMDA-ZhiJian}}
}
```
| zhijian | /zhijian-0.0.3.tar.gz/zhijian-0.0.3/README.md | README.md |
import os
import sys
import pdb
import inspect
import builtins
from functools import wraps
def __zhijxu_is_rank_0():
if os.environ.get("LOCAL_RANK", "0") == "0":
return True
return False
def zhijxu_run_once(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
return func(*args, **kwargs)
wrapper.has_run = False
return wrapper
@zhijxu_run_once
def zhijxu_vscode_attach(sleep_time_sec=3):
"""
only rank 0 will wait for vscode debug attach, other rank continue to run
"""
import debugpy
from termcolor import cprint
import time as tmp_time
if __zhijxu_is_rank_0():
debugpy.listen(("localhost", 56789))
tmp_time.sleep(
sleep_time_sec
) # so the next print can be shown at last line of terminal in multiprocess case
cprint("\n\nzhijxu, waiting for debug connect\n\n", color="red", flush=True)
debugpy.wait_for_client()
cprint("\n\nzhijxu,debug connection done!!!\n\n", color="red", flush=True)
def zhijxu_do_bench(
fn,
warmup=25,
rep=100,
grad_to_none=None,
percentiles=(0.5, 0.2, 0.8),
fast_flush=False,
):
"""
example call: do_bench(lambda: matmul(a,b))
Benchmark the runtime of the provided function. By default, return the median runtime of :code:`fn` along with
the 20-th and 80-th performance percentile.
:param fn: Function to benchmark
:type fn: Callable
:param warmup: Warmup time (in ms)
:type warmup: int
:param rep: Repetition time (in ms)
:type rep: int
:param grad_to_none: Reset the gradient of the provided tensor to None
:type grad_to_none: torch.tensor, optional
:param percentiles: Performance percentile to return in addition to the median.
:type percentiles: list[float]
:param fast_flush: Use faster kernel to flush L2 between measurements
:type fast_flush: bool
"""
import torch
# Estimate the runtime of the function
fn()
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(5):
fn()
end_event.record()
torch.cuda.synchronize()
estimate_ms = start_event.elapsed_time(end_event) / 5
# compute number of warmup and repeat
n_warmup = max(1, int(warmup / estimate_ms))
n_repeat = max(1, int(rep / estimate_ms))
# We maintain a buffer of 256 MB that we clear
# before each kernel call to make sure that the L2
# doesn't contain any input data before the run
start_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
end_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
if fast_flush:
cache = torch.empty(int(256e6 // 4), dtype=torch.int, device="cuda")
else:
cache = torch.empty(int(256e6), dtype=torch.int8, device="cuda")
# Warm-up
for _ in range(n_warmup):
fn()
# Benchmark
for i in range(n_repeat):
# we don't want `fn` to accumulate gradient values
# if it contains a backward pass. So we clear the
# provided gradients
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
# we clear the L2 cache before each run
cache.zero_()
# record time of `fn`
start_event[i].record()
fn()
end_event[i].record()
# Record clocks
torch.cuda.synchronize()
times = torch.tensor([s.elapsed_time(e) for s, e in zip(start_event, end_event)])
if percentiles:
percentiles = torch.quantile(times, torch.tensor(percentiles)).tolist()
return tuple(percentiles)
else:
return torch.mean(times).item()
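# Usage sketch (added comment; assumes PyTorch and a CUDA-capable GPU are available):
#   import torch
#   a = torch.randn(1024, 1024, device="cuda")
#   b = torch.randn(1024, 1024, device="cuda")
#   p50, p20, p80 = zhijxu_do_bench(lambda: a @ b)  # median, 20th and 80th percentiles, in ms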
def zhijxu_cuda_profiling(step):
"""
    The controlling environment variables and their possible values are:
- VIZTRACER, 0/1
- NSYS, 0/1
- START_STEP, any integer larger than 0
- END_STEP, any integer larger than START_STEP
"""
from termcolor import cprint
import torch
from viztracer import VizTracer
if os.environ.get("VIZTRACER", "0") == "1":
assert (
os.environ["CUDA_LAUNCH_BLOCKING"] == "1"
), "CUDA_LAUNCH_BLOCKING must be set to 1 when using VizTracer to profile CUDA code"
        if step == 0:
            # keep the tracer on the function object so it persists across calls
            zhijxu_cuda_profiling._tracer = VizTracer(output_file=f"trace_{os.getpid()}.json")
        if step == int(os.environ.get("START_STEP", 10)):
            cprint("Start tracing", "red")
            zhijxu_cuda_profiling._tracer.start()
        if step == int(os.environ.get("END_STEP", 20)):
            cprint("Stop tracing", "red")
            zhijxu_cuda_profiling._tracer.stop()
            zhijxu_cuda_profiling._tracer.save()
            sys.exit(0)
if os.environ.get("NSYS", "0") == "1":
assert (
os.environ.get("CUDA_LAUNCH_BLOCKING", "0") != "1"
), "CUDA_LAUNCH_BLOCKING must not be set to 1 when using nsys"
if step == int(os.environ.get("START_STEP", 10)):
cprint("Start tracing", "red")
torch.cuda.cudart().cudaProfilerStart()
if step == int(os.environ.get("END_STEP", 20)):
cprint("Stop tracing", "red")
torch.cuda.cudart().cudaProfilerStop()
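# Usage sketch (added comment; illustrative only -- `train.py` and `loader` are placeholder
# names): call this once per training step and select the profiling window via env vars,
# e.g. run `NSYS=1 START_STEP=10 END_STEP=20 nsys profile python train.py` and inside the loop:
#   for step, batch in enumerate(loader):
#       zhijxu_cuda_profiling(step)
#       ...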
@zhijxu_run_once
def zhijxu_pdb():
"""
only rank 0 will enter pdb, other ranks continue execution
"""
if __zhijxu_is_rank_0():
from termcolor import cprint
cprint("zhijxu, i am rank 0, enter pdb now", "red")
pdb.set_trace()
@zhijxu_run_once
def zhijxu_enter_pdb_at_exception():
"""
    Register a hook that enters pdb when the process is about to exit; this helps debugging when the process dies with an uncaught exception
"""
    import atexit
atexit.register(pdb.pm)
def zhijxu_open_onnx_in_tensorboard(model, port):
"""
    Give the ONNX model path and a TensorBoard port; the ONNX graph is converted to TensorBoard events and TensorBoard is launched automatically for you
"""
from tempfile import mkdtemp
tmp_dir = mkdtemp(prefix="onnx-tensorboard-")
from termcolor import cprint
cprint(f"converted tensorboard is put at {tmp_dir}", "red")
os.system(f"python /home/zhijxu/onnxruntime/tools/python/onnx2tfevents.py --logdir={tmp_dir} --model {model}")
os.system(f"tensorboard --logdir={tmp_dir} --port {port} &")
fset = {
name: obj
for name, obj in inspect.getmembers(sys.modules[__name__])
if inspect.isfunction(obj) and name.startswith("zhijxu_")
}
for name, obj in fset.items():
if inspect.isfunction(obj):
setattr(builtins, name, obj) | zhijiang | /scripts/useful_func.py | useful_func.py |
from collections import defaultdict
from packaging import version
import onnx
from onnx.helper import tensor_dtype_to_string
from termcolor import cprint
assert version.parse(onnx.__version__) >= version.parse("1.14.0"), "onnx version must >= 1.14.0, as tensor_dtype_to_string is not supported in older version"
class Analyze_onnx_model():
def __init__(self, onnx_file):
self.model = onnx.load(onnx_file)
self.constant_registery = dict()
def print_dict_one_by_one(self, input_dict):
for key, value in input_dict.items():
print(key, value)
def get_node_dict(self):
def sort_by_cnt(input_dict):
tmp = sorted(input_dict.items(), key=lambda x: x[1], reverse=True)
return dict(tmp)
nodes = self.model.graph.node
node_dict = defaultdict(int)
for node in nodes:
node_dict[node.op_type] += 1
return sort_by_cnt(node_dict)
def get_constant_dict(self):
def sort_by_size(input_dict):
tmp = sorted(input_dict.items(), key=lambda x: x[1][0], reverse=True)
return dict(tmp)
constants = self.model.graph.initializer
constant_dict = {}
for constant in constants:
val = onnx.numpy_helper.to_array(constant)
self.constant_registery[constant.name] = val
            constant_dict[constant.name] = [val.size, val.shape, tensor_dtype_to_string(constant.data_type).replace("TensorProto.", "")]
return sort_by_size(constant_dict)
def get_value_info_dict(self):
value_infos = self.model.graph.value_info
value_info_dict = {}
for value_info in value_infos:
            value_info_dict[value_info.name] = [[i.dim_value for i in value_info.type.tensor_type.shape.dim], tensor_dtype_to_string(value_info.type.tensor_type.elem_type).replace("TensorProto.", "")]
return value_info_dict
def print_info(self):
cprint("zhijiang, node op info:", "red")
        self.print_dict_one_by_one(self.get_node_dict())
cprint("zhijiang, constant info:", "red")
        self.print_dict_one_by_one(self.get_constant_dict())
cprint("zhijiang, value info:", "red")
        self.print_dict_one_by_one(self.get_value_info_dict()) | zhijiang | /scripts/zhijiang_onnx_helper.py | zhijiang_onnx_helper.py
from collections import defaultdict
from packaging import version
import onnx
from onnx.helper import tensor_dtype_to_string
from termcolor import cprint
assert version.parse(onnx.__version__) >= version.parse("1.14.0"), "onnx version must >= 1.14.0, as tensor_dtype_to_string is not supported in older version"
class Analyze_onnx_model():
def __init__(self, onnx_file):
self.model = onnx.load(onnx_file)
self.constant_registery = dict()
def print_dict_one_by_one(self, input_dict):
for key, value in input_dict.items():
print(key, value)
def get_node_dict(self):
def sort_by_cnt(input_dict):
tmp = sorted(input_dict.items(), key=lambda x: x[1], reverse=True)
return dict(tmp)
nodes = self.model.graph.node
node_dict = defaultdict(int)
for node in nodes:
node_dict[node.op_type] += 1
return sort_by_cnt(node_dict)
def get_constant_dict(self):
def sort_by_size(input_dict):
tmp = sorted(input_dict.items(), key=lambda x: x[1][0], reverse=True)
return dict(tmp)
constants = self.model.graph.initializer
constant_dict = {}
for constant in constants:
val = onnx.numpy_helper.to_array(constant)
self.constant_registery[constant.name] = val
            constant_dict[constant.name] = [val.size, val.shape, tensor_dtype_to_string(constant.data_type).replace("TensorProto.", "")]
return sort_by_size(constant_dict)
def get_value_info_dict(self):
value_infos = self.model.graph.value_info
value_info_dict = {}
for value_info in value_infos:
            value_info_dict[value_info.name] = [[i.dim_value for i in value_info.type.tensor_type.shape.dim], tensor_dtype_to_string(value_info.type.tensor_type.elem_type).replace("TensorProto.", "")]
return value_info_dict
def print_info(self):
cprint("zhijxu, node op info:", "red")
        self.print_dict_one_by_one(self.get_node_dict())
cprint("zhijxu, constant info:", "red")
        self.print_dict_one_by_one(self.get_constant_dict())
cprint("zhijxu, value info:", "red")
        self.print_dict_one_by_one(self.get_value_info_dict()) | zhijiang | /scripts/zhijxu_onnx_helper.py | zhijxu_onnx_helper.py
import os
import shutil
from time import gmtime, strftime
def create_package_template(current_path, project_name, version="0.0.1", license_type="MIT"):
# create src folder
src_path = os.path.join(current_path, "src")
os.makedirs(src_path, exist_ok=True)
# create project folder
project_path = os.path.join(src_path, project_name)
os.makedirs(project_path, exist_ok=True)
# create __init__.py example.py
init_file_path = os.path.join(project_path, "__init__.py")
example_file_path = os.path.join(project_path, "example.py")
with open(init_file_path, mode='a'): pass
with open(example_file_path, mode='a'): pass
if license_type == "MIT":
license = """MIT License
Copyright (c) [year] [fullname]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
else:
license=""
# create other necessary files and folder
with open(os.path.join(current_path, "LICENSE"), mode='w') as f:
f.write(license)
pyproject_template = ["[build-system] \n",
"requires = [\"setuptools>=61.0\"] \n",
"build-backend = \"setuptools.build_meta\" \n",
"\n"
"[project] \n",
"name = \"{}\" \n".format(project_name),
"version = \"{}\" \n".format(version),
"authors = [ \n",
"{ name=\"Example Author\", email=\"[email protected]\" }, \n",
"]\n",
"description = \"A small example package\" \n",
"readme = \"README.md\" \n",
"requires-python = \">=3.7\" \n",
"classifiers = [ \n",
"\"Programming Language :: Python :: 3\",\n",
"\"License :: OSI Approved :: MIT License\",\n",
"\"Operating System :: OS Independent\", \n",
"]\n",
"\n",
"[project.urls]\n",
"\"Homepage\" = \"https://github.com/pypa/sampleproject\"\n",
"\"Bug Tracker\" = \"https://github.com/pypa/sampleproject/issues\" \n"]
with open(os.path.join(current_path, "pyproject.toml"), mode='w') as f:
f.writelines(pyproject_template)
with open(os.path.join(current_path, "README.md"), mode='w'): pass
os.makedirs(os.path.join(current_path, "tests"), exist_ok=True)
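# Usage sketch (added comment; illustrative only -- the project name is a placeholder):
#   create_package_template(os.getcwd(), "my_project", version="0.0.1", license_type="MIT")
# This lays out src/my_project/{__init__.py, example.py}, LICENSE, pyproject.toml, README.md and tests/.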
def version_control(current_path, runs_name, not_save_list=[".git", "runs"], log=""):
"""This is a simple version control function that copy current codes and save it into a specified folder
Args:
current_path (str): The top level code directory
runs_name (str): Version control runs name
not_save_list (list, str): Define the not copy files name, Defaults to ["runs"].
log (str, optional): Add this version log, Defaults to "".
"""
# create runs folder
runs_path = os.path.join(current_path, "runs")
os.makedirs(runs_path, exist_ok=True)
# create current runs folder with the name of runs_name
runs_name_path = os.path.join(runs_path, runs_name)
os.makedirs(runs_name_path)
# get all the list file in current path
dirs = os.listdir(current_path)
print("Runs name path", runs_name_path)
if "runs" not in not_save_list:
not_save_list.append("runs")
log = strftime("%Y-%m-%d %H:%M:%S", gmtime()) +"\n"+ log
with open(os.path.join(runs_name_path, "log.txt"), mode='w') as f:
f.write(log)
for file in dirs:
if file not in not_save_list:
source = os.path.join(current_path, file)
destination = os.path.join(runs_name_path, file)
if os.path.isfile(source):
# copy these file to runs_path
shutil.copy(os.path.join(current_path, file), os.path.join(runs_name_path, file))
else:
shutil.copytree(source, destination)
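# Usage sketch (added comment; illustrative only -- run name and log text are placeholders):
#   version_control(os.getcwd(), "exp_001", not_save_list=[".git", "datasets"], log="baseline run")
# This snapshots the current code tree into runs/exp_001 together with a timestamped log.txt.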
def create_project_template(current_path, add_folder=None):
"""Define a project template: in the top level directory:
configs: store package template
    datasets: store necessary datasets
    experiments: store experiment results and also checkpoints
    scripts: running files
    tests: testing files folder
packages: store the main model and algorithm
Args:
current_path (str): path to create project template
add_folder (str): if needs to add other folders
"""
# create necessary
folders = ["configs", "datasets", "experiments", "scripts", "tests", "packages"]
if add_folder is not None:
folders += add_folder
ReadMe = "README.md"
for folder in folders:
folder_path = os.path.abspath(os.path.join(current_path, folder))
os.makedirs(folder_path, exist_ok=True)
ReadMe_path = os.path.abspath(os.path.join(folder_path, ReadMe))
with open(ReadMe_path, mode='a'): pass | zhijie-toolbox | /zhijie_toolbox-0.1.7-py3-none-any.whl/zhijie_toolbox/toolbox.py | toolbox.py |
===============================
zhimabot
===============================
.. image:: https://img.shields.io/pypi/v/zhimabot.svg
:target: https://pypi.python.org/pypi/zhimabot
.. image:: https://img.shields.io/travis/wwj718/zhimabot.svg
:target: https://travis-ci.org/wwj718/zhimabot
.. image:: https://readthedocs.org/projects/zhimabot/badge/?version=latest
:target: https://zhimabot.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://pyup.io/repos/github/wwj718/zhimabot/shield.svg
:target: https://pyup.io/repos/github/wwj718/zhimabot/
:alt: Updates
Python library for zhimabot
* Free software: MIT license
* Documentation: https://zhimabot.readthedocs.io.
Features
--------
* TODO
Credits
---------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| zhimabot | /zhimabot-0.1.0.tar.gz/zhimabot-0.1.0/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/wwj718/zhimabot/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zhimabot could always use more documentation, whether as part of the
official zhimabot docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/wwj718/zhimabot/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zhimabot` for local development.
1. Fork the `zhimabot` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zhimabot.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zhimabot
$ cd zhimabot/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 zhimabot tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check
https://travis-ci.org/wwj718/zhimabot/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ py.test tests.test_zhimabot
| zhimabot | /zhimabot-0.1.0.tar.gz/zhimabot-0.1.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install zhimabot, run this command in your terminal:
.. code-block:: console
$ pip install zhimabot
This is the preferred method to install zhimabot, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for zhimabot can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/wwj718/zhimabot
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/wwj718/zhimabot/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/wwj718/zhimabot
.. _tarball: https://github.com/wwj718/zhimabot/tarball/master
| zhimabot | /zhimabot-0.1.0.tar.gz/zhimabot-0.1.0/docs/installation.rst | installation.rst |
from __future__ import unicode_literals
try:
    # install ipython if it is missing
    from IPython import embed
except ImportError:
    import pip
    pip.main(["install", "ipython"])  # install the package dynamically
    from IPython import embed
from prompt_toolkit import prompt
from prompt_toolkit.history import FileHistory#InMemoryHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.token import Token
from prompt_toolkit.validation import Validator, ValidationError  # validation
#from pygments.lexers import JsonLexer
import os.path
import sys
import uuid
import json
import requests
#from pprint import pprint
'''
try:
import zhimabot
except ImportError:
sys.path.append(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir))
import yige
'''
CLIENT_ACCESS_TOKEN = os.environ.get("ZHIMABOT_ACCESS_TOKEN") #export ZHIMABOT_ACCESS_TOKEN=10115
######## prompt
DEFAULT_MAX_STEPS = 5
INTERACTIVE_PROMPT = '> '
BOTTOM_TOOLBAR_STYLE = style_from_dict({
Token.Toolbar: '#ffffff bg:#333333',
})
def get_bottom_toolbar_tokens(cli):
    return [(Token.Toolbar, 'Usage: 1. query, e.g. "find a three-star hotel near Xinjiekou"  2. simple debugging: !run xxx  3. advanced debugging: !debug ')]
class myValidator(Validator):
def validate(self, document):
        if not document.text:  # input is empty
            raise ValidationError(message='Input must not be empty',
cursor_position=len(document.text)) # Move cursor to end of input.
###########
from pygments import highlight, lexers, formatters
def output_format(obj):
    '''
    Works with Python 2 and 3.
    Chinese characters are not rendered correctly yet.
    obj is a dict
    http://stackoverflow.com/questions/35950573/python-unicode-string-to-javascript
    '''
formatted_json = json.dumps(obj, sort_keys=True, indent=4,ensure_ascii=False).encode('utf8')
if (sys.version_info > (3, 0)):
# Python 3 code in this block
colorful_json = highlight(formatted_json, lexers.JsonLexer(), formatters.TerminalFormatter())
return colorful_json
else:
colorful_json = highlight(unicode(formatted_json, 'UTF-8'), lexers.JsonLexer(), formatters.TerminalFormatter())
        return colorful_json  # Chinese output is still not handled correctly
def main():
try:
input_function = raw_input
except NameError:
input_function = input #python3
session_id = uuid.uuid1()
history = FileHistory('/tmp/.zhimabot_prompt') #InMemoryHistory()
my_completer = WordCompleter(['!print','response',"!run","!debug"])
# zhimabot
'''
ai = yige.Yige(CLIENT_ACCESS_TOKEN)
request = ai.text_request()
'''
response = None
while True:
try:
            # lexer: pull the vocabulary from the web, cache it locally, then highlight
            # tokenize the local input first
            # see the article on sqlite
            # https://github.com/eliangcs/http-prompt yige-prompt
            # https://github.com/donnemartin/haxor-news
            # natural language: https://github.com/jonathanslenders/python-prompt-toolkit/blob/master/examples/regular-language.py
message = prompt(INTERACTIVE_PROMPT,
history=history,
enable_history_search=True,
auto_suggest=AutoSuggestFromHistory(),
completer=my_completer,
                              get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,  # bottom status bar
style = BOTTOM_TOOLBAR_STYLE,
validator = myValidator(),
#get_title= fun #"Yige_prompt",
).rstrip()
            #mouse_support=True).rstrip()  # mouse support causes paging issues
except (KeyboardInterrupt, EOFError):
return
        # the user types a sentence which is then parsed; colors and syntax highlighting make debugging in the console easier
#context = run_actions(session_id, message, context, max_steps)
if message:
            # we only have the raw input here; what to do with it next is up to you
            if message.startswith("!run"):
                # execute Python in the current context
                # the result of the last request is in `response`; grep for keywords?
                code = message.split("run")[-1].strip()
                # turn `response` into an attribute-style object
                # for complex debugging use ipython, or enter an ipython context carrying the current environment
                # inject the context before starting ipython?
try:
                    exec(code)  # execute in the current context, with Python hints
except Exception as e :
print(str(e))
elif message.startswith("!debug"):
                # help with installation
                embed(header='zhimabot debug console \n -- by 『wwj718』(blog.just4fun.site) \n -- suggestions or bug reports are welcome, email me: [email protected]', banner1='')
else:
                # run the query
                try:
                    # the actual logic goes here
                    #request.query = message  # user input
payload = {}
payload["appId"] = CLIENT_ACCESS_TOKEN
payload["query"] = message
url = "http://dev.zhimabot.com:8080/zhimabot/analysis"
response = requests.post(url,json=payload)
                    #response = request.getresponse()  # note the confidence value
#request.session_id = session_id
#response = request.getresponse() #json
print(output_format(response.json()))
except Exception as e:
print(str(e))
        # meaning of each returned field: http://docs.yige.ai/Query%E6%8E%A5%E5%8F%A3.html
if __name__ == '__main__':
main() | zhimabot | /zhimabot-0.1.0.tar.gz/zhimabot-0.1.0/examples/send_text_example_prompt.py | send_text_example_prompt.py |
import errno
import ifaddr
import socket
def discover_devices(timeout: int = 5) -> list:
"""
Search for all available devices on all networks of this computer
Args:
timeout (int, optional): Timeout(s). Defaults to `5`
Returns:
list: List of available devices. [(device_name: str, ip: str)]
"""
devices = []
ip_networks = _get_ip_list()
for ip in ip_networks:
with socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
) as sock:
sock.settimeout(timeout)
sent, port = _discovery_multicast(ip, sock)
if sent:
devices = _receive_devices(port, sock, devices)
return devices
def _get_ip_list() -> list:
"""
Return a list of all ips of this computer
Returns:
list: A list containing all the ips of this computer
"""
adapters = ifaddr.get_adapters()
ip_list = []
for adapter in adapters:
for ip in adapter.ips:
if isinstance(ip.ip, str):
ip_list.append(ip.ip)
assert len(ip_list) > 0
return ip_list
def _get_local_ip(ip: str, port: int = 80) -> str:
"""
For a given IP, return the local IP that can access it
Args:
ip (str): Remote IP
port (int, optional): Remote port. Defaults to `80`
Returns:
str: The local IP that can access the given remote IP
"""
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect((ip, port))
local_ip = s.getsockname()[0]
return local_ip
def _receive_devices(port: int, sock: socket.socket, devices: list) -> list:
"""
After sending the device discovery request, collect all available
devices
Args:
port (int): Local port
sock (socket.socket): Socket object
devices (list): List of available devices.
[(device_name: str, ip: str)]
Returns:
list: Updated list of available devices.
[(device_name: str, ip: str)]
"""
try:
while True:
            data, _ = sock.recvfrom(port)  # the port number doubles as the receive buffer size here
split_data = data.decode("utf-8").split(":")
if len(split_data) == 3:
devices.append((split_data[0].strip(), split_data[2].strip()))
else:
raise ValueError(data)
except (socket.timeout, OSError):
pass
return devices
def _discovery_multicast(ip: str, sock: socket.socket, attempts: int = 5):
"""
Send a multicast command on the given network to discover available
devices.
If port is in use, try the next port up to <attempts> times
Args:
ip (str): Local IP
sock (socket.socket): Socket object
attempts (int, optional): Number of times trying different ports
if port is in use. Defaults to `5`.
Returns:
tuple: (sent: bool, port: int) ``sent`` is `True` after sending the
command. ``port`` is the one used for the connection
"""
multicast_ip = "239.253.1.1"
port = 50000
sent = False
    for i in range(attempts):
        try:
            sock.bind((ip, port))
            sock.sendto(b"hdiq-discovery-request-py", (multicast_ip, port))
            sent = True
        except OSError as e:
            if e.errno == errno.EADDRINUSE and i < attempts - 1:
                print(f"Socket Error {errno.EADDRINUSE}: socket in use")
                port += 1  # try the next port
                continue
            break
        else:
            break
return sent, port | zhinst-hdiq | /zhinst_hdiq-1.0.2-py3-none-any.whl/zhinst/hdiq/utils.py | utils.py |
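# Illustrative usage sketch (not part of the module) for discover_devices defined
# above: print every Zurich Instruments HDIQ that answers on the local networks.
from zhinst.hdiq import utils
for device_name, device_ip in utils.discover_devices(timeout=2):
    print(f"{device_name} -> {device_ip}")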
import socket
import time
from zhinst.hdiq import utils
class Hdiq:
"""
High-level driver for the Zurich Instruments HDIQ
Attributes:
ip (str): IP of the HDIQ device
port (int, optional): Port used for the HDIQ connection.
Defaults to `4242`
timeout (int, optional): Timeout(s). Defaults to `60`
"""
_TTL = 60
def __init__(self, ip: str, port: int = 4242, timeout: int = 60):
self.port = port
self.ip_with_port = (ip, port)
self.local_ip = utils._get_local_ip(ip, port)
self.local_ip_with_port = (self.local_ip, self.port)
self.timeout = timeout
def set_lo_to_exp(self, channel: int) -> bool:
"""
Route the LO signal to the Exp output
Args:
channel (int): Channel number `1-8` supported
Returns:
bool: True if `ACK` is received
"""
command = f"setLOtoExp{channel}"
return bool(self._send_command_loop(command))
def set_rf_to_exp(self, channel: int) -> bool:
"""
Route the upconverted RF signal to the Exp port
Args:
channel (int): Channel number `1-8` supported
Returns:
bool: True if `ACK` is received
"""
command = f"setRFtoExp{channel}"
return bool(self._send_command_loop(command))
def set_rf_to_calib(self, channel: int) -> bool:
"""
Route the upconverted RF signal to the Calib port
Args:
channel (int): Channel number `1-8` supported
Returns:
bool: True if `ACK` is received
"""
command = f"setRFtoCalib{channel}"
return bool(self._send_command_loop(command))
def get_channel_status(self, channel: int) -> str:
"""
Returns the channel status `1-3`
1: RF output to Exp port
2: RF output to Calib port
3: LO input to Exp port
Default is `1`
Args:
channel (int): Channel number `1-8` supported
Returns:
str: Channel status `1-3`
"""
request = f"getChannelStatus{channel}"
return self._send_request(request)
def _send_command_loop(self, command: str, is_request: bool = False):
"""
Try to send the command until success or timeout
Args:
command (str): Corresponding command
is_request (bool, optional): Defaults to False
Returns:
str: Returns the result of _send_command()
"""
start_time = end_time = time.time()
sent = False
while not sent and end_time - start_time < self.timeout:
sent = self._send_command(command, is_request)
end_time = time.time()
return sent
def _send_request(self, request: str) -> str:
"""
Requests a message other than `ACK` from the HDIQ.
Args:
request (str): The request command
Returns:
            str: Returns the corresponding status
"""
reply = self._send_command_loop(request, is_request=True)
if reply is None:
return ""
return str(reply)
def _send_command(self, command: str, is_request: bool = False):
"""
Send the command to HDIQ at port `4242`.
Parse response from HDIQ.
Args:
            command (str): Command string to send
is_request (bool, optional): Defaults to False
Returns:
(str, bool): Returns str if there was a request,
otherwise bool
"""
with socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
) as sock:
sock.bind(self.local_ip_with_port)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, self._TTL)
sock.sendto(str.encode(command), self.ip_with_port)
ack = False
result = None
sock.settimeout(self.timeout)
try:
data, _ = sock.recvfrom(self.port)
if "ack" in data.decode("utf-8").lower():
ack = True
if is_request:
data, _ = sock.recvfrom(self.port)
result = data.decode("utf-8")
except socket.timeout:
pass
return result if is_request else ack | zhinst-hdiq | /zhinst_hdiq-1.0.2-py3-none-any.whl/zhinst/hdiq/hdiq.py | hdiq.py |
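# Illustrative usage sketch (not part of the module): connect to the first HDIQ
# found on the network and route channel 1. Which device is found depends on the
# local setup.
from zhinst.hdiq import utils
from zhinst.hdiq.hdiq import Hdiq
devices = utils.discover_devices()
if devices:
    device_name, device_ip = devices[0]
    hdiq = Hdiq(device_ip)
    hdiq.set_rf_to_exp(1)  # route the upconverted RF signal of channel 1 to the Exp port
    print(device_name, hdiq.get_channel_status(1))  # "1" means RF output to Exp port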
import typing as t
from enum import Enum
import numpy as np
from BaseDriver import LabberDriver
from zhinst.utils import convert_awg_waveform, parse_awg_waveform
Quantity = t.TypeVar("Quantity")
class Sources(Enum):
"""Signal Sources"""
NONE = 0
WAVES = 1
INTERLEAVED = 2
COMPLEX = 3
class Driver(LabberDriver):
"""This class implements a multi-qubit pulse generator."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._source = Sources.NONE
# def performOpen(self, options={}):
# """Perform the operation of opening the instrument connection."""
# wave1 = np.array(0.5*np.ones(1008))
# wave2 = np.array(-0.6*np.ones(1008))
# self.setValue("Wave 1 - Signal", wave1)
# self.sendValueToOther("Wave 2 - Signal", wave2)
# test = self.getValue("Interleaved - Signal")
# print(test)
def performSetValue(self, quant: Quantity, value: t.Any, **_):
"""Perform the Set Value instrument operation."""
if quant.name == "Interleaved - Signal":
self._source = Sources.INTERLEAVED
return self._update_from_interleaved(quant, value)
if self._source == Sources.INTERLEAVED and quant.name in [
"Interleaved - Num - Channels",
"Interleaved - Marker - Present",
]:
return self._update_from_interleaved(quant, value)
if quant.name == "Complex - Signal":
self._source = Sources.COMPLEX
return self._update_from_complex(quant, value)
if quant.name in ["Wave 1 - Signal", "Wave 2 - Signal", "Marker - Signal"]:
self._source = Sources.WAVES
return self._update_from_waves(quant, value)
return None
def _update_from_interleaved(self, quant: Quantity, value: t.Any) -> None:
"""Update quants based on the interleaved waveform.
Args:
quant: Changed Quant
value: New Value of quant
"""
# get Values
raw_wave = self._get_array("Interleaved - Signal", quant, value)
channels = self._get_value("Interleaved - Num - Channels", quant, value)
markers_present = self._get_value(
"Interleaved - Marker - Present", quant, value
)
if raw_wave is None:
return
wave1, wave2, markers = parse_awg_waveform(
raw_wave, channels=channels, markers_present=markers_present
)
complex_wave = np.empty(wave1.shape, dtype=np.complex128)
complex_wave.real = wave1
if wave2 is not None:
complex_wave.imag = wave2
self.setValue("Wave 1 - Signal", wave1)
self.setValue("Wave 2 - Signal", wave2)
self.setValue("Marker - Signal", markers)
self.setValue("Complex - Signal", complex_wave)
def _update_from_complex(self, quant, value):
"""Update quants based on the complex waveform.
Args:
quant: Changed Quant
value: New Value of quant
"""
complex_wave = self._get_array("Complex - Signal", quant, value)
wave1 = complex_wave.real
wave2 = complex_wave.imag
markers = self.getValue("Marker - Signal")
interleaved_waveform = convert_awg_waveform(
wave1,
wave2=wave2,
markers=markers,
)
self.setValue("Wave 1 - Signal", wave1)
self.setValue("Wave 2 - Signal", wave2)
self.setValue("Interleaved - Signal", interleaved_waveform)
def _update_from_waves(self, quant, value):
"""Update quants based on wave1 wave2 and marker.
Args:
quant: Changed Quant
value: New Value of quant
"""
wave1 = self._get_array("Wave 1 - Signal", quant, value)
wave2 = self._get_array("Wave 2 - Signal", quant, value)
markers = self._get_array("Marker - Signal", quant, value)
if wave1 is not None:
interleaved_waveform = convert_awg_waveform(
wave1,
wave2=wave2,
markers=markers,
)
self.setValue("Interleaved - Signal", interleaved_waveform)
complex_wave = np.empty(wave1.shape, dtype=np.complex128)
complex_wave.real = wave1
if wave2 is not None:
complex_wave.imag = wave2
self.setValue("Complex - Signal", complex_wave)
def _get_value(
self, quant_name: str, set_quant: Quantity, set_value: t.Any
) -> t.Any:
"""Get value for quant
        If the quant is not the current one, it will be fetched.
Args:
quant_name: Name of the target quant.
set_quant: Quant that is currently set.
set_value: Value of the quant that is currently set.
Returns:
Value for the target quant
"""
return set_value if set_quant.name == quant_name else self.getValue(quant_name)
def _get_array(
self, quant_name: str, set_quant: Quantity, set_value: t.Any
) -> t.Any:
"""Get array for quant
        If the quant is not the current one, it will be fetched.
Args:
quant_name: Name of the target quant.
set_quant: Quant that is currently set.
set_value: Value of the quant that is currently set.
Returns:
array for the target quant
"""
labber_value = self._get_value(quant_name, set_quant, set_value)
return labber_value["y"] if labber_value["y"].size > 0 else None | zhinst-labber | /zhinst_labber-0.3.1-py3-none-any.whl/zhinst/labber/static_drivers/Zurich_Instruments_Waveform_Processor/Zurich_Instruments_Waveform_Processor.py | Zurich_Instruments_Waveform_Processor.py |
import re
import typing as t
from zhinst.labber.generator import helpers
class Quant:
"""Quant representation of a node-like path.
Args:
quant: Quant node-like path.
defs: Quant definitions in Labber format.
"""
def __init__(self, quant: str, defs: t.Dict[str, str]):
self._quant = quant.strip("/")
self._quant_parts = self._quant.split("/")
self._defs = defs
@property
def suffix(self) -> str:
"""Suffix for the quant."""
return self._defs.get("suffix", "").lower()
@property
def title(self) -> str:
"""Quant title."""
return self._quant
@property
def label(self) -> str:
"""Quant label."""
if self._quant_parts[-1].isnumeric():
return "/".join(self._quant_parts[-2:])
return self._quant_parts[-1]
@property
def group(self) -> str:
"""Quant group."""
path = [x for x in self._quant_parts if not x.isnumeric()]
if len([x for x in self._quant_parts if x.isnumeric()]) > 1:
idx_ = 0
for idx, c in enumerate(self._quant_parts):
if c.isnumeric():
idx_ = idx
return "/".join(path[: idx_ - 1])
if len(path) > 1:
return "/".join(path[:-1])
return "/".join(path)
@property
def set_cmd(self) -> str:
"""Quant set command."""
return self.title
@property
def get_cmd(self) -> str:
"""Quant get command."""
return self.title
@property
def section(self) -> str:
"""Quant section."""
digit = re.search(r"\d", self._quant)
if digit is not None:
digit = digit.group(0)
return "".join(list(self._quant)[: self._quant.find(digit) + 1])
else:
return self.title.split("/")[0]
def as_dict(self) -> t.Dict[str, t.Dict[str, str]]:
"""Quant as a Python dictionary.
Returns:
Quant in a Python dictionary format.
"""
defs = self._defs.copy()
defs.pop("suffix", None)
if self.suffix:
label = self.title + "/" + self.suffix
else:
label = self.title
res = {
"label": label,
"group": self.group,
"section": self.section,
"set_cmd": self.set_cmd,
"get_cmd": self.get_cmd,
"permission": "WRITE",
}
res.update(defs)
return {label: res}
class NodeQuant:
"""Zurich instruments node information as a Labber quant.
Node information is transformed into a Labber suitable format.
Args:
node_info: Node information.
Example format:
{
"Node": "/DEV1234/QACHANNELS/1/GENERATOR/AUXTRIGGERS/0/CHANNEL",
"Description": "Selects the source of the digital Trigger.",
"Properties": "Read, Write, Setting",
"Type": "Integer (enumerated)",
"Unit": "None",
"Options": {}
}
"""
def __init__(self, node_info: t.Dict):
self._validate_node_info(node_info)
self._node_info = node_info
self._node_info.setdefault("Options", {})
self._node_path = helpers.delete_device_from_node_path(
node_info["Node"].upper()
)
self._node_path_no_prefix = self._node_path.strip("/")
self._path_parts = self._node_path_no_prefix.split("/")
self._properties = node_info.get("Properties", "").lower()
def _validate_node_info(self, node_info: t.Dict) -> None:
"""Validate Node info.
Generally nodes that requires polling are ignored due to limited
Labber functionality. This can change in the future.
Args:
node_info: Node info in LabOne format.
Raises:
ValueError: Value(s) are not supported.
"""
not_allowed_types = [
"ZIPWAWave",
"ZITriggerSample",
"ZICntSample",
"ZIScopeWave",
"ZIAuxInSample",
"ZIImpedanceSample",
]
if node_info.get("Type", None) in not_allowed_types:
raise ValueError(f"Node type {node_info.get('Type', None)} not allowed.")
@staticmethod
def _enum_description(value: str) -> t.Tuple[str, str]:
"""Split enum description into tuple
Args:
value: Node enum description.
Returns:
Enum description split into a tuple."""
v = value.split(": ")
if len(v) > 1:
v2 = v[0].split(",")
return v2[0].strip('"'), v[-1]
return "", v[0]
@property
def filtered_node_path(self) -> str:
"""Filtered node path without device prefix."""
return self._node_path
@property
def permission(self) -> str:
"""Quant permission."""
if "read" in self._properties and "write" in self._properties:
return "BOTH"
if "read" in self._properties:
return "READ"
if "write" in self._properties:
return "WRITE"
return "NONE"
@property
def show_in_measurement_dlg(self) -> t.Optional[str]:
"""Show in measurement dialog."""
if self.datatype in ["VECTOR", "COMPLEX", "VECTOR_COMPLEX"]:
if "result" in self.label.lower() or "wave" in self.label.lower():
return "True"
@property
def section(self) -> str:
"""Quant section."""
for idx, x in enumerate(self._path_parts, 1):
if idx == 3:
break
if x.isnumeric():
return "/".join(self._path_parts[0:idx])
return self._path_parts[0]
@property
def group(self) -> str:
"""Quant group.
Node path indexes are removed from the group representation.
"""
path = [x for x in self._path_parts if not x.isnumeric()]
if len([x for x in self._path_parts if x.isnumeric()]) > 1:
idx_ = 0
for idx, c in enumerate(self._path_parts):
if c.isnumeric():
idx_ = idx
return "/".join(path[: idx_ - 1])
if len(path) > 1:
return "/".join(path[:-1])
return "/".join(path)
@property
def label(self) -> str:
"""Quant label.
Label is a node path without DEV-prefix."""
return self._node_path_no_prefix
@property
def combo_defs(self) -> t.Dict[str, str]:
"""Labber combo definitions.
        Turns enumerated options into Labber combo definitions.
No combo definitions are generated if the node is READ-only.
Returns:
Labber combo definitions.
Format:
{
"cmd_def_1": 1,
"combo_def_1": 1,
"cmd_def_n": 1,
"combo_def_n": 1
}
"""
if "enumerated" in self._node_info["Type"].lower():
if self.permission == "READ":
return {}
defs = {}
for idx, (k, v) in enumerate(self._node_info["Options"].items(), 1):
value, _ = self._enum_description(v)
defs[f"cmd_def_{idx}"] = str(k)
defs[f"combo_def_{idx}"] = value if value else str(k)
return defs
@property
def tooltip(self) -> str:
"""Node tooltip as HTML body.
Options are converted into HTML lists and node is bolded.
For COMBO and READ-only quants, a bolded text to highlight READ-ONLY
is used.
"""
items = []
description = self._node_info["Description"]
for k, v in self._node_info["Options"].items():
value, desc = self._enum_description(v)
if self.permission == "READ":
items.append(f"{k}: {desc}")
else:
items.append(f"{value if value else k}: {desc}")
if self.permission == "READ" and self.datatype in ["STRING", "COMBO"]:
description = "<p><b>READ-ONLY!</p></b>" + description
return helpers.tooltip(
description,
enum=items,
node=self._node_path_no_prefix.upper(),
)
@property
def unit(self) -> t.Optional[str]:
"""Node unit to Labber units.
Special characters are ignored or replaced in the string representation.
"""
# HF2 does not have Unit.
unit = self._node_info.get("Unit", None)
if not unit:
return None
if unit.lower() in ["none", "dependent", "many", "boolean"]:
return None
unit = self._node_info["Unit"].replace("%", " percent").replace("'", "")
# Remove degree signs etc.
return unit.encode("ascii", "ignore").decode()
@property
def datatype(self) -> str:
"""Node datatype to Labber datatypes."""
unit = self._node_info.get("Type", "").lower()
if not unit:
return ""
if "enumerated" in unit:
return "COMBO"
boolean_nodes = [
"enable",
"single",
"on",
"busy",
"ready",
"reset",
"preampenable",
"locked",
"keepalive",
"forcetrigger",
"triggered",
"endless",
"preview",
"findlevel",
"clearwave",
"clearweight",
"force",
"trigforce",
]
if self._path_parts[-1].lower() in boolean_nodes:
return "BOOLEAN"
string_nodes = ["alias", "serial", "devtype", "fwrevision", "revision"]
if self._path_parts[-1].lower() in string_nodes:
return "STRING"
if unit == "double" or "integer" in unit:
return "DOUBLE"
if unit == "string":
return "STRING"
if unit in ["zivectordata", "ziadvisorwave"]:
return "VECTOR"
if unit in ["zidemodsample", "zidiosample", "complex double"]:
return "COMPLEX"
if unit in ["complex"]:
return "VECTOR_COMPLEX"
return "STRING"
@property
def set_cmd(self) -> t.Optional[str]:
"""Set command for the node if the node is writable."""
if "write" in self._properties:
return self._node_path_no_prefix
@property
def get_cmd(self) -> t.Optional[str]:
"""Get command for the node if the node is readable."""
if "read" in self._properties:
return self._node_path_no_prefix
@property
def title(self) -> str:
"""Title of the quant."""
return self.label
def as_dict(self) -> t.Dict[str, t.Dict]:
"""Python dictionary representation of the node quant.
        Due to some problems with Labber, some modifications for READ-only nodes are
needed:
If datatype is COMBO and permission is READ:
- COMBO -> Datatype DOUBLE (enumerated number)
if datatype is COMBO or STRING and permission is READ:
- Permission is removed from quant and a tooltip text to highlight
that this is READ-only.
Returns:
Dictionary where the keys and values are in a Labber format.
"""
d = {}
d["section"] = self.section.lower()
d["group"] = self.group.lower()
d["label"] = self.label.lower()
if self.datatype:
if self.permission == "READ" and self.datatype == "COMBO":
d["datatype"] = "DOUBLE"
else:
d["datatype"] = self.datatype
if self.unit:
d["unit"] = self.unit
d["tooltip"] = self.tooltip
d.update(self.combo_defs)
if not (self.permission == "READ" and self.datatype in ["COMBO", "STRING"]):
d["permission"] = self.permission
if self.set_cmd:
d["set_cmd"] = self.set_cmd
if self.get_cmd:
d["get_cmd"] = self.get_cmd
if self.show_in_measurement_dlg:
d["show_in_measurement_dlg"] = self.show_in_measurement_dlg
if self.datatype in ["VECTOR", "VECTOR_COMPLEX"]:
d["x_name"] = "Length"
d["x_unit"] = "Sample"
return {self.filtered_node_path.lower(): d}
class QuantGenerator:
"""Quant generator.
Args:
quants: List of quants in node-like format.
"""
def __init__(self, quants: t.List[str]) -> None:
self.quants = list(map(helpers.delete_device_from_node_path, quants))
@staticmethod
def find_nth_occurrence(s: str, target: str, n: int) -> int:
"""Find nth occurrence of the target in a string.
Args:
s: String
target: Target string to find from s
n: Nth occurrence of target in s
Returns:
Index of the nth occurrence in the string. -1 if not found."""
if s.count(target) < n + 1:
return -1
return s.find(target, s.find(target) + n)
@staticmethod
def path_from_indexes(
quant_original: str,
quant: str,
i: int,
indexes: t.List[int],
quants: t.List[str],
) -> t.List[str]:
"""Recursively generate quant path from given indexes.
Args:
quant_original: Original quant
quant: Traveled quant
i: Index of the wildcard *
indexes: Number of indexes added to wildcard
quants: List of quant paths
Returns:
List of generated quant paths.
"""
for x in range(indexes[i]):
idx = QuantGenerator.find_nth_occurrence(quant_original, "*", i)
q_list = list(quant)
if idx != -1:
q_list[idx] = str(x)
try:
quants = QuantGenerator.path_from_indexes(
quant_original, "".join(q_list), i + 1, indexes, quants
)
except IndexError:
quants.append("".join(q_list))
return quants
@staticmethod
def _to_regex(s: str) -> str:
"""Quant to regex.
Wildcard `*` is replaced with any numbers and case is ignored.
Args:
s: String to be transformed into regex.
Returns:
Regex of the input string."""
s = s.replace("/", r"\/")
s = s.replace("*", r"[0-9]+")
return "(?i)" + s
def quant_paths(
self, quant: str, indexes: t.List[t.Union[str, int]]
) -> t.List[str]:
"""Quant paths for all given indexes.
Args:
quant: Quant node-like path.
indexes: Indexes for wildcards (*). 'dev' | int
'dev' = Indexes from device.
int = The amount of indexes to be added.
Returns:
Quant paths with given indexes.
"""
wc_count = quant.count("*")
if wc_count == 0:
return [quant]
if not indexes:
indexes = ["dev" for _ in range(wc_count)]
if wc_count > len(indexes):
diff = wc_count - len(indexes)
indexes += ["dev" for _ in range(diff)]
idxs = []
for enum, idx in enumerate(indexes):
# Get the number of indexes from device.
if idx == "dev":
paths = set()
idx_pos = self.find_nth_occurrence(quant, "*", enum) + 1
p = re.compile(self._to_regex(quant[:idx_pos]))
for path in list(filter(p.match, self.quants)):
paths.add(re.findall(r"[0-9]+", path)[enum])
idxs.append(len(paths))
else:
# Add any number of indexes.
idxs.append(idx)
return self.path_from_indexes(quant, quant, 0, idxs, []) | zhinst-labber | /zhinst_labber-0.3.1-py3-none-any.whl/zhinst/labber/generator/quants.py | quants.py |
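# Illustrative sketch (not part of the module): turning a single LabOne node
# description into its Labber quant definition. The node info dictionary mirrors
# the example format given in the NodeQuant docstring; the enum option text is a
# made-up example.
from zhinst.labber.generator.quants import NodeQuant
example_node_info = {
    "Node": "/DEV1234/QACHANNELS/1/GENERATOR/AUXTRIGGERS/0/CHANNEL",
    "Description": "Selects the source of the digital Trigger.",
    "Properties": "Read, Write, Setting",
    "Type": "Integer (enumerated)",
    "Unit": "None",
    "Options": {"0": '"chan0trigin0": Channel 1, Trigger Input A.'},
}
print(NodeQuant(example_node_info).as_dict())
# -> one-entry dict keyed by "/qachannels/1/generator/auxtriggers/0/channel" with
#    section "qachannels/1", group "qachannels/generator/auxtriggers", datatype "COMBO", ...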
import fnmatch
import re
import typing as t
def _replace_characters(s: str) -> str:
"""Replace characters that are not suitable to present in Labber.
Returns:
Labber compatible string.
"""
if not s:
return ""
chars = [
("\n", " "),
("\r", ""),
('"', "`"),
("'", "`"),
(";", ":"),
("%", " percent"),
]
for c in chars:
s = s.replace(c[0], c[1])
return s
def _to_html_list(x: t.List[str]) -> str:
"""Convert list items to an HTML list.
Returns:
HTML list.
"""
html_list = "<ul>"
for item in x:
item_cleaned = _replace_characters(item)
html_list += f"<li>{item_cleaned}</li>"
html_list += "</ul>"
return html_list
def tooltip(
desc: str, node: t.Optional[str] = None, enum: t.Optional[t.List[str]] = None
) -> str:
"""Convert tooltip arguments to HTML body.
Args:
desc: Paragraph
node: Bolded text part.
enum: List of strings to be converted to an HTML list.
Returns:
HTML body with given arguments
"""
if desc.startswith("<html>"):
return desc
desc_cleaned = _replace_characters(desc)
desc = f"<p>{desc_cleaned}</p>"
enum_ = f"<p>{_to_html_list(enum)}</p>" if enum else ""
node_path = f"<p><b>{node.strip()}</b></p>" if node else ""
return "<html><body>" + desc + enum_ + node_path + "</body></html>"
def delete_device_from_node_path(path: str) -> str:
"""Delete device prefix from path.
Returns:
Path where 'DEVXXXX' prefix is subtracted.
"""
return re.sub(r"/DEV(\d+)", "", path.upper())[0:]
def match_in_dict_keys(target: str, data: dict) -> t.Tuple[str, t.Any]:
"""Find matches for target in data keys.
Returns:
Key, value pair of the data if the target matches a key in data.
Otherwise empty string and None
"""
for k, v in data.items():
if fnmatch.fnmatch(target.strip("/").lower(), f"{k.strip('/').lower()}*"):
return k, v
return "", None
def match_in_list(target: str, data: t.List[str]) -> str:
"""Find matches for target in given items.
Returns:
Item of the data where the target matches.
Otherwise empty string.
"""
for item in data:
if fnmatch.fnmatch(target.strip("/").lower(), f"{item.strip('/').lower()}*"):
return item
return "" | zhinst-labber | /zhinst_labber-0.3.1-py3-none-any.whl/zhinst/labber/generator/helpers.py | helpers.py |
import configparser
import fnmatch
import json
import typing as t
from collections import OrderedDict
from pathlib import Path
from distutils.dir_util import copy_tree
import natsort
from zhinst.toolkit import Session
from zhinst.toolkit.nodetree import Node
from zhinst.labber import __version__
from zhinst.labber.code_generator.drivers import generate_labber_device_driver_code
from zhinst.labber.generator.conf import LabberConfiguration
from zhinst.labber.generator.helpers import (
delete_device_from_node_path,
match_in_dict_keys,
match_in_list,
)
from zhinst.labber.generator.quants import NodeQuant, Quant, QuantGenerator
class LabberConfig:
"""Base class for generating Labber configuration.
The class generates necessary Labber driver data
It also converts available nodes into Labber driver configuration and
modifies them based on given settings file.
Args:
root: Zurich Instruments toolkit root node
name: Name of the root object
env_settings: Existing Labber settings
mode: Labber mode. `NORMAL` | `ADVANCED`
"""
def __init__(self, root: Node, name: str, env_settings: dict, mode="NORMAL"):
self._root = root
self._env_settings = LabberConfiguration(name, mode, env_settings)
self._quant_gen = QuantGenerator(list(root._root.raw_dict.keys()))
self._tk_name = name
self._name = name
self._general_settings = {}
self._settings = {}
def _update_section(self, quant: str, defs: t.Dict) -> t.Dict:
"""Update quant section.
Returns:
Defs with updated section from `env_settings`."""
_, section = match_in_dict_keys(quant, self.env_settings.quant_sections)
if section:
defs["section"] = section
return defs
def _update_group(self, quant: str, defs: t.Dict) -> t.Dict:
"""Update quant group.
        If a match is found, `<n>` is updated with the corresponding
quant index.
Example:
quant: "sines/0"
group def: "/sines/<n>/*": "Sines <n>"
group = Sines 0
Returns:
Defs with updated group key from `env_settings`.
"""
indexes = [part for part in quant.split("/") if part.isnumeric()]
for pattern, group in self.env_settings.quant_groups.copy().items():
pattern = pattern.replace("<n>", "*")
r = fnmatch.fnmatch(
quant.strip("/").lower(), f"{pattern.strip('/').lower()}*"
)
if r:
cnt = group.count("<n>")
path = group.replace("<n>", "{}")
defs["group"] = path.format(*[indexes[idx] for idx in range(cnt)])
break
return defs
def _generate_node_quants(self) -> t.Dict:
"""Generate node quants from available nodes.
Returns:
Dictionary of available nodes in Labber format.
"""
quants = {}
for info in self._root._root.raw_dict.values():
if match_in_list(
delete_device_from_node_path(info["Node"]),
self.env_settings.ignored_nodes,
):
continue
try:
sec = NodeQuant(info)
except ValueError:
continue
quants.update(sec.as_dict())
return quants
def _generate_quants(self) -> t.Dict[str, dict]:
"""Generate Labber quants from available nodes and settings file.
Returns:
            Generated Labber quants, which consist of existing and added nodes from
`env_settings`
"""
nodes = self._generate_node_quants()
        # Quants from the settings file; entries matched to existing nodes are popped below, the rest are added manually at the end
custom_quants = self.env_settings.quants.copy()
for node_quant, node_defs in nodes.copy().items():
settings_quant, settings_defs = match_in_dict_keys(
node_quant, self.env_settings.quants
)
if settings_quant:
                # drop fields whose configured value is empty
                for conf_key, conf_value in settings_defs["conf"].items():
                    if not conf_value:
                        nodes[node_quant].pop(conf_key, None)
node_defs.update(settings_defs["conf"])
nodes[node_quant] = node_defs
# If the quant is extended
if settings_defs.get("extend", None):
for path in self._quant_gen.quant_paths(
settings_quant, node_defs.get("indexes", [])
):
conf = settings_defs["extend"]
conf = self._update_group(path, conf)
conf = self._update_section(path, conf)
nodes.update(Quant(path, conf).as_dict())
custom_quants.pop(settings_quant, None)
nodes[node_quant] = self._update_group(node_quant, node_defs)
nodes[node_quant] = self._update_section(node_quant, node_defs)
# Manually added quants from configuration
for custom_quant, custom_defs in custom_quants.items():
if custom_defs.get("add", False):
for path in self._quant_gen.quant_paths(
custom_quant, custom_defs.get("indexes", [])
):
conf = custom_defs["conf"]
conf = self._update_group(path, conf)
conf = self._update_section(path, conf)
nodes.update(Quant(path, self._update_group(path, conf)).as_dict())
return nodes
def generated_code(self) -> str:
"""Generated labber code
Returns:
Labber driver code for the current object.
"""
return generate_labber_device_driver_code(self._name, self.settings_filename)
def config(self) -> t.Dict[str, t.Dict]:
"""Labber configuration as a Python dictionary.
Returns:
            Generated Labber quants, which consist of existing and added nodes from
`env_settings` and general settings."""
general = self.general_settings
nodes = self._generate_quants()
general.update(nodes)
return general
@property
def env_settings(self) -> LabberConfiguration:
"""Labber environment settings."""
return self._env_settings
@property
def settings_filename(self) -> str:
"""Settings filename."""
return "settings.json"
@property
def name(self) -> str:
"""Name of the config driver."""
return "Zurich_Instruments_" + self._name
@property
def general_settings(self) -> t.Dict:
"""General settings section for Labber."""
self._general_settings.update(self.env_settings.general_settings)
return {"General settings": self._general_settings}
@property
def settings(self) -> t.Dict:
"""Driver settings."""
return self._settings
class DeviceConfig(LabberConfig):
"""Class for generating Labber configuration for Zurich Instruments
devices.
    The class generates the necessary Labber driver data.
It also converts available nodes into Labber driver configuration and
modifies them based on given settings file.
Args:
device: Zurich Instruments toolkit device node.
session: Existing DataServer session
env_settings: Existing Labber settings
mode: Labber mode. `NORMAL` | `ADVANCED`
"""
def __init__(self, device: Node, session: Session, env_settings: dict, mode: str):
self._tk_name = device.device_type.upper()
super().__init__(device, self._tk_name, env_settings, mode)
options = str(device.features.options()).replace("\n", "_")
self._name = f"{self._tk_name}_{options}" if options else self._tk_name
self.session = session
self.device = device
self._settings = {
"data_server": {
"host": self.session.server_host,
"port": self.session.server_port,
"hf2": self.session.is_hf2_server,
"shared_session": True,
},
"instrument": {"base_type": "device", "type": self._name},
"vector_quantity_value_map_array_keys": ["y"],
}
version = f"{session.about.version()}#{__version__}#{self.env_settings.version}"
self._general_settings = {
"name": f"Zurich Instruments {self._name}",
"version": version,
"driver_path": f"Zurich_Instruments_{self._name}",
}
class DataServerConfig(LabberConfig):
"""Class for generating Labber configuration for Zurich Instruments
DataServer.
    The class generates the necessary Labber driver data.
It also converts available nodes into Labber driver configuration and
modifies them based on given settings file.
Args:
session: Existing DataServer session
env_settings: Existing Labber settings
mode: Labber mode. `NORMAL` | `ADVANCED`
"""
def __init__(self, session: Session, env_settings: dict, mode: str):
self._tk_name = "DataServer"
super().__init__(session, self._tk_name, env_settings, mode)
self.session = session
self._name = "DataServer"
self._settings = {
"data_server": {
"hf2": self.session.is_hf2_server,
"shared_session": True,
},
"instrument": {
"base_type": "DataServer",
},
}
version = f"{session.about.version()}#{__version__}#{self.env_settings.version}"
self._general_settings = {
"name": f"Zurich Instruments {self._name}",
"version": version,
"driver_path": f"Zurich_Instruments_{self._name}",
}
class ModuleConfig(LabberConfig):
"""Class for generating Labber configuration for Zurich Instruments
modules.
    The class generates the necessary Labber driver data.
It also converts available nodes into Labber driver configuration and
modifies them based on given settings file.
Args:
name: Name of the toolkit module
session: Existing DataServer session
env_settings: Existing Labber settings
mode: Labber mode. `NORMAL` | `ADVANCED`
"""
def __init__(self, name: str, session: Session, env_settings: dict, mode: str):
self.module = getattr(session.modules, name)
self._tk_name = name
super().__init__(self.module, self._tk_name, env_settings, mode)
self.session = session
self._name = name.upper() + "_Module"
self._settings = {
"data_server": {
"host": self.session.server_host,
"port": self.session.server_port,
"hf2": self.session.is_hf2_server,
"shared_session": True,
},
"instrument": {"base_type": "module", "type": self._tk_name},
}
version = f"{session.about.version()}#{__version__}#{self.env_settings.version}"
self._general_settings = {
"name": f"Zurich Instruments {self._name}",
"version": version,
"driver_path": f"Zurich_Instruments_{self._name}",
}
def _path_to_labber_section(path: str, delim: str) -> str:
"""Path to Labber format. Delete slashes from start and end.
Returns:
Formatted path in Labber format with given delimited."""
return path.strip("/").replace("/", delim)
def order_labber_config(
data: OrderedDict, order: t.Dict[str, t.List[str]]
) -> OrderedDict:
    """Order the Labber configuration quants by section.
    Quants whose section is listed in ``order["sections"]`` appear first,
    in the given order, followed by all remaining quants.
    Returns:
        Ordered Labber configuration.
    """
    data = data.copy()
    data_ = OrderedDict()
    if "sections" not in order:
        return data
while data:
for section in order["sections"]:
r = {
k: v
for k, v in data.items()
if v.get("section", "").lower() == section.lower()
}
data_.update(r)
[data.pop(k) for k in r.keys()]
data_.update(data)
data.clear()
return data_
def conf_to_labber_format(
data: dict, delim: str, order: t.Dict[str, t.List[str]]
) -> dict:
"""Transform data into Labber format.
* Natural sort dictionary keys
* Replace slashes with delimiter
* Title sections
Returns:
Formatted data
"""
def _to_title_keep_uppercase(s: str) -> str:
if s.islower():
return s.title()
return s
sorted_keys = natsort.natsorted(list(data.keys()))
data = OrderedDict({k: data[k] for k in sorted_keys}.items())
data = order_labber_config(data, order)
for title, quant in data.copy().items():
title_ = str(title)
if not title == "General settings":
title_ = _to_title_keep_uppercase(title_)
title_ = _path_to_labber_section(title_, delim)
data.pop(title, None)
data[title_] = {}
for key, value in quant.items():
if key.lower() == "permission":
continue
if key not in ["set_cmd", "get_cmd", "tooltip", "datatype"]:
key = _path_to_labber_section(str(key), delim)
value = _path_to_labber_section(str(value), delim)
if key.lower() in ["label", "group", "section"]:
key = _to_title_keep_uppercase(key)
value = _to_title_keep_uppercase(value)
data[title_].update({key: value})
return data
def dict_to_config(
config: configparser.ConfigParser,
data: dict,
delim: str,
order: t.Dict[str, t.List[str]],
) -> None:
"""Update config with give data.
The data will be formatted and then set as config sections.
"""
data = conf_to_labber_format(data, delim, order)
for title, items in data.items():
config.add_section(title)
for name, value in items.items():
config.set(title, name, value)
class Filehandler:
"""FileHandler class for generating Labber configuration files.
Args:
config: Labber configuration class
root_dir: Root directory where the files are generated
upgrade: If the existing files should be overwritten
"""
def __init__(self, config: LabberConfig, root_dir: str, upgrade=False):
self._config = config
self._root_dir = Path(root_dir) / config.name
self._root_dir.mkdir(exist_ok=True)
self._upgrade = upgrade
self._created_files = []
self._upgraded_files = []
def write_to_file(self, path: Path, filehandler: t.Callable) -> None:
"""Write to file.
Args:
path: Filepath
filehandler: Handler to be called for saving the file
"""
if self._upgrade:
if not path.exists():
self._created_files.append(path)
else:
self._upgraded_files.append(path)
with open(path, "w", encoding="utf-8") as file:
filehandler(file)
else:
if not path.exists():
with open(path, "w", encoding="utf-8") as file:
filehandler(file)
self._created_files.append(path)
def write_settings_file(self) -> None:
"""Write settings file (.*json-format)."""
path = self._root_dir / self._config.settings_filename
self.write_to_file(
path, lambda x: json.dump(self._config.settings, x, indent=2)
)
def write_config_file(self, delim: str) -> None:
"""Write configuration file (*.ini-format)."""
path = self._root_dir / f"{self._config.name}.ini"
config = configparser.ConfigParser()
dict_to_config(
config,
self._config.config(),
delim=delim,
order=self._config.env_settings.quant_order,
)
self.write_to_file(path, lambda x: config.write(x))
def write_python_driver(self) -> None:
"""Write Python driver file (*.py-format)."""
path = self._root_dir / f"{self._config.name}.py"
self.write_to_file(path, lambda x: x.write(self._config.generated_code()))
@property
def upgraded_files(self) -> t.List[Path]:
"""List of upgraded files."""
return self._upgraded_files
@property
def created_files(self) -> t.List[Path]:
"""List of created files."""
return self._created_files
def open_settings_file() -> dict:
"""Open settings file.
Returns:
Contents of the opened settings file.
"""
settings_file = Path(__file__).parent.parent / "resources/settings.json"
with open(settings_file, "r") as json_f:
return json.load(json_f)
def generate_labber_files(
driver_directory: str,
mode: str,
device_id: str,
server_host: str,
upgrade: bool = False,
server_port: t.Optional[int] = None,
hf2: t.Optional[bool] = None,
):
"""Generate Labber files for the selected device.
Args:
driver_directory: Base directory for generated driver files.
mode: Driver mode. `NORMAL` | `ADVANCED`.
            Normal exposes a curated subset of the functionality.
Advanced has most of the nodes available.
device_id: Zurich Instruments device ID. (e.g: dev1234)
server_host: DataServer host
upgrade: Overwrite existing drivers
server_port: DataServer port
hf2: If the device is HF2.
"""
session = Session(server_host=server_host, server_port=server_port, hf2=hf2)
dev = session.connect_device(device_id)
# Files generated to echo
generated_files = []
upgraded_files = []
# Settings file
json_settings = open_settings_file()
configs = [
DataServerConfig(session, json_settings, mode),
DeviceConfig(dev, session, json_settings, mode),
]
# Modules
# TODO: When hf2 option enabled:
# RuntimeError: Unsupported API level for specified server
if not hf2:
modules: t.List[str] = json_settings["misc"]["ziModules"].copy()
if "SHF" not in dev.device_type:
modules.remove("shfqa_sweeper")
else:
modules.remove("sweeper")
configs += [ModuleConfig(mod, session, json_settings, mode) for mod in modules]
for config in configs:
filegen = Filehandler(config, root_dir=driver_directory, upgrade=upgrade)
filegen.write_config_file(delim=json_settings["misc"]["labberDelimiter"])
filegen.write_python_driver()
filegen.write_settings_file()
generated_files += filegen.created_files
upgraded_files += filegen.upgraded_files
# static files
existing_files = [path for path in Path(driver_directory).rglob("*")]
static_files = copy_tree(
(Path(__file__).parents[1] / "static_drivers").absolute(), driver_directory
)
for static_file in static_files:
if Path(static_file) in existing_files:
upgraded_files.append(Path(static_file))
else:
generated_files.append(Path(static_file))
return generated_files, upgraded_files | zhinst-labber | /zhinst_labber-0.3.1-py3-none-any.whl/zhinst/labber/generator/generator.py | generator.py |
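# Illustrative sketch (not part of the module): generating the Labber driver
# files for a device. The device ID, data server host and output directory are
# made-up examples.
from zhinst.labber.generator.generator import generate_labber_files
created, upgraded = generate_labber_files(
    driver_directory="C:/Labber/Drivers",  # hypothetical Labber driver folder
    mode="NORMAL",
    device_id="dev1234",
    server_host="localhost",
    upgrade=False,
)
print(f"created {len(created)} file(s), upgraded {len(upgraded)} file(s)")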
import csv
import fnmatch
import json
import logging
import os
import re
import string
import typing as t
from itertools import repeat
from pathlib import Path
import numpy as np
from BaseDriver import LabberDriver
from InstrumentDriver_Interface import Interface
from zhinst.toolkit import Session, Waveforms
from zhinst.toolkit.driver.devices import DeviceType
from zhinst.toolkit.driver.modules import ModuleType
from zhinst.labber.driver.logger import configure_logger
from zhinst.labber.driver.snapshot_manager import SnapshotManager, TransactionManager
Quantity = t.TypeVar("Quantity")
NumpyArray = t.TypeVar("NumpyArray")
GLOBAL_SETTINGS = Path(__file__).parent / "../resources/settings.json"
created_sessions = {}
logger = logging.getLogger(__name__)
class BaseDevice(LabberDriver):
"""Generic Labber base driver for all drivers from Zurich Instruments.
The driver is based on zhinst-toolkit. It works for devices, LabOne modules
and a session.
It requires both local and global settings. The global settings are shared
with the generator script and contains information about special node
handling and function calls. The local settings are passed as an argument
and contain instrument/user specific information. The following fields are
supported/required:
* data_server (information about the data server session)
* host: Address of the data server. (default = "localhost")
* port: Port of the data server. (default = 8004)
        * hf2: Flag if the data server is for an HF2 device. (default = false)
        * shared_session: Flag if the session should be shared with Labber.
            Warning: If set to false some features may no longer be supported.
(default = true)
* instrument: (Labber instrument specific information)
* base_type: Base type of the instrument. (device, module, session)
* type: Type of the module. Not used for session.
module => name of the module in toolkit
device => device type
},
* logger_level: Logging level of python logger. If not specified the global
settings are used.
* logger_path: Optional logger path where the logging information will be
stored (in addition to the std output which is always enabled).
The driver will accept all arguments and forward them to the
``LabberDriver`` directly.
Args:
settings: local settings
"""
def __init__(self, *args, settings=t.Dict, **kwargs):
super().__init__(*args, **kwargs)
self._session = None
self._instrument = None
self._transaction = None
self._snapshot = None
self._instrument_settings = settings
self._device_type = settings["instrument"].get("type", "")
instrument_type = settings["instrument"].get("base_type", "")
log_level = settings.get("logger_level", None)
# read information from global settings file
with GLOBAL_SETTINGS.open("r") as file:
node_info = json.loads(file.read())
self._node_info = node_info["common"].get("quants", {})
if self._device_type:
if instrument_type == "device":
dev_type = self._device_type.split("_")[0].rstrip(string.digits)
device_info = node_info.get(dev_type, {}).get("quants", {})
else:
device_info = node_info.get(self._device_type, {}).get("quants", {})
self._node_info = {**self._node_info, **device_info}
self._function_info = node_info.get("functions", {})
self._path_seperator = node_info["misc"]["labberDelimiter"]
# use global log level if no local one is defined
log_level = node_info["misc"]["LogLevel"] if not log_level else log_level
configure_logger(
logger, log_level, self._instrument_settings.get("logger_path", None)
)
logger.debug("PID: %d", os.getpid())
# Set up node to quant map
self._node_quant_map = {
self._quant_to_path(quant): quant for quant in self.dQuantities
}
def performOpen(self, options: t.Dict = {}) -> None:
"""Perform the operation of opening the instrument connection.
Args:
options: Additional information provided by Labber.
"""
self._session = self._get_session(
self._instrument_settings["data_server"],
self._instrument_settings["instrument"].get("base_type", "DataServer"),
)
self._instrument = self._create_instrument(
self._instrument_settings["instrument"]
)
self._snapshot = SnapshotManager(self._instrument.root)
self._transaction = TransactionManager(self._instrument, self)
def performSetValue(
self,
quant: Quantity,
value: t.Any,
sweepRate: float = 0.0,
options: t.Dict = {},
) -> t.Any:
"""Perform the Set Value instrument operation.
It is important that the code inspects the sweepRate parameter to see if
the user wants to set the value directly (sweepRate=0.0), or perform
sweeping (sweepRate>0.0). Note that in sweep mode (sweepRate>0.0), the
function should not wait for the sweep to finish, since the sweep
checking/waiting is handled by the Instrument Server. The sweepRate
parameter is defined in terms of change per second or change per
minute, as set by the sweep_minute configuration parameter
defined in the section above.
TODO special treatment for sweeping required?
Args:
quant: Quantity that should be set.
value: Value to be set.
sweepRate: Sweep rate. 0 if no sweep should be performed.
options: Additional information provided by Labber.
{
'operation':1
'quant':'QA Channel 1 - Trigger Level'
'value':0.501
'sweep_rate':0.0
'wait_for_sweep':True
'delay':693483.64
}
Returns:
Value that was set. (If None Labber will automatically use the input
value instead)
"""
# Start transaction if necessary
if "call_no" in options and not self._transaction.is_running():
self._transaction.start()
try:
node_info = self._get_node_info(quant.name)
if "call_no" in options and not node_info.get("transaction", True):
logger.info(
"%s: Transaction is not supported for this node. "
"Please set value manually.",
quant.name,
)
return value
if node_info.get("function", ""):
quant.setValue(False if node_info.get("trigger", False) else value)
function_path = node_info.get("function_path", ".")
function_path = self._quant_to_path(quant.name) / function_path
self.call_function(
node_info["function"],
function_path.resolve(),
)
return False if node_info.get("trigger", False) else value
if not quant.set_cmd:
logger.info("%s: is read only and will not be set.", quant.name)
return self.performGetValue(quant)
# Add device if necessary
if node_info.get("is_node_path", False) and "dev" not in value.lower():
value, _ = self._raw_path_to_zi_node(value)
value = self._set_value_toolkit(
quant, value, wait_for=node_info.get("wait_for", False)
)
return False if node_info.get("trigger", False) else value
# Stop transaction if necessary (should be ended regardless of any exceptions)
finally:
if self._transaction.is_running() and self.isFinalCall(options):
try:
self._transaction.end()
except Exception as error:
logger.error("Error during ending a transaction: %s", error)
def performGetValue(self, quant: Quantity, options: t.Dict = {}) -> t.Any:
"""Perform the Get Value instrument operation.
Args:
quant: Quantity that should be set.
options: Additional information provided by Labber.
{
'operation':2
'quant':'QA Channel 1 - Trigger Level'
'delay':693555.843
}
Returns:
New value of the quantity.
"""
node_info = self._get_node_info(quant.name)
# Get CFG => reset function values to default
if self.dOp["operation"] == Interface.GET_CFG and node_info.get("function", ""):
logger.info("%s: reset to default", quant.name)
return "" if quant.datatype in [quant.STRING, quant.PATH] else 0
# Call function. (No function execution during GET_CFG)
if node_info.get("function", ""):
function_path = node_info.get("function_path", ".")
function_path = self._quant_to_path(quant.name) / function_path
self.call_function(
node_info["function"],
function_path.resolve(),
)
# Get value from toolkit
elif quant.get_cmd:
get_cmd = (
quant.get_cmd[3:]
if quant.get_cmd.lower().startswith("zi/")
else quant.get_cmd
)
# use a snapshot for the GET_CFG command
if self.dOp["operation"] in [Interface.GET_CFG, Interface.SET_CFG]:
value = None
try:
value = self._parse_value(quant, self._snapshot.get_value(get_cmd))
except RuntimeError as error:
logger.debug("%s", error)
logger.info("%s: get %s", quant.name, value)
return value if value is not None else quant.getValue()
# clear snapshot if GET_CFG is finished
self._snapshot.clear()
try:
value = self._parse_value(
quant, self._instrument[get_cmd](parse=False, enum=False)
)
logger.info("%s: get %s", quant.name, value)
return value if value is not None else quant.getValue()
except Exception as error:
logger.error("%s", error)
return quant.getValue()
# def performClose(self, bError: bool = False, options: t.Dict = {}) -> None:
# """Perform the close instrument connection operation."""
# pass
# def initSetConfig(self) -> None:
# """Run before setting values in Set Config."""
# pass
# def performArm(self, quant_names: str, options: t.Dict = {}) -> None:
# """Perform the instrument arm operation"""
# pass
def _parse_value(self, quant: Quantity, value: t.Any) -> t.Any:
"""Parse the value received from toolkit for a node.
Args:
quant: Labber quantity
value: Received value
Returns:
parsed value
"""
if isinstance(value, dict):
if "x" in value and "y" in value:
return complex(value["x"], value["y"])
if "dio" in value:
return value["dio"][0]
logger.error("Unknown data received %s", value)
return None
# Labber handles enums as strings
if quant.cmd_def:
value = str(value)
return value
def _get_session(
self, data_server_info: t.Dict[str, t.Any], base_type: str
) -> Session:
"""Return a Session to the dataserver.
One single session to each data server is reused per default in Labber.
The "shared_session" option in the settings can disable this behavior.
Args:
data_server_info: settings info for the Data Server.
base_type: BaseType of the Labber Instrument.
Returns:
Valid toolkit Session object.
"""
target_host = data_server_info.get("host", "localhost")
target_hf2 = data_server_info.get("hf2", False)
target_port = data_server_info.get("port", 8005 if target_hf2 else 8004)
# Use the Instrument Address if the Instrument is a DataServer
if base_type == "DataServer":
raw_server = self.comCfg.getAddressString()
split_raw_server = raw_server.split(":")
target_host = split_raw_server[0]
if len(split_raw_server) > 1:
target_port = int(split_raw_server[1])
logger.info("Data Server Session %s:%s", target_host, target_port)
if data_server_info.get("shared_session", True):
for (host, port), session in created_sessions.items():
if target_host == host and target_port == port:
return session
new_session = Session(target_host, target_port, hf2=target_hf2)
created_sessions[(target_host, target_port)] = new_session
return new_session
def _create_instrument(
self, instrument_info: t.Dict[str, t.Any]
) -> t.Union[Session, DeviceType, ModuleType]:
"""Create a connection through toolkit to the Instrument.
Instrument in this case means a Labber instrument which can be a
ZI Device, LabOne module or the data server session itself.
Args:
instrument_info: settings info for the Labber Instrument.
Returns:
toolkit object for the specified Instrument.
"""
base_type = instrument_info.get("base_type", "session")
if base_type in ["session", "DataServer"]:
logger.info("Created Session Instrument")
return self._session
if base_type == "module":
logger.info(
"Created Instrument for LabOne Module %s for Device %s",
instrument_info.get("type", "unknown").lower(),
self.comCfg.getAddressString(),
)
try:
# create new instance of module
# (otherwise multiple modules cannot be used at the same time)
module = instrument_info["type"].lower()
module = module if module == "shfqa_sweeper" else f"{module}_module"
module = getattr(self._session.modules, f"create_{module}")()
self._session.connect_device(self.comCfg.getAddressString())
module.device(self.comCfg.getAddressString())
return module
except KeyError as error:
raise RuntimeError(
"Settingsfile is specifing a module as instrument but is "
'missing the "type" property.'
) from error
except AttributeError as error:
raise RuntimeError(
f"LabOne module with name {instrument_info['type'].lower()}"
" does not exist in toolkit."
) from error
logger.info("Created Instrument for Device %s", self.comCfg.getAddressString())
return self._session.connect_device(self.comCfg.getAddressString())
def _quant_to_path(self, quant_name: str) -> Path:
"""Convert Quantity name into its path representation
Args:
quant_name: Name of the Quant
Returns:
Path (/-separated string)
"""
return Path(
"/" + "/".join(quant_name.lower().split(self._path_seperator))
).resolve()
def _path_to_quant(self, quant_path: Path) -> str:
try:
return self._node_quant_map[quant_path.resolve()]
except KeyError:
name = self._path_seperator.join(quant_path.parts[1:])
self._node_quant_map[name] = name
return name
def _get_node_info(self, quant_name: t.Union[str, Path]) -> t.Dict[str, t.Any]:
"""Get the node info for a Quantity
If there is no Info available the result will be an empty dictionary.
Args:
quant_name: Name of the Quant
Returns:
Node info.
"""
node_path = (
quant_name
if isinstance(quant_name, Path)
else self._quant_to_path(quant_name)
)
node_path = "/" + "/".join(node_path.parts[1:]).lower()
for parent_node, node_info in self._node_info.items():
if fnmatch.fnmatch(node_path, parent_node):
return node_info.get("driver", {})
return {}
def _set_value_toolkit(
self,
quant: Quantity,
value: t.Any,
*,
wait_for: bool = False,
) -> None:
"""Set a value through toolkit to a node.
The function does not raise an Exception but rather logs all errors.
Args:
quant: Quant of the node to set.
value: Value to set.
wait_for: Flag if the function should block until the value is set
on the device.
"""
try:
# get enumerated value if there is one
if quant.cmd_def:
value = quant.cmd_def[quant.combo_defs.index(value)]
# A VECTOR quantity can also receive a dictionary; the actual data is
# stored under one of the configured array keys (default "y").
if quant.datatype == 4:  # VECTOR datatype
if isinstance(value, dict):
for vector_key in self._instrument_settings.get(
"vector_quantity_value_map_array_keys", ["y"]
):
if vector_key in value:
value = value[vector_key]
break
logger.info("%s: set %s", quant.name, value)
self._instrument[quant.set_cmd](value)
if wait_for and not self._transaction.is_running():
self._instrument[quant.set_cmd].wait_for_state_change(value)
except Exception as error:
logger.error("%s", error)
@staticmethod
def _csv_row_to_vector(csv_row: t.List[str]) -> t.Optional[NumpyArray]:
"""Convert a csv row into a numpy array.
Args:
csv_row: Parsed CSV row
Returns:
Numpy array.
"""
if not csv_row:
return None
datatype = type(eval(csv_row[0]))
return np.array(csv_row, dtype=datatype.__name__)
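# Illustrative example (values are hypothetical): the dtype of the returned
# array is inferred from the first CSV cell via ``eval``, e.g.
#   BaseDevice._csv_row_to_vector(["0.5", "-0.25", "1.0"])  # -> float array
#   BaseDevice._csv_row_to_vector(["1+2j", "0-1j"])         # -> complex array
#   BaseDevice._csv_row_to_vector([])                       # -> None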
@staticmethod
def _import_waveforms(
waves1: Path, waves2: Path = None, markers: Path = None
) -> Waveforms:
"""Import Waveforms from CSV files.
Args:
waves1: csv for real part waves
waves2: csv for imag part waves
markers: csv for markers
Returns:
Waveform object.
"""
wave0_reader = csv.reader(
waves1.open("r", newline=""), delimiter=",", quotechar="|"
)
wave1_reader = repeat([])
if waves2 and waves2.exists():
wave1_reader = csv.reader(
waves2.open("r", newline=""), delimiter=",", quotechar="|"
)
marker_reader = repeat([])
if markers and markers.exists():
marker_reader = csv.reader(
markers.open("r", newline=""), delimiter=",", quotechar="|"
)
waves = Waveforms()
for i, row in enumerate(zip(wave0_reader, wave1_reader, marker_reader)):
if not row[0]:
continue
waves[i] = (
BaseDevice._csv_row_to_vector(row[0]),
BaseDevice._csv_row_to_vector(row[1]),
BaseDevice._csv_row_to_vector(row[2]),
)
return waves
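# Usage sketch (file names are hypothetical): row ``i`` of the real, imaginary
# and marker CSV files together form slot ``i`` of the returned Waveforms
# object, which can then be passed on to toolkit, e.g.
#   waves = BaseDevice._import_waveforms(
#       Path("wave_real.csv"), Path("wave_imag.csv"), Path("markers.csv")
#   )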
def _get_quant_value(self, quant_path: Path) -> t.Any:
"""Get Value from a Quantity.
The raw value is processed according to the node info.
Args:
quant_path: Path of the Quantity
Returns:
Processed value of the Quantity and the ``call_empty`` flag from the node info.
"""
quant_info = self._get_node_info(quant_path)
quant_type = quant_info.get("type", "default")
quant_name = self._path_to_quant(quant_path)
quant_value = self.getValue(quant_name)
call_empty = quant_info.get("call_empty", True)
if quant_type == "JSON":
try:
with open(Path(quant_value), "r") as file:
return json.loads(file.read()), call_empty
except IOError as error:
logger.error("%s", error)
return {}, call_empty
if quant_type == "TEXT":
try:
with open(Path(quant_value), "r") as file:
return file.read(), call_empty
except IOError as error:
logger.error("%s", error)
return "", call_empty
if quant_type == "CSV":
try:
return self._import_waveforms(Path(quant_value)), call_empty
except IOError as error:
logger.error("%s", error)
return Waveforms(), call_empty
if quant_type == "CSVARRAY":
try:
waveform = self._import_waveforms(Path(quant_value))
return waveform[0][0], call_empty
except IOError as error:
logger.error("%s", error)
return np.array([], dtype=complex), call_empty
return quant_value, call_empty
def _get_toolkit_function(self, path_list: t.List[str]) -> t.Callable:
"""Convert a function path into a toolkit function object.
Args:
path_list: Parts of the path of the function.
Returns:
toolkit function object.
"""
# get function object
function = self._instrument
for name in path_list:
if name.isnumeric():
function = function[int(name)]
else:
function = getattr(function, name.lower())
return function
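# Example (hypothetical path, assuming an AWG-capable toolkit device): the list
# ["awgs", "0", "enable_sequencer"] is resolved element by element to
# self._instrument.awgs[0].enable_sequencer and the bound toolkit callable is
# returned without being called:
#   func = self._get_toolkit_function(["awgs", "0", "enable_sequencer"])
#   func(single=True)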
def call_function(self, name: str, path: Path) -> None:
"""Call an process a function.
If this function is called within a transaction the execution is
delayed if the function allows it (call_type == Bundle).
Args:
name: Internal name of the function.
path: Path of the toolkit function.
"""
func_info = self._function_info[name]
if self.dOp["operation"] == Interface.SET_CFG and not func_info.get(
"is_setting", True
):
return
if (
self._transaction.is_running()
and func_info.get("call_type", "") == "Bundle"
):
self._transaction.add_function(name, path)
return
if name == "module_subscribe":
return self._call_module_subscribe(
Path(func_info.get("signals", "/signal/*"))
)
if name == "module_read":
return self._call_module_read(
Path(func_info.get("signals", "/signal/*")),
Path(func_info.get("result", "/result/*")),
)
if name == "module_clear":
return self._call_module_clear(Path(func_info.get("result", "/result/*")))
if name == "module_execute":
return self._call_module_execute()
return self._call_toolkit_function(path, func_info)
def _raw_path_to_zi_node(self, raw: str) -> t.Tuple[str, str]:
"""Convert a raw input path value into zi node
Add the device to the path if it is missing.
Some modules require that the user specifies the actual signal that
should be used. E.g. the sweeper module result contains all signals
for the subscribed nodes. In Labber customers can use the ``::`` as a
delimiter to specify which signal they want to use in the result.
Args:
raw: Raw Input from the user field.
Returns:
subscribable node path, specified signal (empty if none is specified)
"""
module_path = raw.split("::")
path = module_path[0]
signal = module_path[1] if len(module_path) > 1 else ""
if path and "dev" not in path:
path = path.lstrip("/")
path = f"/{self.comCfg.getAddressString().lower()}/{path}"
return path, signal
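# Example (assuming the connected device is dev1234): the device serial is
# prepended when missing and an optional "::signal" suffix is split off:
#   self._raw_path_to_zi_node("demods/0/sample::r")
#   # -> ("/dev1234/demods/0/sample", "r")
#   self._raw_path_to_zi_node("/dev1234/demods/0/sample")
#   # -> ("/dev1234/demods/0/sample", "")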
def _call_module_subscribe(self, signals: Path) -> None:
"""Subscribe to signal nodes in module.
First all nodes will be unsubscribed and then the new ones will be
subscribed. (Since the module is on the client side this is not expensive.)
Args:
signals: Wildcard path for all signal quantities that should be
subscribed.
"""
self._instrument.raw_module.unsubscribe("*")
logger.info(f"unsubscribed all nodes")
for signal_path in fnmatch.filter(
map(str, self._node_quant_map), signals.resolve()
):
quant_name = self._node_quant_map[Path(signal_path)]
quant_value, _ = self._raw_path_to_zi_node(
self.getValue(quant_name).lower()
)
if quant_value:
self._instrument.raw_module.subscribe(quant_value)
logger.info("subscribed to node %s", quant_value)
@staticmethod
def _get_signal_result(result: t.Dict, signal: str = None) -> t.Any:
"""Get a specific signal out of the a result dict.
If no signal is specified it tries to uses a default value.
If the specified signal does not exist or no default value is found
None is returned.
Args:
result: Result dictionary.
signal: Signal in the result.
Return:
Data for the used signal
"""
signal_result = None
if signal:
signal_result = result.get(signal, None)
elif "value" in result:
signal_result = result["value"]
elif "r" in result:
signal_result = result["r"]
elif "abs" in result:
signal_result = result["abs"]
elif "x" in result:
signal_result = result["x"]
return signal_result
def _call_module_read(self, signals: Path, results: Path) -> None:
"""Read the results of a module and update the result arrays.
Calls read on the module. The results are updated with the data that
matches the signal paths, meaning the signals must be subscribed before
calling this function in order to get data.
If the result does not contain data for a signal the corresponding result
will be left unchanged. (See _call_module_clear for clearing the results.)
Args:
signals: Wildcard path for all signal quantities.
results: Wildcard path for all result array quantities.
"""
poll_result = self._instrument.raw_module.read(flat=True)
logger.debug("Get module results: %s", poll_result)
if poll_result:
# Loop through all signals and update values if they are available
signal_paths = fnmatch.filter(
map(str, self._node_quant_map), signals.resolve()
)
result_paths = fnmatch.filter(
map(str, self._node_quant_map), results.resolve()
)
for signal, result in zip(signal_paths, result_paths):
signal_quant = self._node_quant_map[Path(signal)]
signal_value, option = self._raw_path_to_zi_node(
self.getValue(signal_quant).lower()
)
# Get result for current node.
signal_result = poll_result.get(signal_value, None)
# the Labber driver only uses the latest result
while isinstance(signal_result, list):
signal_result = signal_result[-1]
if signal_result:
available_options = list(signal_result.keys())
signal_result = self._get_signal_result(signal_result, option)
if signal_result is None:
logger.error(
"Valid signal for %s needed. Must be one of %s. \
Use node/path::signal to specify a signal",
re.sub(r"[a-zA-Z]:", "", signal.replace("\\", "/")),
available_options,
)
continue
# the Labber driver only uses the latest result
signal_result = (
signal_result[-1] if signal_result.ndim > 1 else signal_result
)
result_quant = self._node_quant_map[Path(result)]
logger.info("%s: received %s", result_quant, signal_result[-10:])
self.setValue(result_quant, signal_result)
def _call_module_clear(self, results: Path) -> None:
"""Clears the data on all result quantities.
Necessary since the read function only updates the result quantities
when new data is available.
Args:
results: Wildcard path for all result array quantities.
"""
logger.info("Clear module results")
result_paths = fnmatch.filter(map(str, self._node_quant_map), results.resolve())
for result_path in result_paths:
quant_name = self._node_quant_map[Path(result_path)]
quant = self.getQuantity(quant_name)
if quant.datatype == quant.VECTOR:
self.setValue(quant_name, np.array([]))
return
def _call_module_execute(self) -> None:
"""Module execute function handling.
Depending on the operation this function:
* Start module (Enable set to true)
* Stop module (Enable set to false)
* Read if finished (Read operation)
"""
enable = self.getValue("Enable")
if self.dOp["operation"] in [Interface.GET_CFG, Interface.GET]:
value = not self._instrument.raw_module.finished()
self.setValue("Enable", value)
logger.info("Enable: get %s", value)
elif enable:
logger.info("Enable: set 1")
self._instrument.raw_module.execute()
else:
logger.info("Enable: set 0")
self._instrument.raw_module.finish()
def _call_toolkit_function(self, path: Path, func_info: t.Dict) -> None:
"""Calls a toolkit function
This function can handle both input and return arguments. Both of them
need to be specified in the function_info. Input arguments ("Args") are
a dictionary of kwargs and their corresponding quantities (relative path
to the function path). The output arguments ("Returns") are a list
of relative quantities. Each of these quantities must have a
``return_value`` entry in their driver info that specifies which part
of the raw return value should be used as a new value for the quantity.
Args:
path: Path of the toolkit function.
func_info: Additional information from the settings.json file about
the function.
"""
kwargs = {}
for arg_name, relative_quant_name in func_info.get("Args").items():
if isinstance(relative_quant_name, list):
waveform_paths = {}
for relative_quant_name_el in relative_quant_name:
quant_path = (path / relative_quant_name_el).resolve()
quant_name = self._path_to_quant(quant_path)
path_value = self.getValue(quant_name)
waveform_paths[quant_path.stem] = (
None if str(path_value) in [".", ""] else Path(path_value)
)
try:
kwargs[arg_name] = self._import_waveforms(**waveform_paths)
except IOError as error:
logger.error("%s", error)
kwargs[arg_name] = Waveforms()
else:
quant_name = (path / relative_quant_name).resolve()
kwargs[arg_name], call_empty = self._get_quant_value(quant_name)
if not call_empty and not kwargs[arg_name]:
logger.warning(
"%s: %s must not be empty",
self._path_to_quant(path),
quant_name,
)
return
function = self._get_toolkit_function(path.parts[1:])
logger.info("%s: call with %s", self._path_to_quant(path), kwargs)
try:
return_values = function(**kwargs)
except Exception as error:
logger.error("%s", error)
return
logger.info(
"%s: returned %s", self._path_to_quant(path), str(return_values)[-10:]
)
for relative_quant_name in func_info.get("Returns"):
quant_path = (path / relative_quant_name).resolve()
quant_name = self._path_to_quant(quant_path)
info = self._get_node_info(quant_path).get("return_value", "")
try:
value = eval("return_values" + info)
except Exception as error:
logger.error("%s", error)
value = self.getValue(quant_name)
try:
self.setValue(quant_name, value)
except KeyError:
logger.debug("%s does not exist", quant_name) | zhinst-labber | /zhinst_labber-0.3.1-py3-none-any.whl/zhinst/labber/driver/base_instrument.py | base_instrument.py |
import typing as t
from zhinst.toolkit.nodetree import NodeTree
class SnapshotManager:
"""Manages a instrument snapshot.
Lazy snapshot manager that gets all nodes values from toolkit with a single
transaction and the reuses the values in later calls until ``clear`` is
called.
Args:
nodetree: Toolkit nodetree which is used for getting the values.
"""
def __init__(self, nodetree: NodeTree):
self._values = {}
self._nodetree = nodetree
def get_value(self, path: str) -> t.Any:
"""Get a value from the snapshot.
If the internal snapshot is empty a new one is taken. If the value is
not present in the snapshot, ``None`` is returned.
Args:
path: Path of the node (e.g. /test/a)
Returns:
Value for the specified node, or ``None`` if the node is not part of
the snapshot.
"""
if not self._values:
self._values = self._nodetree["*"](parse=False, enum=False)
try:
return self._values[self._nodetree[path]]
except KeyError:
# node not found in snapshot
print(f"{path} not found in snapshot")
return None
def clear(self) -> None:
"""Clears the current snapshot if there is any."""
self._values = {}
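# Minimal usage sketch (node paths are hypothetical, ``device`` is a toolkit
# device whose ``root`` is a NodeTree):
#   snapshot = SnapshotManager(device.root)
#   rate = snapshot.get_value("demods/0/rate")    # first call takes the snapshot
#   order = snapshot.get_value("demods/0/order")  # served from the cached values
#   snapshot.clear()                              # next get_value re-takes it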
class TransactionManager:
"""Manages a set transaction
It both handles nodes and functions. The node transaction is handled within
toolkit and the functions are cached an called in a loop at the end of the
transaction.
Args:
tk_instrument: toolkit object of the instrument
labber_instrument: labber object of the instrument
"""
def __init__(
self,
tk_instrument: t.Union["Session", "DeviceType", "ModuleType"],
labber_instrument: "BaseDevice",
):
self._transaction = None
self._tk_instrument = tk_instrument
self._labber_instrument = labber_instrument
self._functions = None
def start(self) -> None:
"""Start a new transaction.
Does not do any sanity checks if a transaction can be started or if
there is already a running one.
"""
self._transaction = self._tk_instrument.root.set_transaction()
self._transaction.__enter__()
self._functions = []
def add_function(self, name: str, path: str) -> None:
"""Add function to the transaction.
Args:
name: Internal name of the function.
path: Path of the toolkit function.
"""
self._functions.append((name, path))
def end(self) -> None:
"""End a running transaction.
Does not do any sanity checks if a transaction can be ended or if
there is even a running one.
After the toolkit transaction is closed all cached functions are called
in a loop. (Each function is only called once even if it was cached
multiple times).
"""
self._transaction.__exit__(None, None, None)
self._transaction = None
# Call every function only once
functions = [
func
for n, func in enumerate(self._functions)
if func not in self._functions[:n]
]
for function in functions:
self._labber_instrument.call_function(function[0], function[1])
self._functions = None
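# Usage sketch (function name and path are hypothetical): functions queued
# multiple times during a transaction are executed only once on ``end``.
#   manager = TransactionManager(tk_instrument, labber_instrument)
#   manager.start()
#   manager.add_function("module_execute", "/awgs/0")
#   manager.add_function("module_execute", "/awgs/0")  # duplicate, dropped
#   manager.end()  # closes the toolkit transaction, then calls the function once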
def is_running(self) -> bool:
"""Check if a transaction is running or not.
Returns:
Status of the transaction.
"""
return self._transaction is not None | zhinst-labber | /zhinst_labber-0.3.1-py3-none-any.whl/zhinst/labber/driver/snapshot_manager.py | snapshot_manager.py |
import typing as t
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
from zhinst.qcodes.driver.devices.hdawg import HDAWG as HDAWGDriver
from zhinst.qcodes.driver.devices.pqsc import PQSC as PQSCDriver
from zhinst.qcodes.driver.devices.shfqa import SHFQA as SHFQADriver
from zhinst.qcodes.driver.devices.shfqc import SHFQC as SHFQCDriver
from zhinst.qcodes.driver.devices.shfsg import SHFSG as SHFSGDriver
from zhinst.qcodes.driver.devices.uhfli import UHFLI as UHFLIDriver
from zhinst.qcodes.driver.devices.uhfqa import UHFQA as UHFQADriver
from zhinst.qcodes.session import ZISession
class ZIDevice(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments ZIDevice.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
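# Usage sketch (serial and host are placeholders):
#   device = ZIDevice("dev1234", "localhost")
#   device.features.serial()  # node access; available nodes depend on the device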
class SHFQA(SHFQADriver):
"""QCoDeS driver for the Zurich Instruments SHFQA.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class SHFSG(SHFSGDriver):
"""QCoDeS driver for the Zurich Instruments SHFSG.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class HDAWG(HDAWGDriver):
"""QCoDeS driver for the Zurich Instruments HDAWG.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class PQSC(PQSCDriver):
"""QCoDeS driver for the Zurich Instruments PQSC.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class SHFQC(SHFQCDriver):
"""QCoDeS driver for the Zurich Instruments SHFQC.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class UHFLI(UHFLIDriver):
"""QCoDeS driver for the Zurich Instruments UHFLI.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class UHFQA(UHFQADriver):
"""QCoDeS driver for the Zurich Instruments UHFQA.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class MFLI(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments MFLI.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class MFIA(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments MFIA.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8004)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8004,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=False, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self
class HF2(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments HF2.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
host: Host address of the data server (e.g. localhost)
port: Port number of the data server. If not specified the session
uses the default port. (default = 8005)
interface: Device interface (e.g. "1GbE"). If not specified
the default interface from the discovery is used.
name: Name of the instrument in qcodes.
raw: Flag if the qcodes instance should only be created with the nodes and
not forward the toolkit functions. (default = False)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the flag will create a new session.
Warning:
Creating a new session should be done carefully and reusing
the created session is not possible. Consider instantiating a
new session directly.
"""
def __init__(
self,
serial: str,
host: str,
port: int = 8005,
*,
interface: t.Optional[str] = None,
name=None,
raw=False,
new_session: bool = False,
):
session = ZISession(host, port, hf2=True, new_session=new_session)
tk_device = session.toolkit_session.connect_device(serial, interface=interface)
super().__init__(tk_device, session, name=name, raw=raw)
session.devices[self.serial] = self | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/device_creator.py | device_creator.py |
from collections.abc import MutableMapping
import typing as t
from zhinst.toolkit.session import Devices as TKDevices
from zhinst.toolkit.session import PollFlags
from zhinst.toolkit.session import Session as TKSession
from zhinst.toolkit.session import ModuleHandler as TKModuleHandler
from zhinst.toolkit.nodetree.helper import lazy_property
from zhinst.core import ziDAQServer
import zhinst.qcodes.driver.devices as ZIDevices
import zhinst.qcodes.driver.modules as ZIModules
from zhinst.qcodes.qcodes_adaptions import (
init_nodetree,
tk_node_to_parameter,
ZIParameter,
ZIInstrument,
)
class Devices(MutableMapping):
"""Mapping class for the connected devices.
Maps the connected devices from data server to lazy device objects.
On every access the connected devices are read from the data server. This
ensures that even if devices get connected/disconnected through another
session the list will be up to date.
Args:
session: active session to the data server.
tk_devices: toolkit devices object.
"""
def __init__(self, session: "Session", tk_devices: TKDevices):
self._tk_devices = tk_devices
self._session = session
self._devices: t.Dict[str, ZIDevices.DeviceType] = {}
self._default_properties: t.Dict[
str, t.Tuple[t.Optional[str], t.Optional[bool]]
] = {}
def __getitem__(self, key) -> ZIDevices.DeviceType:
key = key.lower()
if key in self.connected():
if key not in self._devices:
tk_device = self._tk_devices[key]
name, raw = self._default_properties.get(key, (None, False))
self._devices[key] = ZIDevices.DEVICE_CLASS_BY_MODEL.get(
tk_device.__class__.__name__, ZIDevices.ZIBaseInstrument
)(tk_device, self._session, name=name, raw=raw)
return self._devices[key]
raise KeyError(key)
def __setitem__(self, key: str, device: ZIDevices.DeviceType) -> None:
if device.serial not in self.connected():
raise LookupError(
"Illegal operation. Devices must be connected through the session."
)
self._devices[key] = device
def __delitem__(self, key):
self._devices.pop(key, None)
def __iter__(self):
return iter(self.connected())
def __len__(self):
return len(self.connected())
def update_device_properties(
self, serial: str, name: t.Optional[str], raw: t.Optional[bool]
) -> None:
"""Update the properties for a device.
The device options are used when the QCoDeS option of a new device
is created.
Args:
serial: Serial of the device (e.g. dev1234)
name: Optional name of the QCoDeS device object
raw: Flag if qcodes instance should only created with the nodes and
not forwarding the toolkit functions. (default = False)
Raises:
RuntimeError: If the device is already created
"""
if serial in self._devices:
raise RuntimeError(
f"The Qcodes Instance of {serial} already exists.\n"
"The device properties can therfor no longer be changed"
)
self._default_properties[serial.lower()] = (name, raw)
def connected(self) -> t.List[str]:
"""Get a list of devices connected to the data server.
Returns:
list[str]: List of all connected devices.
"""
return self._tk_devices.connected()
def visible(self) -> t.List[str]:
"""Get a list of devices visible to the data server.
Returns:
list[str]: List of all visible devices.
"""
return self._tk_devices.visible()
class ModuleHandler:
"""Modules of LabOne.
Handler for all additional, so-called modules of LabOne. A LabOne module is
bound to a user session but creates an independent session to the Data Server.
This has the advantage that the modules do not interfere with the user session.
It also means that creating a module causes additional resource allocation,
both at the client and the data server. New modules should therefore only be
instantiated with care.
Toolkit holds a lazily generated instance of all modules. This ensures that
no more than one module of each type gets created by accident and that the
access to the modules is optimized.
Of course there are many use cases where more than one module of a single
type is required. This class therefore also exposes a ``create`` function for
each LabOne module. These functions create an unmanaged instance of that
module (unmanaged means toolkit does not hold an instance of that module).
Args:
session: Active user session
tk_modules: Underlying toolkit module handler
"""
def __init__(self, session: "Session", tk_modules: TKModuleHandler):
self._session = session
self._tk_modules = tk_modules
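# Usage sketch: the lazy properties below hand out one shared (managed) module
# instance per type, while the ``create_*`` methods always spawn an additional,
# unmanaged instance with its own Data Server session.
#   daq = session.modules.daq                        # reused on every access
#   extra_daq = session.modules.create_daq_module()  # independent second module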
def create_awg_module(self) -> ZIModules.ZIBaseModule:
"""Create a QCoDeS instance of the AWGModule.
The new instance creates a new session to the DataServer.
New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `awg`.
Returns:
created module
"""
module = self._tk_modules.create_awg_module()
return ZIModules.ZIBaseModule(module, self._session, name="awg_module")
def create_daq_module(self) -> ZIModules.ZIDAQModule:
"""Create a QCoDeS instance of the DAQModule.
The new instance creates a new session to the DataServer.
New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `daq`.
Returns:
created module
"""
module = self._tk_modules.create_daq_module()
return ZIModules.ZIDAQModule(module, self._session)
def create_device_settings_module(self) -> ZIModules.ZIDeviceSettingsModule:
"""Create a QCoDeS instance of the DeviceSettingsModule.
The new instance creates a new session to the DataServer.
New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `device_settings`.
Returns:
DeviceSettingsModule: created module
"""
module = self._tk_modules.create_device_settings_module()
return ZIModules.ZIDeviceSettingsModule(module, self._session)
def create_impedance_module(self) -> ZIModules.ZIImpedanceModule:
"""Create a QCoDeS instance of the ImpedanceModule.
The new instance creates a new session to the DataServer.
New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `impedance`.
Returns:
created module
"""
module = self._tk_modules.create_impedance_module()
return ZIModules.ZIImpedanceModule(module, self._session)
def create_mds_module(self) -> ZIModules.ZIBaseModule:
"""Create a QCoDeS instance of the PIDAdvisorModule.
The new instance creates a new session to the DataServer.
New instances should therefor be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `multi_device_sync_module`.
Returns:
created module
"""
module = self._tk_modules.create_mds_module()
return ZIModules.ZIBaseModule(module, self._session, name="mds_module")
def create_pid_advisor_module(self) -> ZIModules.ZIPIDAdvisorModule:
"""Create a QCoDeS instance of the PIDAdvisorModule.
The new instance creates a new session to the DataServer.
New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `pid_advisor`.
Returns:
created module
"""
module = self._tk_modules.create_pid_advisor_module()
return ZIModules.ZIPIDAdvisorModule(module, self._session)
def create_precompensation_advisor_module(
self,
) -> ZIModules.ZIPrecompensationAdvisorModule:
"""Create a QCoDeS instance of the PrecompensationAdvisorModule.
The new instance creates a new session to the DataServer.
New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `precompensation_advisor`.
Returns:
created module
"""
module = self._tk_modules.create_precompensation_advisor_module()
return ZIModules.ZIPrecompensationAdvisorModule(module, self._session)
def create_qa_module(self) -> ZIModules.ZIBaseModule:
"""Create a QCoDeS instance of the AwgModule.
The new instance creates a new session to the DataServer.
New instances should therefor be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `qa_module`.
Returns:
created module
"""
module = self._tk_modules.create_qa_module()
return ZIModules.ZIBaseModule(module, self._session, name="qa_module")
def create_scope_module(self) -> ZIModules.ZIScopeModule:
"""Create a QCoDeS instance of the AwgModule.
The new instance creates a new session to the DataServer.
New instances should therefor be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `scope_module`.
Returns:
created module
"""
module = self._tk_modules.create_scope_module()
return ZIModules.ZIScopeModule(module, self._session)
def create_sweeper_module(self) -> ZIModules.ZISweeperModule:
"""Create a QCoDeS instance of the SweeperModule.
The new instance creates a new session to the DataServer.
New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `sweeper`.
Returns:
created module
"""
module = self._tk_modules.create_sweeper_module()
return ZIModules.ZISweeperModule(module, self._session)
def create_shfqa_sweeper(self) -> ZIModules.ZISHFQASweeper:
"""Create an instance of the SHFQASweeper.
For now the general sweeper module does not support the SHFQA. However a
python based implementation called ``SHFSweeper`` does already provide
this functionality. The ``SHFSweeper`` is part of the ``zhinst`` module
and can be found in the utils.
Toolkit wraps around the ``SHFSweeper`` and exposes an interface that is
similar to the LabOne modules, meaning the parameters are exposed in a
node tree like structure.
In addition a new session is created. This has the benefit that the
sweeper implementation does not interfere with the commands and
setups from the user.
Returns:
created object
"""
module = self._tk_modules.create_shfqa_sweeper()
return ZIModules.ZISHFQASweeper(module, self._session)
@lazy_property
def awg(self) -> ZIModules.ZIBaseModule:
"""Managed instance of the zhinst.core.AwgModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_awg_module()
@lazy_property
def daq(self) -> ZIModules.ZIBaseModule:
"""Managed instance of the zhinst.core.DataAcquisitionModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_daq_module()
@lazy_property
def device_settings(self) -> ZIModules.ZIDeviceSettingsModule:
"""Managed instance of the zhinst.core.DeviceSettingsModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_device_settings_module()
@lazy_property
def impedance(self) -> ZIModules.ZIImpedanceModule:
"""Managed instance of the zhinst.core.ImpedanceModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_impedance_module()
@lazy_property
def mds(self) -> ZIModules.ZIBaseModule:
"""Managed instance of the zhinst.core.MultiDeviceSyncModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_mds_module()
@lazy_property
def pid_advisor(self) -> ZIModules.ZIPIDAdvisorModule:
"""Managed instance of the zhinst.core.PidAdvisorModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_pid_advisor_module()
@lazy_property
def precompensation_advisor(self) -> ZIModules.ZIPrecompensationAdvisorModule:
"""Managed instance of the zhinst.core.PrecompensationAdvisorModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_precompensation_advisor_module()
@lazy_property
def qa(self) -> ZIModules.ZIBaseModule:
"""Managed instance of the zhinst.core.QuantumAnalyzerModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_qa_module()
@lazy_property
def scope(self) -> ZIModules.ZIScopeModule:
"""Managed instance of the zhinst.core.ScopeModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_scope_module()
@lazy_property
def sweeper(self) -> ZIModules.ZISweeperModule:
"""Managed instance of the zhinst.core.SweeperModule.
Managed in this sense means that only one instance is created
and held inside the connection manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
resources.
"""
return self.create_sweeper_module()
@lazy_property
def shfqa_sweeper(self) -> ZIModules.ZISHFQASweeper:
"""Managed instance of the zhinst.core.SweeperModule.
Managed in this sense means that only one instance is created
and hold inside the connection Manager. This makes it easier to access
the modules from with toolkit, since creating a module requires
resources.
"""
return self.create_shfqa_sweeper()
class ZISession:
"""Session to a data server.
Zurich Instruments devices use a server-based connectivity methodology.
Server-based means that all communication between the user and the
instrument takes place via a computer program called a server, the data
server. The data server recognizes available instruments and manages all
communication between the instrument and the host computer on one side, and
communication to all the connected clients on the other side. (For more
information on the architecture please refer to the user manual
http://docs.zhinst.com/labone_programming_manual/introduction.html)
The entry point into any connection is therefore a client session to an
existing data server. This class represents a single client session to a
data server. The session enables the user to connect to one or multiple
instruments (also creates the dedicated objects for each device), access
the LabOne modules and poll data.
Since QCoDeS normally instantiates the device specific objects directly,
this driver also exposes helper classes for that purpose. These helper
classes create a session and connect the specified device to it. To avoid
each device getting its own session, ``ZISession`` by default creates only one
session to a single data server and reuses that.
Info:
Except for the HF2 a single session can be used to connect to all
devices from Zurich Instruments. Since the HF2 is historically based on
another data server called the hf2 data server it is not possible to
connect HF2 devices a "normal" data server and also not possible to
connect devices apart from HF2 to the hf2 data server.
Args:
server_host: Host address of the data server (e.g. localhost)
server_port: Port number of the data server. If not specified the session
uses the default port 8004 (8005 for HF2 if specified).
(default = None)
hf2: Flag if the session should be established with an HF2 data server or
the "normal" one for all other devices. If not specified the session
will detect the type of the data server based on the port.
(default = None)
new_session: By default zhinst-qcodes reuses already existing data
server session (within itself only), meaning only one session to a
data server exists. Setting the Flag will create a new session.
Warning: Creating a new session should be done carefully since it
requires more resources and can create unwanted side effects.
connection: Existing daq server object. If specified the session will
not create a new session to the data server but reuse the passed
one. (default = None)
"""
def __new__(
cls,
server_host: str,
server_port: t.Optional[int] = None,
*,
hf2: t.Optional[bool] = None,
new_session=False,
connection: ziDAQServer = None,
):
"""Session creator."""
if not new_session:
for instance in Session.instances():
if instance.server_host == server_host and (
instance.is_hf2_server == hf2
or server_port is None
or instance.server_port == server_port
):
return instance
return Session(server_host, server_port, hf2=hf2, connection=connection)
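# Usage sketch (host is a placeholder): repeated calls with a matching host and
# port return the same underlying Session object unless new_session is set.
#   session_a = ZISession("localhost")
#   session_b = ZISession("localhost")
#   assert session_a is session_b
#   session_c = ZISession("localhost", new_session=True)  # separate session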
class Session(ZIInstrument):
"""Session to a data server.
Zurich Instruments devices use a server-based connectivity methodology.
Server-based means that all communication between the user and the
instrument takes place via a computer program called a server, the data
sever. The data sever recognizes available instruments and manages all
communication between the instrument and the host computer on one side, and
communication to all the connected clients on the other side. (For more
information on the architecture please refer to the user manual
http://docs.zhinst.com/labone_programming_manual/introduction.html)
The entry point for any connection is therefore a client session to an
existing data server. This class represents a single client session to a
data server. The session enables the user to connect to one or multiple
instruments (also creates the dedicated objects for each device), access
the LabOne modules and poll data. In short it is the only object the user
needs to create themselves.
Info:
Except for the HF2 a single session can be used to connect to all
devices from Zurich Instruments. Since the HF2 is historically based on
another data server called the hf2 data server it is not possible to
connect HF2 devices a "normal" data server and also not possible to
connect devices apart from HF2 to the hf2 data server.
Args:
server_host: Host address of the data server (e.g. localhost)
server_port: Port number of the data server. If not specified the session
uses the default port 8004 (8005 for HF2 if specified).
(default = None)
hf2: Flag if the session should be established with an HF2 data server or
the "normal" one for all other devices. If not specified the session
will detect the type of the data server based on the port.
(default = None)
connection: Existing daq server object. If specified the session will
not create a new session to the data server but reuse the passed
one. (default = None)
"""
def __init__(
self,
server_host: str,
server_port: t.Optional[int] = None,
*,
hf2: t.Optional[bool] = None,
connection: ziDAQServer = None,
):
self._tk_object = TKSession(
server_host, server_port, connection=connection, hf2=hf2
)
super().__init__(f"zi_session_{len(self.instances())}", self._tk_object.root)
self._devices = Devices(self, self._tk_object.devices)
self._modules = ModuleHandler(self, self._tk_object.modules)
init_nodetree(self, self._tk_object.root, self._snapshot_cache)
def connect_device(
self,
serial: str,
*,
interface: t.Optional[str] = None,
name: t.Optional[str] = None,
raw: t.Optional[bool] = None,
) -> ZIDevices.DeviceType:
"""Establish a connection to a device.
Info:
It is allowed to call this function for an already connected device.
In that case the function simply returns the device object of the
already connected device.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the
instrument.
interface: Device interface (e.g. = "1GbE"). If not specified
the default interface from the discover is used.
name: Name of the instrument in qcodes.
(default = "zi_{dev_type}_{serial}")
raw: Flag if qcodes instance should only created with the nodes and
not forwarding the toolkit functions. (default = False)
Returns:
Device object
"""
if name or raw is not None:
self._devices.update_device_properties(serial, name, raw)
self._tk_object.connect_device(serial, interface=interface)
return self._devices[serial]
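# Usage sketch (serial is a placeholder):
#   session = ZISession("localhost")
#   device = session.connect_device("dev1234", name="my_device")
#   assert device is session.devices["dev1234"]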
def disconnect_device(self, serial: str) -> None:
"""Disconnect a device.
Warning:
This function will return immediately. The disconnection of the
device may not have finished yet.
Args:
serial (str): Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
"""
self._devices.pop(serial, None)
self._tk_object.disconnect_device(serial)
def sync(self) -> None:
"""Synchronize all connected devices.
Synchronization in this case means creating a defined state.
The following steps are performed:
* Ensures that all set commands have been flushed to the device
* Ensures that get and poll commands only return data which was
recorded after the sync command. (ALL poll buffers are cleared!)
* Blocks until all devices have cleared their busy flag.
Warning:
The sync is performed for all devices connected to the daq server
Warning:
This command is a blocking command that can take a substantial
amount of time.
Raises:
RuntimeError: ZIAPIServerException: Timeout during sync of device
"""
self._tk_object.sync()
def poll(
self,
recording_time: float = 0.1,
timeout: float = 0.5,
flags: PollFlags = PollFlags.DEFAULT,
) -> t.Dict[ZIParameter, t.Dict[str, t.Any]]:
"""Polls all subscribed data.
Poll the value changes in all subscribed nodes since either subscribing
or the last poll (assuming no buffer overflow has occurred on the Data
Server).
Args:
recording_time: defines the duration of the poll. (Note that not
only the newly recorded values are polled but all values since
either subscribing or the last poll). Needs to be larger than
zero. (default = 0.1)
timeout: Adds an additional timeout in seconds on top of
`recording_time`. Only relevant when communicating in a slow
network. In this case it may be set to a value larger than the
expected round-trip time in the network. (default = 0.5)
flags: Flags for the polling (see :class:`PollFlags`).
Returns:
Polled data in a dictionary. The key is a `Node` object and the
value is a dictionary with the raw data from the device
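Example:
    A hedged sketch. The demodulator node is a placeholder and only
    exists on instruments that provide demodulators; ``device`` is a
    connected device object:
        >>> device.demods[0].sample.subscribe()
        >>> data = session.poll(recording_time=0.5)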
"""
polled_data_tk = self._tk_object.poll(
recording_time=recording_time, timeout=timeout, flags=flags
)
polled_data = {}
for tk_node, data in polled_data_tk.items():
tk_node = self._tk_object.raw_path_to_node(tk_node)
device = self.devices[tk_node.root.prefix_hide]
parameter = tk_node_to_parameter(device, tk_node)
polled_data[parameter] = data
return polled_data
@property
def devices(self) -> Devices:
"""Mapping for the connected devices."""
return self._devices
@property
def modules(self) -> ModuleHandler:
"""Modules of LabOne."""
return self._modules
@property
def is_hf2_server(self) -> bool:
"""Flag if the data server is a HF2 Data Server."""
return self._tk_object.is_hf2_server
@property
def daq_server(self) -> ziDAQServer:
"""Managed instance of the zi.ziDAQServer."""
return self._tk_object.daq_server
@property
def server_host(self) -> str:
"""Server host."""
return self._tk_object.server_host
@property
def server_port(self) -> int:
"""Server port."""
return self._tk_object.server_port
@property
def toolkit_session(self) -> TKSession:
"""Underlying zhinst-toolkit session."""
return self._tk_object
# File: /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/session.py (package: zhinst-qcodes)
import re
from datetime import datetime
import typing as t
from contextlib import contextmanager, nullcontext
from collections.abc import Mapping
import numpy as np
from qcodes.instrument.base import Instrument
from qcodes.instrument.channel import ChannelList, InstrumentChannel
from qcodes.instrument.parameter import Parameter
from qcodes.utils.validators import ComplexNumbers
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import NodeDict as TKNodeDict
from zhinst.toolkit.nodetree.node import NodeInfo
class ZISnapshotHelper:
"""Helper class for the snapshot with Zurich Instrument devices.
Instead of getting each node with a single get command this class bundles
the get into a single command and stores the returned values into a
temporary dictionary.
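Example:
    A hedged usage sketch; ``nodetree`` stands for an existing toolkit
    NodeTree and ``parameter`` for a ZIParameter belonging to it:
        >>> helper = ZISnapshotHelper(nodetree)
        >>> with helper.snapshot():
        ...     value = helper.get(parameter, parameter.get)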
"""
def __init__(self, nodetree: NodeTree, is_module: bool = False):
self._is_running = False
self._value_dict: t.Dict[str, t.Any] = {}
self._start = datetime.now()
self._nodetree = nodetree
self._is_module = is_module
@contextmanager
def snapshot(self, name: t.Optional[str] = None):
"""Context manager for a optimized snapshot with ZI devices."""
is_owner = not self._is_running
if is_owner:
self._start_snapshot(name)
try:
yield
finally:
if is_owner:
self._stop_snapshot()
def _start_snapshot(self, name: t.Optional[str] = None) -> bool:
"""Start a snapshot and make a single get to the device.
Args:
name: Name of the subnode for which the snapshot should be taken.
If not specified a snapshot of all nodes will be taken. (default = None)
(default = None)
Returns:
bool: Flag if a new snapshot was started.
"""
if not self._nodetree or self._is_running:
return False
self._is_running = True
if not self._is_module:
kwargs = {
"excludestreaming": True,
"settingsonly": False,
"excludevectors": True,
"flat": True,
}
else:
kwargs = {"flat": True}
prefix = self._nodetree.prefix_hide
if not name:
name = prefix if prefix else ""
else:
name = "/" + prefix + "/" + name
self._value_dict = self._nodetree.connection.get(f"{name}/*", **kwargs)
self._start = datetime.now()
return True
def _stop_snapshot(self) -> None:
"""Stop a snapshot to prevent use of outdate data by accident."""
self._is_running = False
self._value_dict = {}
def get(self, parameter: Parameter, fallback_get: t.Callable) -> t.Any:
"""Get the value for a specific QCoDeS Parameter.
Tries to mimic the behavior of a normal get (e.g. update cache).
If the value is not found in the dictionary the fallback get is called.
The fallback get should get the value from the device.
Args:
parameter: QCoDeS Parameter object
fallback_get: fallback function to get the value from the device
Returns:
Value for the Node
"""
value = self._value_dict.get(parameter.zi_node.lower())
if value is not None:
try:
value = value["value"][0]
except (IndexError, TypeError):
# HF2 has no timestamp -> no dict
value = value[0]
# convert numpy types to standard Python types
value = value.item() if hasattr(value, "item") else value
# convert complex into string
value = str(value) if isinstance(value, complex) else value
parameter.cache._update_with(
value=value, raw_value=value, timestamp=self._start
)
else: # fallback is normal get
value = fallback_get()
return value
@staticmethod
def print_readable_snapshot(
qcodes_object: Instrument, update: bool = False, max_chars: int = 80
) -> None:
"""Prints a readable version of the snapshot.
The readable snapshot includes the name, value and unit of each
parameter.
A convenience function to quickly get an overview of the
status of an instrument.
Args:
qcodes_object (object): Object for which the snapshot should be printed.
update (bool): Flag if the state should be queried from the
instrument.
max_chars (int): The maximum number of characters per line. The
readable snapshot will be cropped if this value is exceeded.
Defaults to 80 to be consistent with default terminal width.
"""
floating_types = (float, np.integer, np.floating)
snapshot = qcodes_object.snapshot(update=update)
snapshot_parameters = snapshot.get("parameters")
if snapshot_parameters:
# Min of 50 is to prevent a super long parameter name to break this
# function
par_lengths = [len(p) for p in snapshot_parameters]
par_field_len = min(max(par_lengths) + 1, 50) if par_lengths else 0
print(qcodes_object.name + ":")
print(f"\t{'parameter':<{par_field_len}}: value")
print("\t" + "-" * (max_chars - 8))
for parameter in sorted(snapshot_parameters):
parameter = snapshot_parameters[parameter]
name = parameter["name"]
msg = f"\t{name:<{par_field_len}}:"
# in case of e.g. ArrayParameters, that usually have
# snapshot_value == False, the parameter may not have
# a value in the snapshot
val = parameter.get("value", "Not available")
unit = parameter.get("unit", None)
if unit is None:
# this may be a multi parameter
unit = parameter.get("units", None)
if isinstance(val, floating_types):
msg += f"\t{val:.5g} "
# numpy float and int types format like builtins
else:
msg += f"\t{val} "
if unit != "": # corresponds to no unit
msg += f"({unit})"
# Truncate the message if it is longer than max length
if len(msg) > max_chars and max_chars != -1:
msg = msg[0 : max_chars - 3] + "..." # noqa: E203
print(msg)
for submodule in qcodes_object.submodules.values():
submodule.print_readable_snapshot(update=update, max_chars=max_chars)
@property
def is_running(self) -> bool:
"""Flag if a snapshot is in progress."""
return self._is_running
class ZIParameter(Parameter):
"""Zurich Instrument specific QCoDeS Parameter.
Overwrite the snapshot functionality to use the ZISnapshotHelper.
Forwards all args and kwargs to the QCoDeS Parameter class.
Args:
snapshot_cache (ZISnapshotHelper): ZI specific SnapshotHelper object
zi_node (str): ZI node path of the parameter
tk_node (Node): ZI specific node object of the nodetree
"""
def __init__(
self,
*args,
snapshot_cache: ZISnapshotHelper,
zi_node: str,
tk_node: Node,
**kwargs,
):
super().__init__(*args, **kwargs)
self.get_raw = kwargs["get_cmd"]
self.set_raw = kwargs["set_cmd"]
self.get = self._wrap_get(self.get_raw)
self.set = self._set_zi
self._snapshot_cache = snapshot_cache
self._zi_node = zi_node
self._tk_node = tk_node
def __call__(self, *args, **kwargs):
"""Call operator that either gets (empty) or gets the value of a node.
Args:
value: Optional value that should be set to the node. If not
specified the operator will return the value of the node
instead.
deep: Flag if the operation should block until the device has
acknowledged the operation. The operation returns the value
acknowledged by the device. This takes significantly longer
than a normal operation and should be used carefully.
enum: Flag if enumerated values should return the enum value as
string or return the raw number.
parse: Flag if the GetParser or SetParser, if present, should be
applied or not.
Returns:
Value of the node for a get operation. If the deep flag is set the
acknowledged value from the device is returned (applies also for
the set operation).
Raises:
AttributeError: If the connection does not support the necessary
function to get/set the value.
RuntimeError: If self.node_info.type is one of the following:
[ZIPWAWave, ZITriggerSample, ZICntSample, ZIImpedanceSample,
ZIScopeWave, ZIAuxInSample]. The reason is that these nodes can
only be polled.
TypeError: if the deep command is not available for this node
(e.g. sample nodes)
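Example:
    A hedged sketch; ``param`` stands for any ZIParameter instance:
        >>> value = param()       # get the current value
        >>> param(5)              # set the value
        >>> param(5, deep=True)   # set and return the acknowledged value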
"""
if len(args) == 0:
if self.gettable:
return self.get(**kwargs)
raise NotImplementedError("no get cmd found in" + f" Parameter {self.name}")
if self.settable:
return self.set(*args, **kwargs)
raise NotImplementedError("no set cmd found in" + f" Parameter {self.name}")
def _set_zi(self, *args, **kwargs):
"""ZI specific set that supports returning values.
QCoDeS does not provide a way to return a value for the set command.
However a `deep` command in LabOne does return the acknowledged value
by the device. This function in combination with the custom `__call__`
method bypasses this problem.
This function simply wraps around the QCoDeS specific set functionality
(_wrap_set), stores the value returned by zhinst-toolkit and calls the
get functionality with that value. Thus it acts as a set and get within
one single command without overwriting the QCoDeS specific
implementation.
"""
set_return = None
def set_wrapper(*args, **kwargs) -> None:
nonlocal set_return
set_return = self.set_raw(*args, **kwargs)
self._wrap_set(set_wrapper)(*args, **kwargs)
return self._wrap_get(lambda: set_return)() if set_return is not None else None
def snapshot_base(
self, update: bool = True, params_to_skip_update: t.List[str] = None
) -> dict:
"""State of the parameter as a JSON-compatible dict.
(everything that the custom JSON encoder class
:class:`qcodes.utils.helpers.NumpyJSONEncoder` supports)
If the parameter has been initiated with ``snapshot_value=False``,
the snapshot will NOT include the ``value`` and ``raw_value`` of the
parameter.
Overwrite base class function to use the snapshot_cache.
Args:
update: If True, update the state by calling ``parameter.get()``
unless ``snapshot_get`` of the parameter is ``False``.
If ``update`` is ``None``, use the current value from the
``cache`` unless the cache is invalid. If ``False``, never call
``parameter.get()``.
params_to_skip_update: No effect but may be passed from base class
Returns:
base snapshot
"""
get = self.__dict__["get"]
try:
self.get = lambda: self._snapshot_cache.get(self, get)
return super().snapshot_base(
update=update, params_to_skip_update=params_to_skip_update
)
finally:
self.get = get
def subscribe(self) -> None:
"""Subscribe to nodes. Fetch data with the poll command.
In order to avoid fetching old data that is still in the buffer execute
a flush command before subscribing to data streams.
"""
self._tk_node.subscribe()
def unsubscribe(self) -> None:
"""Unsubscribe data stream.
Use this command after recording to avoid buffer overflows that may
increase the latency of other commands.
"""
self._tk_node.unsubscribe()
def get_as_event(self) -> None:
"""Trigger an event.
The node data is returned by a subsequent poll command.
"""
self._tk_node.get_as_event()
def wait_for_state_change(
self,
value: t.Union[int, str],
*,
invert: bool = False,
timeout: float = 2,
sleep_time: float = 0.005,
) -> None:
"""Waits until the node has the expected state/value.
WARNING: Only supports integer values as reference.
Args:
value: expected value of the node.
invert: Instead of waiting for the value, the function will wait for
any value except the passed value. (default = False)
Useful when waiting for a value to change from the existing one.
timeout: max wait time. (default = 2)
sleep_time: sleep interval in seconds. (default = 0.005)
"""
self._tk_node.wait_for_state_change(
value, invert=invert, timeout=timeout, sleep_time=sleep_time
)
@property
def node_info(self) -> NodeInfo:
"""Zurich Instrument node representation of the Parameter."""
return self._tk_node.node_info
@property
def zi_node(self) -> Node:
"""Zurich Instrument node representation of the Parameter."""
return self._zi_node
@property
def tk_node(self) -> Node:
"""Toolkit node of the Parameter."""
return self._tk_node
class ZINode(InstrumentChannel):
"""Zurich Instrument specific QCoDeS InstrumentChannel.
Overwrite the snapshot functionality to use the ZISnapshotHelper.
Forwards all args and kwargs to the QCoDeS InstrumentChannel class.
Args:
snapshot_cache (ZISnapshotHelper): ZI specific SnapshotHelper object
zi_node (Node): ZI specific node object of the nodetree
"""
def __init__(
self,
*args,
snapshot_cache: ZISnapshotHelper,
zi_node: Node = None,
**kwargs,
):
super().__init__(*args, **kwargs)
self._snapshot_cache = snapshot_cache
self._zi_node = zi_node
def snapshot(self, update: bool = True) -> dict:
"""Decorate a snapshot dictionary with metadata.
Override base method to make update default True and use the
ZISnapshotHelper.
Args:
update: Passed to snapshot_base. (default = True)
Returns:
dict: Base snapshot.
"""
with self._snapshot_cache.snapshot(self._zi_node) if update else nullcontext():
return super().snapshot(update)
def print_readable_snapshot(self, update: bool = True, max_chars: int = 80) -> None:
"""Prints a readable version of the snapshot.
The readable snapshot includes the name, value and unit of each
parameter.
A convenience function to quickly get an overview of the
status of an instrument.
Args:
update: If ``True``, update the state by querying the
instrument. If ``False``, just use the latest values in memory.
This argument gets passed to the snapshot function.
max_chars: the maximum number of characters per line. The
readable snapshot will be cropped if this value is exceeded.
Defaults to 80 to be consistent with default terminal width.
"""
with self._snapshot_cache.snapshot(self._zi_node) if update else nullcontext():
return super().print_readable_snapshot(update, max_chars)
class ZIChannelList(ChannelList):
"""Zurich Instrument specific QCoDeS InstrumentChannel.
Overwrite the snapshot functionality to use the ZISnapshotHelper.
Forwards all args and kwargs to the QCoDeS InstrumentChannel class.
Args:
snapshot_cache (ZISnapshotHelper): ZI specific SnapshotHelper object
zi_node (Node): ZI specific node object of the nodetree
"""
def __init__(self, *args, snapshot_cache=None, zi_node=None, **kwargs):
super().__init__(*args, **kwargs)
self._snapshot_cache = snapshot_cache
self._zi_node = zi_node
def snapshot(self, update: bool = True) -> dict:
"""Decorate a snapshot dictionary with metadata.
Override base method to make update default True and use the
ZISnapshotHelper.
Args:
update: Passed to snapshot_base. (default = True)
Returns:
dict: Base snapshot.
"""
with self._snapshot_cache.snapshot(self._zi_node) if update else nullcontext():
return super().snapshot(update)
def print_readable_snapshot(self, update: bool = True, max_chars: int = 80) -> None:
"""Prints a readable version of the snapshot.
The readable snapshot includes the name, value and unit of each
parameter.
A convenience function to quickly get an overview of the
status of an instrument.
Args:
update: If ``True``, update the state by querying the
instrument. If ``False``, just use the latest values in memory.
This argument gets passed to the snapshot function.
max_chars: the maximum number of characters per line. The
readable snapshot will be cropped if this value is exceeded.
Defaults to 80 to be consistent with default terminal width.
"""
with self._snapshot_cache.snapshot(self._zi_node) if update else nullcontext():
return super().print_readable_snapshot(update, max_chars)
class ZIInstrument(Instrument):
"""Zurich Instrument specific Qcodes Instrument.
Overwrite the snapshot functionality to use the ZISnapshotHelper.
Args:
name: Name of the instrument in QCoDeS.
nodetree (NodeTree): Node tree of the instrument.
is_module (bool): Flag if the node tree belongs to a LabOne module.
(default = False)
"""
def __init__(self, name, nodetree: NodeTree, is_module=False):
super().__init__(name)
self._snapshot_cache = ZISnapshotHelper(nodetree, is_module=is_module)
def snapshot(self, update: bool = True) -> dict:
"""Decorate a snapshot dictionary with metadata.
Override base method to make update default True and use the
ZISnapshotHelper.
Args:
update: Passed to snapshot_base.
Returns:
dict: Base snapshot.
"""
with self._snapshot_cache.snapshot() if update else nullcontext():
return super().snapshot(update)
def print_readable_snapshot(self, update: bool = True, max_chars: int = 80) -> None:
"""Prints a readable version of the snapshot.
The readable snapshot includes the name, value and unit of each
parameter.
A convenience function to quickly get an overview of the
status of an instrument.
Args:
update: If ``True``, update the state by querying the
instrument. If ``False``, just use the latest values in memory.
This argument gets passed to the snapshot function.
max_chars: the maximum number of characters per line. The
readable snapshot will be cropped if this value is exceeded.
Defaults to 80 to be consistent with default terminal width.
"""
with self._snapshot_cache.snapshot() if update else nullcontext():
return super().print_readable_snapshot(update, max_chars)
class NodeDict(Mapping):
"""Mapping of dictionary structure results.
The mapping allows accessing the data with both the raw string path and
the QCoDeS parameter objects.
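Example:
    A hedged sketch; the node path, ``raw_result`` and the parameter are
    placeholders:
        >>> result = NodeDict(raw_result)
        >>> result["/dev1234/demods/0/rate"]   # access by raw string path
        >>> result[device.demods[0].rate]      # access by QCoDeS parameter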
"""
def __init__(self, tk_result: t.Union[t.Dict[str, t.Any], TKNodeDict]):
if not isinstance(tk_result, TKNodeDict):
self._result = TKNodeDict(tk_result)
else:
self._result = tk_result
def __repr__(self):
return repr(self._result)
def __getitem__(self, key: t.Union[str, ZIParameter]):
if isinstance(key, ZIParameter):
return self._result[key.zi_node.lower()]
return self._result[key]
def __iter__(self):
return iter(self._result)
def __len__(self):
return len(self._result)
def to_dict(self) -> t.Dict[str, t.Any]:
"""Convert the WildcardResult to a dictionary.
After conversion, :class:`Node` objects cannot be used to get items.
"""
return self._result.to_dict()
def tk_node_to_qcodes_list(tk_node: Node) -> t.List[str]:
"""Convert a toolkit node to a list of elements that form a QCoDeS object.
Args:
tk_node: Toolkit node to convert.
Returns:
List of strings that form a QCoDeS object.
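Example:
    A hedged illustration, assuming the node's ``raw_tree`` is
    ``('demods', '0', 'sample')``:
        >>> tk_node_to_qcodes_list(tk_node)
        ['demods0', 'sample']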
"""
if tk_node.raw_tree[-1].isdigit():
parents = tk_node.raw_tree
name = "value"
else:
parents = tk_node.raw_tree[:-1]
name = tk_node.raw_tree[-1]
# Attributes are not allowed to start with a number (#31)
name = "_" + name if name[0].isdigit() else name
parents = list(parents)
numbers = [subnode for subnode in parents if subnode.isdigit()]
while numbers:
number = numbers.pop()
index = parents.index(number)
parents[index - 1] = parents[index - 1] + number
parents.pop(index)
if not numbers:
numbers = [subnode for subnode in parents if subnode.isdigit()]
parents.append(name)
return parents
def tk_node_to_parameter(root: t.Any, tk_node: Node) -> t.Any:
"""Convert a Toolkit node into a QCoDeS Parameter.
Args:
root: Root from which the node should be derived.
tk_node: Toolkit node to convert.
Returns:
QCoDeS Parameter that matches the given tk node.
"""
qcodes_list = list(tk_node.raw_tree)
if qcodes_list[-1].isdigit():
qcodes_list.append("value")
current_layer = root
for element in qcodes_list[:-1]:
qcodes_list = tk_node_to_qcodes_list(tk_node)
if element.isdigit():
current_layer = current_layer[int(element)]
else:
current_layer = current_layer.submodules[element]
return current_layer.parameters[qcodes_list[-1]]
def _get_submodule(
layer, parents: t.List[str], snapshot_cache: ZISnapshotHelper
) -> ZINode:
"""Get the nested parent element for a node.
Reuse existing subnodes and automatically create them if they don't
exist.
Args:
layer: Current layer in the QCoDeS node hierarchy.
parents: Nested parents of a node as str.
snapshot_cache: Object of the snapshot cache.
Returns:
ZINode: direct parent of the node
"""
weird_nodes = ["tamp0", "tamp1"]
current_layer = layer
for i, node in enumerate(parents):
if node[-1].isdigit() and node not in weird_nodes:
offset = 0
for char in reversed(node):
if char.isdigit():
offset += 1
else:
break
number = int(node[-offset:])
name = node[:-offset]
if not current_layer.submodules or name not in current_layer.submodules:
# create channel_list
channel_list = ZIChannelList(
current_layer,
name,
ZINode,
zi_node="/".join(parents[:i] + [name]),
snapshot_cache=snapshot_cache,
)
current_layer.add_submodule(name, channel_list)
if len(current_layer.submodules[name]) <= number:
# Add new items to list until the required length is reached. (#31)
current_length = len(current_layer.submodules[name])
for item in range(number - current_length + 1):
module = ZINode(
current_layer,
name + str(current_length + item),
zi_node="/".join(
parents[:i] + [name, str(current_length + item)]
),
snapshot_cache=snapshot_cache,
)
current_layer.submodules[name].append(module)
current_layer = current_layer.submodules[name][number]
elif node not in current_layer.submodules:
module = ZINode(
current_layer,
node,
zi_node="/".join(parents[: i + 1]),
snapshot_cache=snapshot_cache,
)
current_layer.add_submodule(node, module)
current_layer = module
else:
current_layer = current_layer.submodules.get(node)
return current_layer
def init_nodetree(
layer,
nodetree: NodeTree,
snapshot_cache: ZISnapshotHelper,
blacklist: tuple = tuple(),
) -> None:
"""Generate nested qcodes parameter from the device nodetree.
Args:
layer: current layer in the nodetree.
nodetree: underlying toolkit node tree.
snapshot_cache: Instance of the SnapshotHelper.
blacklist: nodes to be blacklisted.
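Example:
    A hedged sketch mirroring how the Session class in this package uses
    the helper; ``instrument``, ``tk_root`` and ``cache`` are placeholders
    for an existing QCoDeS instrument, toolkit node tree and
    ZISnapshotHelper:
        >>> init_nodetree(instrument, tk_root, cache)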
"""
snapshot_blacklist = ["fwlog", "values"]
is_complex = re.compile("demods/./sample")
for node, info in nodetree:
if info.get("Node", "") in blacklist:
continue
try:
qcodes_list = tk_node_to_qcodes_list(node)
name = qcodes_list[-1]
parent = _get_submodule(layer, qcodes_list[:-1], snapshot_cache)
do_snapshot = (
"Stream" not in info.get("Properties")
and "ZIVector" not in info.get("Type")
and "Read" in info.get("Properties")
and not any(x in node.raw_tree for x in snapshot_blacklist)
)
parent.add_parameter(
parameter_class=ZIParameter,
name=name,
docstring=info.get("Description"),
unit=info.get("Unit")
if info.get("Unit") not in ["None", "Dependent"]
else None,
get_cmd=node._get,
set_cmd=node._set,
vals=ComplexNumbers()
if re.match(is_complex, info.get("Node").lower())
else None,
snapshot_value=do_snapshot,
snapshot_get=do_snapshot,
zi_node=info.get("Node"),
tk_node=node,
snapshot_cache=snapshot_cache,
)
except ValueError as e:
print(f"Node {info.get('Node')} could not be added as parameter\n", e) | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/qcodes_adaptions.py | qcodes_adaptions.py |
import typing as t
from typing import Optional
from zhinst.toolkit.driver.modules.impedance_module import (
ImpedanceModule as TKImpedanceModule,
)
from zhinst.qcodes.driver.modules.base_module import ZIBaseModule
if t.TYPE_CHECKING:
from zhinst.qcodes.session import Session
class ZIImpedanceModule(ZIBaseModule):
"""Implements a base Impedance Module for Lock-In instruments.
The Impedance Module corresponds to the Cal sub-tab in the LabOne User
Interface Impedance Analyzer tab. It allows the user to perform a
compensation that will be applied to impedance measurements.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/impedance_module.html
Args:
tk_object: Underlying zhinst-toolkit object.
session: Session to the Data Server.
name: Name of the module in QCoDeS.
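Example:
    A hedged sketch; the attribute name used to access the module through
    the session is an assumption based on the toolkit module handler:
        >>> imp_module = session.modules.impedance
        >>> imp_module.wait_done(step=0, timeout=60)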
"""
def __init__(
self,
tk_object: TKImpedanceModule,
session: "Session",
name: str = "impedance_module",
):
super().__init__(tk_object, session, name)
def wait_done(
self,
step: Optional[int] = None,
*,
timeout: float = 20.0,
sleep_time: float = 0.5,
) -> None:
"""Waits until the specified compensation step is complete.
Args:
step: The compensation step to wait for completion.
timeout: The maximum waiting time in seconds for the compensation
to complete (default: 20).
sleep_time: Time in seconds to wait between
requesting the state. (default: 0.5)
Raises:
TimeoutError: The compensation is not completed before timeout.
"""
return self._tk_object.wait_done(
step=step, timeout=timeout, sleep_time=sleep_time
)
def finish(self) -> None:
"""Stop the module."""
return self._tk_object.finish()
def finished(self, step: Optional[int] = None) -> bool:
"""Check if the calibration or a step of it is finished.
Args:
step: Calibration step. If not None this function checks if the
specified step is finished. Otherwise it checks if the
whole calibration is done.
Returns:
Flag if the calibration or a step is finished.
"""
return self._tk_object.finished(step=step) | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/driver/modules/impedance_module.py | impedance_module.py |
import typing as t
from zhinst.toolkit.driver.modules.scope_module import ScopeModule as TKScopeModule
from zhinst.qcodes.driver.modules.base_module import ZIBaseModule
if t.TYPE_CHECKING:
from zhinst.qcodes.session import Session
class ZIScopeModule(ZIBaseModule):
"""Scope Module.
The Scope Module corresponds to the functionality available in the Scope
tab in the LabOne User Interface and provides API users with an interface
to acquire assembled and scaled scope data from the instrument
programmatically.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/scope_module.html
Although it is possible to acquire scope data using the lower-level
subscribe/poll method, the Scope Module provides API users with several
advantages. Specifically, the Scope Module:
* Provides a uniform interface to acquire scope data from all instrument
classes (HF2 scope usage differs from MF and UHF devices, especially
with regard to scaling).
* Scales and offsets the scope wave data to get physically meaningful
values. If data is polled from the device node using subscribe/poll the
scaling and offset must be applied manually.
* Assembles large multi-block transferred scope data into single complete
records. When the scope is configured to record large scope lengths and
data is directly polled from the device node /DEV…/SCOPES/n/WAVE the data
is split into multiple blocks for efficient transfer of data from the
Data Server to the API; these must then be programmatically reassembled.
The Scope Module performs this assembly and returns complete scope
records (unless used in pass-through mode, mode=0).
* Can be configured to return the FFT of the acquired scope records
(with mode=3) as provided by the Scope Tab in the LabOne UI. FFT data is
not available from the device nodes in the /DEV/…./SCOPES/ branch using
subscribe/poll.
* Can be configured to average the acquired scope records using the
averager/ parameters.
* Can be configured to return a specific number of scope records using the
historylength parameter.
Args:
tk_object: Underlying zhinst-toolkit object.
session: Session to the Data Server.
name: Name of the module in QCoDeS.
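Example:
    A hedged sketch. The attribute name used to access the module through
    the session is an assumption based on the toolkit module handler, and
    ``mode`` is the scope module node mentioned above, exposed as a QCoDeS
    parameter (the value passed here is a placeholder):
        >>> scope_module = session.modules.scope
        >>> scope_module.mode(1)    # set the data processing mode
        >>> scope_module.finish()   # stop the module when done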
"""
def __init__(
self, tk_object: TKScopeModule, session: "Session", name: str = "scope_module"
):
super().__init__(tk_object, session, name)
def finish(self) -> None:
"""Stop the module."""
return self._tk_object.finish()
# File: /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/driver/modules/scope_module.py (package: zhinst-qcodes)