code
stringlengths
501
5.19M
package
stringlengths
2
81
path
stringlengths
9
304
filename
stringlengths
4
145
import six


class selenium(object):
    """Client for the ZAP Selenium component API (an optional ZAP add-on).

    Each property reads a Selenium option from the ZAP daemon and each
    ``set_option_*`` method writes one; all calls go through the shared
    ``zap`` client's ``_request`` helper and return the single value of
    the JSON response.
    """

    def __init__(self, zap):
        # Shared ZAP API client; provides `base` URL and `_request(url, params)`.
        self.zap = zap

    @property
    def option_browser_extensions(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionBrowserExtensions/')
        return six.next(six.itervalues(response))

    @property
    def option_chrome_binary_path(self):
        """
        Returns the current path to Chrome binary
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionChromeBinaryPath/')
        return six.next(six.itervalues(response))

    @property
    def option_chrome_driver_path(self):
        """
        Returns the current path to ChromeDriver
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionChromeDriverPath/')
        return six.next(six.itervalues(response))

    @property
    def option_firefox_binary_path(self):
        """
        Returns the current path to Firefox binary
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionFirefoxBinaryPath/')
        return six.next(six.itervalues(response))

    @property
    def option_firefox_driver_path(self):
        """
        Returns the current path to Firefox driver (geckodriver)
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionFirefoxDriverPath/')
        return six.next(six.itervalues(response))

    @property
    def option_ie_driver_path(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionIeDriverPath/')
        return six.next(six.itervalues(response))

    @property
    def option_last_directory(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionLastDirectory/')
        return six.next(six.itervalues(response))

    @property
    def option_phantom_js_binary_path(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(self.zap.base + 'selenium/view/optionPhantomJsBinaryPath/')
        return six.next(six.itervalues(response))

    def set_option_chrome_binary_path(self, string, apikey=''):
        """
        Sets the current path to Chrome binary
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(
            self.zap.base + 'selenium/action/setOptionChromeBinaryPath/',
            {'String': string, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_option_chrome_driver_path(self, string, apikey=''):
        """
        Sets the current path to ChromeDriver
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(
            self.zap.base + 'selenium/action/setOptionChromeDriverPath/',
            {'String': string, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_option_firefox_binary_path(self, string, apikey=''):
        """
        Sets the current path to Firefox binary
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(
            self.zap.base + 'selenium/action/setOptionFirefoxBinaryPath/',
            {'String': string, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_option_firefox_driver_path(self, string, apikey=''):
        """
        Sets the current path to Firefox driver (geckodriver)
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(
            self.zap.base + 'selenium/action/setOptionFirefoxDriverPath/',
            {'String': string, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_option_ie_driver_path(self, string, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(
            self.zap.base + 'selenium/action/setOptionIeDriverPath/',
            {'String': string, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_option_last_directory(self, string, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(
            self.zap.base + 'selenium/action/setOptionLastDirectory/',
            {'String': string, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_option_phantom_js_binary_path(self, string, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        response = self.zap._request(
            self.zap.base + 'selenium/action/setOptionPhantomJsBinaryPath/',
            {'String': string, 'apikey': apikey})
        return six.next(six.itervalues(response))
zaproxy
/zaproxy-0.1.1.tar.gz/zaproxy-0.1.1/src/zapv2/selenium.py
selenium.py
import six


class users(object):
    """Client for the ZAP users API.

    Wraps the ``users/view/*`` and ``users/action/*`` endpoints of the ZAP
    daemon; every method forwards its arguments as request parameters via
    the shared ``zap`` client and returns the single value of the JSON
    response.
    """

    def __init__(self, zap):
        # Shared ZAP API client; provides `base` URL and `_request(url, params)`.
        self.zap = zap

    def users_list(self, contextid=None):
        """
        Gets a list of users that belong to the context with the given ID, or all users if none provided.
        """
        query = {}
        # Only send contextId when the caller restricts the listing to one context.
        if contextid is not None:
            query['contextId'] = contextid
        response = self.zap._request(self.zap.base + 'users/view/usersList/', query)
        return six.next(six.itervalues(response))

    def get_user_by_id(self, contextid, userid):
        """
        Gets the data of the user with the given ID that belongs to the context with the given ID.
        """
        response = self.zap._request(
            self.zap.base + 'users/view/getUserById/',
            {'contextId': contextid, 'userId': userid})
        return six.next(six.itervalues(response))

    def get_authentication_credentials_config_params(self, contextid):
        """
        Gets the configuration parameters for the credentials of the context with the given ID.
        """
        response = self.zap._request(
            self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/',
            {'contextId': contextid})
        return six.next(six.itervalues(response))

    def get_authentication_credentials(self, contextid, userid):
        """
        Gets the authentication credentials of the user with given ID that belongs to the context with the given ID.
        """
        response = self.zap._request(
            self.zap.base + 'users/view/getAuthenticationCredentials/',
            {'contextId': contextid, 'userId': userid})
        return six.next(six.itervalues(response))

    def get_authentication_state(self, contextid, userid):
        """
        Gets the authentication state information for the user identified by the Context and User Ids.
        """
        response = self.zap._request(
            self.zap.base + 'users/view/getAuthenticationState/',
            {'contextId': contextid, 'userId': userid})
        return six.next(six.itervalues(response))

    def get_authentication_session(self, contextid, userid):
        """
        Gets the authentication session information for the user identified by the Context and User Ids, e.g. cookies and realm credentials.
        """
        response = self.zap._request(
            self.zap.base + 'users/view/getAuthenticationSession/',
            {'contextId': contextid, 'userId': userid})
        return six.next(six.itervalues(response))

    def new_user(self, contextid, name, apikey=''):
        """
        Creates a new user with the given name for the context with the given ID.
        """
        response = self.zap._request(
            self.zap.base + 'users/action/newUser/',
            {'contextId': contextid, 'name': name, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def remove_user(self, contextid, userid, apikey=''):
        """
        Removes the user with the given ID that belongs to the context with the given ID.
        """
        response = self.zap._request(
            self.zap.base + 'users/action/removeUser/',
            {'contextId': contextid, 'userId': userid, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_user_enabled(self, contextid, userid, enabled, apikey=''):
        """
        Sets whether or not the user, with the given ID that belongs to the context with the given ID, should be enabled.
        """
        response = self.zap._request(
            self.zap.base + 'users/action/setUserEnabled/',
            {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_user_name(self, contextid, userid, name, apikey=''):
        """
        Renames the user with the given ID that belongs to the context with the given ID.
        """
        response = self.zap._request(
            self.zap.base + 'users/action/setUserName/',
            {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''):
        """
        Sets the authentication credentials for the user with the given ID that belongs to the context with the given ID.
        """
        query = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
        # The credentials blob is optional; omit the parameter entirely when absent.
        if authcredentialsconfigparams is not None:
            query['authCredentialsConfigParams'] = authcredentialsconfigparams
        response = self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', query)
        return six.next(six.itervalues(response))

    def authenticate_as_user(self, contextid, userid, apikey=''):
        """
        Tries to authenticate as the identified user, returning the authentication request and whether it appears to have succeeded.
        """
        response = self.zap._request(
            self.zap.base + 'users/action/authenticateAsUser/',
            {'contextId': contextid, 'userId': userid, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def poll_as_user(self, contextid, userid, apikey=''):
        """
        Tries to poll as the identified user, returning the authentication request and whether it appears to have succeeded. This will only work if the polling verification strategy has been configured.
        """
        response = self.zap._request(
            self.zap.base + 'users/action/pollAsUser/',
            {'contextId': contextid, 'userId': userid, 'apikey': apikey})
        return six.next(six.itervalues(response))

    def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''):
        """
        Sets fields in the authentication state for the user identified by the Context and User Ids.
        """
        query = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
        # Each state field is optional and only sent when explicitly provided.
        if lastpollresult is not None:
            query['lastPollResult'] = lastpollresult
        if lastpolltimeinms is not None:
            query['lastPollTimeInMs'] = lastpolltimeinms
        if requestssincelastpoll is not None:
            query['requestsSinceLastPoll'] = requestssincelastpoll
        response = self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', query)
        return six.next(six.itervalues(response))

    def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''):
        """
        Sets the specified cookie for the user identified by the Context and User Ids.
        """
        query = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey}
        # Cookie path and secure flag are optional attributes.
        if path is not None:
            query['path'] = path
        if secure is not None:
            query['secure'] = secure
        response = self.zap._request(self.zap.base + 'users/action/setCookie/', query)
        return six.next(six.itervalues(response))
zaproxy
/zaproxy-0.1.1.tar.gz/zaproxy-0.1.1/src/zapv2/users.py
users.py
zaptools ======== PreAlpha 2 ZapTools is a wrapper to handle websocket connections, based on events, for a nice and smooth integration. Usage ----- How to use **zaptools** ~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python from fastapi import FastAPI from zaptools import FastApiZapAdapter, SocketClient app : FastAPI = FastAPI() adapter = FastApiZapAdapter(app= app, route= "/" ) @adapter.on_client_connected async def on_connected(client : SocketClient , adapter : FastApiZapAdapter): print("client connected : ", client.id_connection) await client.send_event("myEvent", "Data Payload to send") @adapter.on_event("event") async def event(payload, client:SocketClient , adapter : FastApiZapAdapter ): print(payload) await client.send_event("anyEvent", {"msg" : "a Json Format"}) @adapter.on_client_disconnected async def client_disconnected(client : SocketClient , adapter : FastApiZapAdapter): print("client disconnected: ", client.id_connection) **zaptools** is only compatible with FastAPI apps, so we need to create a **FastAPI** app and then create an instance of *FastApiZapAdapter*; the constructor needs the app (FastAPI app) and a specific route ("/") .. code-block:: python app : FastAPI = FastAPI() adapter = FastApiZapAdapter(app= app, route= "/" ) Now we can use the :code:`adapter` to define the function to be called when an event is triggered. For example, if we need to check whether a new client is connected to our Socket server, and then respond to the client: .. code-block:: python @adapter.on_client_connected async def on_connected(client : SocketClient , adapter : FastApiZapAdapter): print("client connected : ", client.id_connection) await client.send_event("myEvent", "Data Payload to send") :code:`SocketClient` is a class that has an :code:`id_connection` attribute and a :code:`send_event` method; remember to always :code:`await` the :code:`send_event` call. Also, you can define a function to be called when a client is disconnected: ..
code-block:: python @adapter.on_client_disconnected async def client_disconnected(client : SocketClient , adapter : FastApiZapAdapter): print("client disconnected: ", client.id_connection) We can register an event; in this case we receive the payload: .. code-block:: python @adapter.on_event("eventName") async def event(payload, client:SocketClient , adapter : FastApiZapAdapter ): print(payload) await client.send_event("eventName", {"msg" : "a Json Format"}) And finally, start the FastAPI app normally (with uvicorn). Installation ------------ :code:`pip install zaptools` Requirements ------------ FastApi Compatibility ------------- Python 3.7+ Licence ------- MIT Authors ------- zaptools was written by Nathan Mejia
zaptools
/zaptools-0.0.4.tar.gz/zaptools-0.0.4/README.rst
README.rst
GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. 
Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. 
A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. 
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 
This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.
zapy
/zapy-0.0.14.tar.gz/zapy-0.0.14/LICENSE.md
LICENSE.md
==================== Zaqar Tempest Plugin ==================== Tempest plugin for the Zaqar project. It contains the tempest plugin for the functional testing of the Zaqar project. * Free software: Apache license * Documentation: http://docs.openstack.org/developer/zaqar * Source: http://git.openstack.org/cgit/openstack/zaqar-tempest-plugin * Bugs: http://bugs.launchpad.net/zaqar Running the tests ----------------- To run all tests from this plugin, install zaqar, zaqar-tempest-plugin and tempest into your environment, make sure Tempest is configured correctly, and then from the tempest repo, run:: $ tempest run --regex zaqar_tempest_plugin --config-file /etc/tempest/tempest.conf
zaqar-tempest-plugin
/zaqar_tempest_plugin-1.6.0.tar.gz/zaqar_tempest_plugin-1.6.0/README.rst
README.rst
If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/zaqar
zaqar-tempest-plugin
/zaqar_tempest_plugin-1.6.0.tar.gz/zaqar_tempest_plugin-1.6.0/CONTRIBUTING.rst
CONTRIBUTING.rst
import urllib

from oslo_serialization import jsonutils as json
from oslo_utils import uuidutils
from tempest.lib.common import rest_client

from zaqar_tempest_plugin.api_schema.response.v1 \
    import queues as v1schema
from zaqar_tempest_plugin.api_schema.response.v1_1 \
    import queues as v11schema
from zaqar_tempest_plugin.api_schema.response.v2 \
    import queues as v2schema


class MessagingClient(rest_client.RestClient):
    """Base REST client for the Zaqar messaging service.

    Sets a per-instance ``Client-ID`` header (required by the Zaqar API)
    and a default API version/URI prefix that subclasses override.
    """

    def __init__(self, auth_provider, service, region, **kwargs):
        super(MessagingClient, self).__init__(
            auth_provider, service, region, **kwargs)
        self.version = '1'
        self.uri_prefix = 'v{0}'.format(self.version)
        # Zaqar requires every client to identify itself with a UUID.
        client_id = uuidutils.generate_uuid(dashed=False)
        self.headers = {'Client-ID': client_id}


class V1MessagingClient(MessagingClient):
    """Client for the Zaqar v1 API."""

    def __init__(self, auth_provider, service, region, **kwargs):
        super(V1MessagingClient, self).__init__(
            auth_provider, service, region, **kwargs)
        self.version = '1'

    def list_queues(self):
        """List all queues; validates the body unless the reply is empty."""
        uri = '{0}/queues'.format(self.uri_prefix)
        resp, body = self.get(uri)
        # 204 means "no queues" and carries no body to decode/validate.
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v1schema.list_queues, resp, body)
        return resp, body

    def create_queue(self, queue_name):
        """Create a queue; the API answers 201 on success."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.put(uri, body=None)
        self.expected_success(201, resp.status)
        return resp, body

    def show_queue(self, queue_name):
        """Check queue existence; v1 answers 204 with no body."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri)
        self.expected_success(204, resp.status)
        return resp, body

    def head_queue(self, queue_name):
        """HEAD a queue to test for existence."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.head(uri)
        self.expected_success(204, resp.status)
        return resp, body

    def delete_queue(self, queue_name):
        """Delete a queue."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return resp, body

    def show_queue_stats(self, queue_name):
        """Fetch and schema-validate queue statistics."""
        uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri)
        body = json.loads(body)
        self.validate_response(v1schema.queue_stats, resp, body)
        return resp, body

    def show_queue_metadata(self, queue_name):
        """Fetch queue metadata as a decoded dict."""
        uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body

    def set_queue_metadata(self, queue_name, rbody):
        """Replace queue metadata with *rbody*."""
        uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
        resp, body = self.put(uri, body=json.dumps(rbody))
        self.expected_success(204, resp.status)
        return resp, body

    def post_messages(self, queue_name, rbody):
        """Post messages to a queue and validate the response."""
        uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
        resp, body = self.post(uri, body=json.dumps(rbody),
                               extra_headers=True,
                               headers=self.headers)
        body = json.loads(body)
        self.validate_response(v1schema.post_messages, resp, body)
        return resp, body

    def list_messages(self, queue_name):
        """List messages (echo=True so the poster sees its own messages)."""
        uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
                                                         queue_name)
        resp, body = self.get(uri, extra_headers=True, headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v1schema.list_messages, resp, body)
        return resp, body

    def show_single_message(self, message_uri):
        """GET one message by its full URI."""
        resp, body = self.get(message_uri, extra_headers=True,
                              headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v1schema.get_single_message, resp, body)
        return resp, body

    def show_multiple_messages(self, message_uri):
        """GET several messages by a combined URI."""
        resp, body = self.get(message_uri, extra_headers=True,
                              headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v1schema.get_multiple_messages, resp,
                                   body)
        return resp, body

    def delete_messages(self, message_uri):
        """Delete the message(s) addressed by *message_uri*."""
        resp, body = self.delete(message_uri)
        self.expected_success(204, resp.status)
        return resp, body

    def post_claims(self, queue_name, rbody, url_params=False):
        """Claim messages; *url_params* may hold query-string options."""
        uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
        if url_params:
            uri += '?%s' % urllib.parse.urlencode(url_params)
        resp, body = self.post(uri, body=json.dumps(rbody),
                               extra_headers=True,
                               headers=self.headers)
        body = json.loads(body)
        self.validate_response(v1schema.claim_messages, resp, body)
        return resp, body

    def query_claim(self, claim_uri):
        """GET an existing claim by its URI."""
        resp, body = self.get(claim_uri)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v1schema.query_claim, resp, body)
        return resp, body

    def update_claim(self, claim_uri, rbody):
        """PATCH an existing claim (e.g. to extend its TTL)."""
        resp, body = self.patch(claim_uri, body=json.dumps(rbody))
        self.expected_success(204, resp.status)
        return resp, body

    def delete_claim(self, claim_uri):
        """Release a claim."""
        resp, body = self.delete(claim_uri)
        self.expected_success(204, resp.status)
        return resp, body


class V11MessagingClient(MessagingClient):
    """Client for the Zaqar v1.1 API (Client-ID header on every call)."""

    def __init__(self, auth_provider, service, region, **kwargs):
        super(V11MessagingClient, self).__init__(
            auth_provider, service, region, **kwargs)
        self.version = '1.1'
        self.uri_prefix = 'v{0}'.format(self.version)

    def list_queues(self):
        """List all queues; validates the body unless the reply is empty."""
        uri = '{0}/queues'.format(self.uri_prefix)
        resp, body = self.get(uri, headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v11schema.list_queues, resp, body)
        return resp, body

    def create_queue(self, queue_name):
        """Create a queue; the API answers 201 on success."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.put(uri, body=None, headers=self.headers)
        self.expected_success(201, resp.status)
        return resp, body

    def show_queue(self, queue_name):
        """GET a queue; v1.1 answers 200 (unlike v1's 204)."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri, headers=self.headers)
        self.expected_success(200, resp.status)
        return resp, body

    def delete_queue(self, queue_name):
        """Delete a queue."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.delete(uri, headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def show_queue_stats(self, queue_name):
        """Fetch and schema-validate queue statistics."""
        uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri, headers=self.headers)
        body = json.loads(body)
        self.validate_response(v11schema.queue_stats, resp, body)
        return resp, body

    def show_queue_metadata(self, queue_name):
        """Fetch queue metadata as a decoded dict."""
        uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri, headers=self.headers)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body

    def set_queue_metadata(self, queue_name, rbody):
        """Replace queue metadata with *rbody*."""
        uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
        resp, body = self.put(uri, body=json.dumps(rbody),
                              headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def post_messages(self, queue_name, rbody):
        """Post messages to a queue and validate the response."""
        uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
        resp, body = self.post(uri, body=json.dumps(rbody),
                               extra_headers=True,
                               headers=self.headers)
        body = json.loads(body)
        self.validate_response(v11schema.post_messages, resp, body)
        return resp, body

    def list_messages(self, queue_name):
        """List messages (echo=True so the poster sees its own messages)."""
        uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
                                                         queue_name)
        resp, body = self.get(uri, extra_headers=True, headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v11schema.list_messages, resp, body)
        return resp, body

    def show_single_message(self, message_uri):
        """GET one message by its full URI."""
        resp, body = self.get(message_uri, extra_headers=True,
                              headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v11schema.get_single_message, resp, body)
        return resp, body

    def show_multiple_messages(self, message_uri):
        """GET several messages; skip validation only on 404."""
        resp, body = self.get(message_uri, extra_headers=True,
                              headers=self.headers)
        if resp['status'] != '404':
            body = json.loads(body)
            self.validate_response(v11schema.get_multiple_messages, resp,
                                   body)
        return resp, body

    def delete_messages(self, message_uri):
        """Delete the message(s) addressed by *message_uri*."""
        resp, body = self.delete(message_uri, headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def post_claims(self, queue_name, rbody, url_params=False):
        """Claim messages; *url_params* may hold query-string options."""
        uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
        if url_params:
            uri += '?%s' % urllib.parse.urlencode(url_params)
        resp, body = self.post(uri, body=json.dumps(rbody),
                               extra_headers=True,
                               headers=self.headers)
        body = json.loads(body)
        self.validate_response(v11schema.claim_messages, resp, body)
        return resp, body

    def query_claim(self, claim_uri):
        """GET an existing claim by its URI."""
        resp, body = self.get(claim_uri, headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v11schema.query_claim, resp, body)
        return resp, body

    def update_claim(self, claim_uri, rbody):
        """PATCH an existing claim (e.g. to extend its TTL)."""
        resp, body = self.patch(claim_uri, body=json.dumps(rbody),
                                headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def delete_claim(self, claim_uri):
        """Release a claim."""
        resp, body = self.delete(claim_uri, headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body


class V2MessagingClient(MessagingClient):
    """Client for the Zaqar v2 API (adds purge and subscriptions)."""

    def __init__(self, auth_provider, service, region, **kwargs):
        super(V2MessagingClient, self).__init__(
            auth_provider, service, region, **kwargs)
        self.version = '2'
        self.uri_prefix = 'v{0}'.format(self.version)

    def list_queues(self, url_params=False):
        """List queues, optionally filtered via query-string *url_params*."""
        uri = '{0}/queues'.format(self.uri_prefix)
        if url_params:
            uri += '?%s' % urllib.parse.urlencode(url_params)
        resp, body = self.get(uri, headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v2schema.list_queues, resp, body)
        return resp, body

    def create_queue(self, queue_name):
        """Create a queue; the API answers 201 on success."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.put(uri, body=None, headers=self.headers)
        self.expected_success(201, resp.status)
        return resp, body

    def show_queue(self, queue_name):
        """GET a queue; answers 200."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri, headers=self.headers)
        self.expected_success(200, resp.status)
        return resp, body

    def delete_queue(self, queue_name):
        """Delete a queue."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.delete(uri, headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def purge_queue(self, queue_name, resource=None):
        """Purge a queue's messages and/or subscriptions.

        By default both resource types are purged; pass *resource* (a list)
        to restrict which types are purged.
        """
        uri = '{0}/queues/{1}/purge'.format(self.uri_prefix, queue_name)
        rbody = {"resource_types": ["messages", "subscriptions"]}
        if resource:
            rbody = {"resource_types": resource}
        resp, body = self.post(uri, body=json.dumps(rbody),
                               headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def show_queue_stats(self, queue_name):
        """Fetch and schema-validate queue statistics."""
        uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri, headers=self.headers)
        body = json.loads(body)
        self.validate_response(v2schema.queue_stats, resp, body)
        return resp, body

    def show_queue_metadata(self, queue_name):
        """Fetch queue metadata (v2 serves it from the queue resource)."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        resp, body = self.get(uri, headers=self.headers)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body

    def set_queue_metadata(self, queue_name, rbody):
        """PATCH queue metadata using the v2 JSON-patch media type."""
        uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
        headers = self.headers.copy()
        # v2 requires its dedicated JSON-patch content type here.
        headers['Content-Type'] =\
            'application/openstack-messaging-v2.0-json-patch'
        resp, body = self.patch(uri, body=json.dumps(rbody),
                                headers=headers)
        self.expected_success(200, resp.status)
        return resp, body

    def post_messages(self, queue_name, rbody):
        """Post messages to a queue and validate the response."""
        uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
        resp, body = self.post(uri, body=json.dumps(rbody),
                               extra_headers=True,
                               headers=self.headers)
        body = json.loads(body)
        self.validate_response(v2schema.post_messages, resp, body)
        return resp, body

    def list_messages(self, queue_name):
        """List messages (echo=True so the poster sees its own messages)."""
        uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
                                                         queue_name)
        resp, body = self.get(uri, extra_headers=True, headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v2schema.list_messages, resp, body)
        return resp, body

    def show_single_message(self, message_uri):
        """GET one message by its full URI."""
        resp, body = self.get(message_uri, extra_headers=True,
                              headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v2schema.get_single_message, resp, body)
        return resp, body

    def show_multiple_messages(self, message_uri):
        """GET several messages; skip validation only on 404."""
        resp, body = self.get(message_uri, extra_headers=True,
                              headers=self.headers)
        if resp['status'] != '404':
            body = json.loads(body)
            self.validate_response(v2schema.get_multiple_messages, resp,
                                   body)
        return resp, body

    def delete_messages(self, message_uri):
        """Delete the message(s) addressed by *message_uri*."""
        resp, body = self.delete(message_uri, headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def post_claims(self, queue_name, rbody, url_params=False):
        """Claim messages; a 204 reply means nothing was claimable."""
        uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
        if url_params:
            uri += '?%s' % urllib.parse.urlencode(url_params)
        resp, body = self.post(uri, body=json.dumps(rbody),
                               extra_headers=True,
                               headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v2schema.claim_messages, resp, body)
        return resp, body

    def query_claim(self, claim_uri):
        """GET an existing claim by its URI."""
        resp, body = self.get(claim_uri, headers=self.headers)
        if resp['status'] != '204':
            body = json.loads(body)
            self.validate_response(v2schema.query_claim, resp, body)
        return resp, body

    def update_claim(self, claim_uri, rbody):
        """PATCH an existing claim (e.g. to extend its TTL)."""
        resp, body = self.patch(claim_uri, body=json.dumps(rbody),
                                headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def delete_claim(self, claim_uri):
        """Release a claim."""
        resp, body = self.delete(claim_uri, headers=self.headers)
        self.expected_success(204, resp.status)
        return resp, body

    def create_subscription(self, queue_name, rbody):
        """Create a subscription on a queue and validate the response."""
        uri = '{0}/queues/{1}/subscriptions'.format(self.uri_prefix,
                                                    queue_name)
        resp, body = self.post(uri, body=json.dumps(rbody),
                               extra_headers=True,
                               headers=self.headers)
        body = json.loads(body)
        self.validate_response(v2schema.create_subscription, resp, body)
        return resp, body

    def delete_subscription(self, queue_name, subscription_id):
        """Delete one subscription by id."""
        uri = '{0}/queues/{1}/subscriptions/{2}'.format(self.uri_prefix,
                                                        queue_name,
                                                        subscription_id)
        resp, body = self.delete(uri, headers=self.headers)
        return resp, body

    def list_subscription(self, queue_name):
        """List a queue's subscriptions and validate the response."""
        uri = '{0}/queues/{1}/subscriptions'.format(self.uri_prefix,
                                                    queue_name)
        resp, body = self.get(uri, headers=self.headers)
        body = json.loads(body)
        self.validate_response(v2schema.list_subscriptions, resp, body)
        return resp, body

    def show_subscription(self, queue_name, subscription_id):
        """GET one subscription by id and validate the response."""
        uri = '{0}/queues/{1}/subscriptions/{2}'.format(self.uri_prefix,
                                                        queue_name,
                                                        subscription_id)
        resp, body = self.get(uri, headers=self.headers)
        body = json.loads(body)
        self.validate_response(v2schema.show_single_subscription, resp,
                               body)
        return resp, body

    def update_subscription(self, queue_name, subscription_id, rbody):
        """PATCH one subscription by id."""
        uri = '{0}/queues/{1}/subscriptions/{2}'.format(self.uri_prefix,
                                                        queue_name,
                                                        subscription_id)
        resp, body = self.patch(uri, body=json.dumps(rbody),
                                headers=self.headers)
        return resp, body
zaqar-tempest-plugin
/zaqar_tempest_plugin-1.6.0.tar.gz/zaqar_tempest_plugin-1.6.0/zaqar_tempest_plugin/services/messaging/json/messaging_client.py
messaging_client.py
"""JSON-schema definitions for Zaqar v2 queue/message/claim responses.

Each ``*_schema``-style dict pairs the expected HTTP status code(s) with a
JSON schema that tempest's ``validate_response`` applies to the decoded
response body.

Fix applied: in ``post_messages`` and ``list_messages`` the ``'required'``
key previously sat outside ``'response_body'`` (as a sibling of
``'status_code'``), where the JSON-schema validator never sees it, so those
keys were silently unenforced. It has been moved inside ``'response_body'``
to match every other schema in this module (e.g. ``list_queues``).
"""

# A single "links" entry (rel + href) used in paginated listings.
list_link = {
    'type': 'object',
    'properties': {
        'rel': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        }
    },
    'required': ['href', 'rel']
}

# One queue entry in a queue listing.
list_queue = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'metadata': {'type': 'object'}
    },
    'required': ['name', 'href']
}

list_queues = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': {
                'type': 'array',
                'items': list_link,
                'maxItems': 1
            },
            'queues': {
                'type': 'array',
                'items': list_queue
            }
        },
        'required': ['links', 'queues']
    }
}

# Message/claim ages are non-negative seconds.
age = {
    'type': 'number',
    'minimum': 0
}

# Link to the oldest/newest message in queue stats.
message_link = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'age': age,
        'created': {
            'type': 'string',
            'format': 'date-time'
        }
    },
    'required': ['href', 'age', 'created']
}

messages = {
    'type': 'object',
    'properties': {
        'free': {'type': 'number'},
        'claimed': {'type': 'number'},
        'total': {'type': 'number'},
        'oldest': message_link,
        'newest': message_link
    },
    'required': ['free', 'claimed', 'total']
}

queue_stats = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': messages
        },
        'required': ['messages']
    }
}

resource_schema = {
    'type': 'array',
    'items': {
        'type': 'string'
    },
    'minItems': 1
}

post_messages = {
    'status_code': [201],
    'response_body': {
        'type': 'object',
        'properties': {
            'resources': resource_schema,
            'partial': {'type': 'boolean'}
        },
        # Moved inside response_body so the validator actually enforces it.
        'required': ['resources', 'partial']
    }
}

# TTLs must be at least one second.
message_ttl = {
    'type': 'number',
    'minimum': 1
}

list_messages_links = {
    'type': 'array',
    'maxItems': 1,
    'minItems': 0,
    'items': {
        'type': 'object',
        'properties': {
            'rel': {'type': 'string'},
            'href': {'type': 'string'}
        },
        'required': ['rel', 'href']
    }
}

list_messages_response = {
    'type': 'array',
    'minItems': 0,
    'items': {
        'type': 'object',
        'properties': {
            'href': {'type': 'string'},
            'ttl': message_ttl,
            'age': age,
            'body': {'type': 'object'},
            'checksum': {'type': 'string'},
        },
        'required': ['href', 'ttl', 'age', 'body']
    }
}

list_messages = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': list_messages_links,
            'messages': list_messages_response
        },
        # Moved inside response_body so the validator actually enforces it.
        'required': ['links', 'messages']
    }
}

single_message = {
    'type': 'object',
    'properties': {
        'href': {'type': 'string'},
        'ttl': message_ttl,
        'age': age,
        'body': {'type': 'object'},
        'id': {'type': 'string'},
        'checksum': {'type': 'string'},
    },
    'required': ['href', 'ttl', 'age', 'body', 'id']
}

get_single_message = {
    'status_code': [200],
    'response_body': single_message
}

get_multiple_messages = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': {
                "type": "array",
                "items": single_message,
                "minItems": 1,
            }
        }
    }
}

# NOTE(review): defined but not referenced by any schema in this module
# (claim_messages uses single_message instead); kept for import
# compatibility.
messages_claimed = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'ttl': message_ttl,
        'age': {'type': 'number'},
        'body': {'type': 'object'},
        'id': {'type': 'string'},
        'checksum': {'type': 'string'},
    },
    'required': ['href', 'ttl', 'age', 'body', 'id']
}

claim_messages = {
    'status_code': [201, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': {
                "type": "array",
                "items": single_message,
                "minItems": 1,
            }
        }
    }
}

claim_ttl = {
    'type': 'number',
    'minimum': 1
}

query_claim = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'age': {'type': 'number'},
            'ttl': claim_ttl,
            'messages': {
                'type': 'array',
                'minItems': 1
            }
        },
        'required': ['ttl', 'age', 'messages']
    }
}

create_subscription = {
    'status_code': [201],
    'response_body': {
        'type': 'object',
        'properties': {
            'subscription_id': {'type': 'string'},
        },
        'required': ['subscription_id']
    }
}

single_subscription = {
    'type': 'object',
    'properties': {
        'subscriber': {'type': 'string'},
        'source': {'type': 'string'},
        'options': {'type': 'object'},
        'id': {'type': 'string'},
        'ttl': message_ttl,
    },
    'required': ['subscriber', 'source', 'options', 'id', 'ttl']
}

show_single_subscription = {
    'status_code': [200],
    'response_body': single_subscription
}

list_subscriptions = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'subscriptions': {
                "type": "array",
                "items": single_subscription,
            },
            'links': {
                'type': 'array',
                'items': list_link,
                'maxItems': 1
            },
        },
        'required': ['subscriptions', 'links']
    }
}
zaqar-tempest-plugin
/zaqar_tempest_plugin-1.6.0.tar.gz/zaqar_tempest_plugin-1.6.0/zaqar_tempest_plugin/api_schema/response/v2/queues.py
queues.py
"""Tempest response schemas for the Zaqar v1.1 queues API.

Each schema dict follows the tempest convention: ``status_code`` lists
the accepted HTTP status codes and ``response_body`` is a JSON schema
that the response payload is validated against.
"""

# Pagination link: {"rel": ..., "href": ...}
list_link = {
    'type': 'object',
    'properties': {
        'rel': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        }
    },
    'required': ['href', 'rel']
}

# A single queue entry in a listing.
list_queue = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'metadata': {'type': 'object'}
    },
    'required': ['name', 'href']
}

list_queues = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': {
                'type': 'array',
                'items': list_link,
                'maxItems': 1
            },
            'queues': {
                'type': 'array',
                'items': list_queue
            }
        },
        'required': ['links', 'queues']
    }
}

age = {
    'type': 'number',
    'minimum': 0
}

message_link = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'age': age,
        'created': {
            'type': 'string',
            'format': 'date-time'
        }
    },
    'required': ['href', 'age', 'created']
}

messages = {
    'type': 'object',
    'properties': {
        'free': {'type': 'number'},
        'claimed': {'type': 'number'},
        'total': {'type': 'number'},
        'oldest': message_link,
        'newest': message_link
    },
    'required': ['free', 'claimed', 'total']
}

queue_stats = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': messages
        },
        'required': ['messages']
    }
}

resource_schema = {
    'type': 'array',
    'items': {
        'type': 'string'
    },
    'minItems': 1
}

post_messages = {
    'status_code': [201],
    'response_body': {
        'type': 'object',
        'properties': {
            'resources': resource_schema,
            'partial': {'type': 'boolean'}
        },
        # Moved inside 'response_body': a 'required' key at the top level
        # of the tempest schema dict is not part of the JSON schema and
        # was silently ignored by the validator.
        'required': ['resources', 'partial']
    }
}

message_ttl = {
    'type': 'number',
    'minimum': 1
}

list_messages_links = {
    'type': 'array',
    'maxItems': 1,
    'minItems': 1,
    'items': {
        'type': 'object',
        'properties': {
            'rel': {'type': 'string'},
            'href': {'type': 'string'}
        },
        'required': ['rel', 'href']
    }
}

list_messages_response = {
    'type': 'array',
    'minItems': 1,
    'items': {
        'type': 'object',
        'properties': {
            'href': {'type': 'string'},
            'ttl': message_ttl,
            'age': age,
            'body': {'type': 'object'},
            'checksum': {'type': 'string'},
        },
        'required': ['href', 'ttl', 'age', 'body']
    }
}

list_messages = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': list_messages_links,
            'messages': list_messages_response
        },
        # Moved inside 'response_body' (see post_messages above).
        'required': ['links', 'messages']
    }
}

single_message = {
    'type': 'object',
    'properties': {
        'href': {'type': 'string'},
        'ttl': message_ttl,
        'age': age,
        'body': {'type': 'object'},
        'id': {'type': 'string'},
        'checksum': {'type': 'string'},
    },
    'required': ['href', 'ttl', 'age', 'body', 'id']
}

get_single_message = {
    'status_code': [200],
    'response_body': single_message
}

get_multiple_messages = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': {
                "type": "array",
                "items": single_message,
                "minItems": 1,
            }
        }
    }
}

messages_claimed = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'ttl': message_ttl,
        'age': {'type': 'number'},
        'body': {'type': 'object'},
        'id': {'type': 'string'},
        'checksum': {'type': 'string'},
    },
    'required': ['href', 'ttl', 'age', 'body', 'id']
}

claim_messages = {
    'status_code': [201, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': {
                "type": "array",
                "items": single_message,
                "minItems": 1,
            }
        }
    }
}

claim_ttl = {
    'type': 'number',
    'minimum': 1
}

query_claim = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'age': {'type': 'number'},
            'ttl': claim_ttl,
            'messages': {
                'type': 'array',
                'minItems': 1
            }
        },
        'required': ['ttl', 'age', 'messages']
    }
}
zaqar-tempest-plugin
/zaqar_tempest_plugin-1.6.0.tar.gz/zaqar_tempest_plugin-1.6.0/zaqar_tempest_plugin/api_schema/response/v1_1/queues.py
queues.py
"""Tempest response schemas for the Zaqar v1 queues API.

Each schema dict follows the tempest convention: ``status_code`` lists
the accepted HTTP status codes and ``response_body`` is a JSON schema
that the response payload is validated against.
"""

# Pagination link: {"rel": ..., "href": ...}
list_link = {
    'type': 'object',
    'properties': {
        'rel': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        }
    },
    'required': ['href', 'rel']
}

# A single queue entry in a listing.
list_queue = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'metadata': {'type': 'object'}
    },
    'required': ['name', 'href']
}

list_queues = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': {
                'type': 'array',
                'items': list_link,
                'maxItems': 1
            },
            'queues': {
                'type': 'array',
                'items': list_queue
            }
        },
        'required': ['links', 'queues']
    }
}

age = {
    'type': 'number',
    'minimum': 0
}

message_link = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'age': age,
        'created': {
            'type': 'string',
            'format': 'date-time'
        }
    },
    'required': ['href', 'age', 'created']
}

messages = {
    'type': 'object',
    'properties': {
        'free': {'type': 'number'},
        'claimed': {'type': 'number'},
        'total': {'type': 'number'},
        'oldest': message_link,
        'newest': message_link
    },
    'required': ['free', 'claimed', 'total']
}

queue_stats = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': messages
        },
        'required': ['messages']
    }
}

resource_schema = {
    'type': 'array',
    'items': {
        'type': 'string'
    },
    'minItems': 1
}

post_messages = {
    'status_code': [201],
    'response_body': {
        'type': 'object',
        'properties': {
            'resources': resource_schema,
            'partial': {'type': 'boolean'}
        },
        # Moved inside 'response_body': a 'required' key at the top level
        # of the tempest schema dict is not part of the JSON schema and
        # was silently ignored by the validator.
        'required': ['resources', 'partial']
    }
}

message_ttl = {
    'type': 'number',
    'minimum': 1
}

list_messages_links = {
    'type': 'array',
    'maxItems': 1,
    'minItems': 1,
    'items': {
        'type': 'object',
        'properties': {
            'rel': {'type': 'string'},
            'href': {'type': 'string'}
        },
        'required': ['rel', 'href']
    }
}

list_messages_response = {
    'type': 'array',
    'minItems': 1,
    'items': {
        'type': 'object',
        'properties': {
            'href': {'type': 'string'},
            'ttl': message_ttl,
            'age': age,
            'body': {'type': 'object'}
        },
        'required': ['href', 'ttl', 'age', 'body']
    }
}

list_messages = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': list_messages_links,
            'messages': list_messages_response
        },
        # Moved inside 'response_body' (see post_messages above).
        'required': ['links', 'messages']
    }
}

single_message = {
    'type': 'object',
    'properties': {
        'href': {'type': 'string'},
        'ttl': message_ttl,
        'age': age,
        'body': {'type': 'object'}
    },
    'required': ['href', 'ttl', 'age', 'body']
}

get_single_message = {
    'status_code': [200],
    'response_body': single_message
}

# v1 returns a bare JSON array of messages (v1.1/v2 wrap it in an object).
get_multiple_messages = {
    'status_code': [200],
    'response_body': {
        'type': 'array',
        'items': single_message,
        'minItems': 1
    }
}

messages_claimed = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'ttl': message_ttl,
        'age': {'type': 'number'},
        'body': {'type': 'object'}
    },
    'required': ['href', 'ttl', 'age', 'body']
}

claim_messages = {
    'status_code': [201, 204],
    'response_body': {
        'type': 'array',
        'items': messages_claimed,
        'minItems': 1
    }
}

claim_ttl = {
    'type': 'number',
    'minimum': 1
}

query_claim = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'age': {'type': 'number'},
            'ttl': claim_ttl,
            'messages': {
                'type': 'array',
                'minItems': 1
            }
        },
        'required': ['ttl', 'age', 'messages']
    }
}
zaqar-tempest-plugin
/zaqar_tempest_plugin-1.6.0.tar.gz/zaqar_tempest_plugin-1.6.0/zaqar_tempest_plugin/api_schema/response/v1/queues.py
queues.py
.. openstack documentation master file, created by sphinx-quickstart on Tue Jul 9 22:26:36 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. ==================================================== Welcome to the documentation of zaqar_tempest_plugin ==================================================== Contents: .. toctree:: :maxdepth: 2 readme installation contributing Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
zaqar-tempest-plugin
/zaqar_tempest_plugin-1.6.0.tar.gz/zaqar_tempest_plugin-1.6.0/doc/source/index.rst
index.rst
'use strict';

var fs = require('fs');
var path = require('path');
var child_process = require("child_process");

// Karma configuration for the zaqar-ui JavaScript test suite.
// Source/spec files are resolved against the tox-managed virtualenv,
// so `tox -e karma` must have run its install step first.
module.exports = function (config) {
  // This tox venv is setup in the post-install npm step
  var pythonVersion = "python3.";
  var stdout = child_process.execFileSync("python3", ["--version"]);
  // "Python 3.X.Y" -> take the minor version ("X") to build the
  // site-packages directory name of the venv interpreter.
  pythonVersion += stdout.toString().split(".")[1];
  var toxPath = '../.tox/karma/lib/' + pythonVersion + '/site-packages/';
  console.log("Karma will check on directory: ", toxPath);

  config.set({
    preprocessors: {
      // Used to collect templates for preprocessing.
      // NOTE: the templates must also be listed in the files section below.
      './static/**/*.html': ['ng-html2js'],
      // Used to indicate files requiring coverage reports.
      './static/**/!(*.spec).js': ['coverage'],
    },

    // Sets up module to process templates.
    ngHtml2JsPreprocessor: {
      prependPrefix: '/',
      moduleName: 'templates'
    },

    basePath: './',

    // Contains both source and test files.
    // NOTE: load order below is significant — modules must be defined
    // before the files that register providers on them.
    files: [
      /*
       * shim, partly stolen from /i18n/js/horizon/
       * Contains expected items not provided elsewhere (dynamically by
       * Django or via jasmine template.
       */
      '../test-shim.js',

      // from jasmine.html
      toxPath + 'xstatic/pkg/jquery/data/jquery.js',
      toxPath + 'xstatic/pkg/angular/data/angular.js',
      toxPath + 'xstatic/pkg/angular/data/angular-route.js',
      toxPath + 'xstatic/pkg/angular/data/angular-mocks.js',
      toxPath + 'xstatic/pkg/angular/data/angular-cookies.js',
      toxPath + 'xstatic/pkg/angular_bootstrap/data/angular-bootstrap.js',
      toxPath + 'xstatic/pkg/angular_gettext/data/angular-gettext.js',
      toxPath + 'xstatic/pkg/angular/data/angular-sanitize.js',
      toxPath + 'xstatic/pkg/d3/data/d3.js',
      toxPath + 'xstatic/pkg/rickshaw/data/rickshaw.js',
      toxPath + 'xstatic/pkg/angular_smart_table/data/smart-table.js',
      toxPath + 'xstatic/pkg/angular_lrdragndrop/data/lrdragndrop.js',
      toxPath + 'xstatic/pkg/spin/data/spin.js',
      toxPath + 'xstatic/pkg/spin/data/spin.jquery.js',
      toxPath + 'xstatic/pkg/tv4/data/tv4.js',
      toxPath + 'xstatic/pkg/objectpath/data/ObjectPath.js',
      toxPath + 'xstatic/pkg/angular_schema_form/data/schema-form.js',
      toxPath + 'xstatic/pkg/angular_fileupload/data/ng-file-upload.js',

      // TODO: These should be mocked.
      toxPath + '/horizon/static/horizon/js/horizon.js',

      /**
       * Include framework source code from horizon that we need.
       * Otherwise, karma will not be able to find them when testing.
       * These files should be mocked in the foreseeable future.
       */
      toxPath + 'horizon/static/framework/**/*.module.js',
      toxPath + 'horizon/static/framework/**/!(*.spec|*.mock).js',
      toxPath + 'openstack_dashboard/static/**/*.module.js',
      toxPath + 'openstack_dashboard/static/**/!(*.spec|*.mock).js',
      toxPath + 'openstack_dashboard/dashboards/**/static/**/*.module.js',
      toxPath + 'openstack_dashboard/dashboards/**/static/**/!(*.spec|*.mock).js',

      /**
       * First, list all the files that defines application's angular modules.
       * Those files have extension of `.module.js`. The order among them is
       * not significant.
       */
      './static/**/*.module.js',

      /**
       * Followed by other JavaScript files that defines angular providers
       * on the modules defined in files listed above. And they are not mock
       * files or spec files defined below. The order among them is not
       * significant.
       */
      './static/**/!(*.spec|*.mock).js',

      /**
       * Then, list files for mocks with `mock.js` extension. The order
       * among them should not be significant.
       */
      toxPath + 'openstack_dashboard/static/**/*.mock.js',

      /**
       * Finally, list files for spec with `spec.js` extension. The order
       * among them should not be significant.
       */
      './static/**/*.spec.js',

      /**
       * Angular external templates
       */
      './static/**/*.html'
    ],

    autoWatch: true,

    frameworks: ['jasmine'],

    browsers: ['Firefox'],

    browserNoActivityTimeout: 60000,

    reporters: ['progress', 'coverage', 'threshold'],

    plugins: [
      'karma-firefox-launcher',
      'karma-jasmine',
      'karma-ng-html2js-preprocessor',
      'karma-coverage',
      'karma-threshold-reporter'
    ],

    // Places coverage report in HTML format in the subdirectory below.
    coverageReporter: {
      type: 'html',
      dir: '../cover/karma/'
    },

    // Coverage threshold values.
    thresholdReporter: {
      statements: 10, // target 100
      branches: 0, // target 100
      functions: 10, // target 100
      lines: 10 // target 100
    }
  });
};
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/karma.conf.js
karma.conf.js
"""Thin wrappers around python-zaqarclient used by the zaqar-ui panels."""

import logging

from zaqarclient.queues import client as zaqar_client

from horizon import exceptions
from horizon.utils.memoized import memoized

from openstack_dashboard.api import base

LOG = logging.getLogger(__name__)

# Queue metadata keys reserved by Zaqar; their values must be integers.
RESERVED_QUEUE_METADATA = ["_max_messages_post_size", "_default_message_ttl"]


@memoized
def zaqarclient(request):
    """Return a memoized zaqarclient for this request.

    Returns None when no 'messaging' endpoint is present in the
    service catalog.
    """
    zaqar_url = ""
    service_type = 'messaging'
    try:
        zaqar_url = base.url_for(request, service_type)
    except exceptions.ServiceCatalogException:
        LOG.debug('No messaging service is configured.')
        return None
    # Use lazy %-style logging args, and never log the auth token itself:
    # credentials must not end up in debug logs.
    LOG.debug('zaqarclient connection created using url "%s"', zaqar_url)
    opts = {'os_auth_token': request.user.token.id,
            'os_auth_url': base.url_for(request, 'identity'),
            'os_project_id': request.user.tenant_id,
            'os_service_type': service_type}
    auth_opts = {'backend': 'keystone',
                 'options': opts}
    conf = {'auth_opts': auth_opts}
    return zaqar_client.Client(url=zaqar_url, version=2, conf=conf)


def queue_list(request, limit=None, marker=None):
    """List queues for the current project."""
    return zaqarclient(request).queues(limit=limit, marker=marker)


def queue_create(request, queue_name, metadata):
    """Create a queue and set its initial metadata."""
    # Pop up a modal form, which contains several inputbox:
    # 1. queue_name
    # 2. ttl
    # 3. max message size
    # 4. Metadata
    queue = zaqarclient(request).queue(queue_name, force_create=True)
    queue.metadata(new_meta=metadata)
    return queue


def queue_delete(request, queue_name):
    """Delete an existing queue by name."""
    queue = zaqarclient(request).queue(queue_name, auto_create=False)
    queue.delete()


def queue_update(request, queue_name, metadata):
    """Update a queue's metadata, coercing reserved keys to int."""
    # Popup a modal form, the queue name is a read-only label or inputbox.
    # user can change ttl, max message size and metadata
    queue = zaqarclient(request).queue(queue_name, auto_create=False)
    # Reserved keys arrive from the form as strings but Zaqar expects ints.
    for key in RESERVED_QUEUE_METADATA:
        if (key in metadata and
                isinstance(metadata[key], str)):
            metadata[key] = int(metadata[key])
    queue.metadata(new_meta=metadata)
    return queue


def queue_get(request, queue_name):
    """Return a queue handle without creating it."""
    return zaqarclient(request).queue(queue_name, auto_create=False)


def queue_purge(request, queue_name, resource_types):
    """Purge messages and/or subscriptions from a queue."""
    queue = zaqarclient(request).queue(queue_name, auto_create=False)
    queue.purge(resource_types=resource_types)


def message_post(request, queue_name, messages_data):
    """Post one or more messages to a queue."""
    return zaqarclient(request).queue(queue_name).post(messages_data)


def message_list(request, queue_name):
    """List messages currently in a queue."""
    return zaqarclient(request).queue(queue_name).messages()


def queue_signed_url(request, queue_name, paths, ttl_seconds, methods):
    """Create a pre-signed URL granting access to a queue."""
    queue = zaqarclient(request).queue(queue_name, auto_create=False)
    return queue.signed_url(paths=paths, ttl_seconds=ttl_seconds,
                            methods=methods)


def subscription_list(request, queue_name):
    """List a queue's subscriptions as plain dicts."""
    return [{'subscriber': s.subscriber,
             'id': s.id,
             'ttl': s.ttl,
             'age': s.age,
             'confirmed': s.confirmed,
             'options': s.options}
            for s in zaqarclient(request).subscriptions(queue_name)]


def subscription_create(request, queue_name, sub_data):
    """Create a subscription and return it as a plain dict."""
    subscription = zaqarclient(request).subscription(queue_name, **sub_data)
    return {'subscriber': subscription.subscriber,
            'id': subscription.id,
            'ttl': subscription.ttl,
            'age': subscription.age,
            'confirmed': subscription.confirmed,
            'options': subscription.options}


def subscription_delete(request, queue_name, sub_data):
    """Delete a subscription identified by *sub_data*."""
    subscription = zaqarclient(request).subscription(queue_name, **sub_data)
    subscription.delete()


def subscription_update(request, queue_name, old_data, new_data):
    """Update an existing subscription with *new_data*."""
    subscription = zaqarclient(request).subscription(queue_name, **old_data)
    subscription.update(new_data)
    return subscription


def pool_list(request, limit=None, marker=None):
    """List storage pools (admin), including details."""
    return zaqarclient(request).pools(limit=limit, marker=marker,
                                      detailed=True)


def pool_create(request, pool_name, params):
    """Create a storage pool."""
    pool = zaqarclient(request).pool(pool_name, **params)
    return pool


def pool_delete(request, pool_name):
    """Delete a storage pool by name."""
    pool = zaqarclient(request).pool(pool_name, auto_create=False)
    pool.delete()


def pool_update(request, pool_name, params):
    """Update a storage pool and return the handle."""
    pool = zaqarclient(request).pool(pool_name, auto_create=False)
    pool.update(params)
    return pool


def pool_get(request, pool_name):
    """Fetch a single storage pool's details."""
    return zaqarclient(request).pool(pool_name, auto_create=False).get()


def flavor_list(request, limit=None, marker=None):
    """List flavors (admin), including details."""
    return zaqarclient(request).flavors(limit=limit, marker=marker,
                                        detailed=True)


def flavor_create(request, flavor_name, params):
    """Create a flavor."""
    flavor = zaqarclient(request).flavor(flavor_name, **params)
    return flavor


def flavor_delete(request, flavor_name):
    """Delete a flavor by name."""
    flavor = zaqarclient(request).flavor(flavor_name, auto_create=False)
    flavor.delete()


def flavor_update(request, flavor_name, params):
    """Update a flavor and return the handle."""
    flavor = zaqarclient(request).flavor(flavor_name, auto_create=False)
    flavor.update(params)
    return flavor


def flavor_get(request, flavor_name):
    """Fetch a single flavor's details."""
    return zaqarclient(request).flavor(flavor_name, auto_create=False).get()
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/api/zaqar.py
zaqar.py
"""REST API endpoints backing the zaqar-ui AngularJS panels."""

import json

import yaml

from django.utils.translation import gettext_lazy as _
from django.views import generic

from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils

from zaqar_ui.api import zaqar


def _convert_to_yaml(data, default_flow_style=False):
    """Serialize *data* to YAML; return '' for falsy or unserializable input."""
    if not data:
        return ''
    try:
        return yaml.safe_dump(data, default_flow_style=default_flow_style)
    except Exception:
        return ''


def _load_yaml(data):
    """Parse a YAML string into a dict; raise with a readable message on error."""
    if not data:
        loaded_data = {}
    else:
        try:
            loaded_data = yaml.safe_load(data)
        except Exception as ex:
            raise Exception(_('The specified input is not a valid '
                              'YAML format: %s') % str(ex))
    return loaded_data


@urls.register
class Queue(generic.View):
    """API for retrieving a single queue"""
    url_regex = r'zaqar/queues/(?P<queue_name>[^/]+)$'

    @rest_utils.ajax()
    def get(self, request, queue_name):
        """Get a specific queue"""
        queue = zaqar.queue_get(request, queue_name)
        stats = queue.stats['messages']
        queue_info = {'name': queue_name,
                      'claimed': stats['claimed'],
                      'free': stats['free'],
                      'total': stats['total'],
                      'metadata': queue.metadata()}
        return queue_info

    @rest_utils.ajax(data_required=True)
    def post(self, request, queue_name):
        """Update a queue.

        Returns the updated queue object on success.
        """
        queue = zaqar.queue_update(request, queue_name, **request.DATA)
        # Fixed Location header: was '/api/zaqars/queue/%s', which matches
        # no registered route (cf. Queues.put and url_regex above).
        location = '/api/zaqar/queues/%s' % queue._name
        response = {'name': queue._name,
                    'metadata': queue._metadata}
        return rest_utils.CreatedResponse(location, response)


@urls.register
class QueueActions(generic.View):
    """API for actions on a single queue"""
    url_regex = r'zaqar/queues/(?P<queue_name>[^/]+)/(?P<action>[^/]+)$'

    @rest_utils.ajax(data_required=True)
    def post(self, request, queue_name, action):
        """Actions for a queue"""
        if action == "purge":
            resource_types = request.DATA.get("resource_types")
            zaqar.queue_purge(request, queue_name, resource_types)
        elif action == "share":
            paths = request.DATA.get("paths")
            ttl_seconds = request.DATA.get("ttl_seconds")
            methods = request.DATA.get("methods")
            return zaqar.queue_signed_url(request, queue_name,
                                          paths, ttl_seconds, methods)


@urls.register
class Queues(generic.View):
    """API for queues"""
    url_regex = r'zaqar/queues/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of the Queues for a project.

        The returned result is an object with property 'items' and each
        item under this is a queue.
        """
        result = zaqar.queue_list(request)
        queues = []
        for q in result:
            stats = q.stats['messages']
            queues.append({'name': q.name,
                           'claimed': stats['claimed'],
                           'free': stats['free'],
                           'total': stats['total'],
                           'metadata': q.metadata()})
        return queues

    @rest_utils.ajax(data_required=True)
    def delete(self, request):
        """Delete one or more queue by name.

        Returns HTTP 204 (no content) on successful deletion.
        """
        for queue_name in request.DATA:
            zaqar.queue_delete(request, queue_name)

    @rest_utils.ajax(data_required=True)
    def put(self, request):
        """Create a new queue.

        Returns the new queue object on success.
        """
        new_queue = zaqar.queue_create(request, **request.DATA)
        location = '/api/zaqar/queues/%s' % new_queue.name
        response = {'name': new_queue.name,
                    'claimed': 0,
                    'free': 0,
                    'total': 0,
                    'metadata': new_queue._metadata}
        return rest_utils.CreatedResponse(location, response)


@urls.register
class Subscriptions(generic.View):
    """API for Subscriptions"""
    url_regex = r'zaqar/queues/(?P<queue_name>[^/]+)/subscriptions/$'

    @rest_utils.ajax()
    def get(self, request, queue_name):
        """Get a list of the Subscriptions for a queue."""
        return zaqar.subscription_list(request, queue_name)

    @rest_utils.ajax(data_required=True)
    def delete(self, request, queue_name):
        """Delete one or more queue by name.

        Returns HTTP 204 (no content) on successful deletion.
        """
        zaqar.subscription_delete(request, queue_name, request.DATA)

    @rest_utils.ajax(data_required=True)
    def put(self, request, queue_name):
        """Create a new subscription.

        Returns the new queue object on success.
        """
        return zaqar.subscription_create(request, queue_name, request.DATA)


@urls.register
class Messages(generic.View):
    """API for messages"""
    url_regex = r'zaqar/queues/(?P<queue_name>[^/]+)/messages/$'

    @rest_utils.ajax()
    def get(self, request, queue_name):
        """Get a list of messages"""
        result = zaqar.message_list(request, queue_name)
        messages = []
        for m in result:
            claim_id = None
            # NOTE(review): claim_id is treated as a callable here —
            # confirm against the zaqarclient Message API.
            if m.claim_id:
                claim_id = m.claim_id()
            messages.append({'age': m.age,
                             'body': m.body,
                             'claim_id': claim_id,
                             'id': m.id,
                             'href': m.href,
                             'ttl': m.ttl})
        return messages

    @rest_utils.ajax(data_required=True)
    def post(self, request, queue_name):
        """Create new messages"""
        messages = json.loads(request.DATA.get("messages"))
        return zaqar.message_post(request, queue_name, messages)


@urls.register
class Subscription(generic.View):
    """API for retrieving a single subscription"""
    url_regex = r'zaqar/queues/(?P<queue_name>[^/]+)/' \
                r'subscription/(?P<subscriber>[^/]+)/$'

    @rest_utils.ajax(data_required=True)
    def post(self, request, queue_name, subscriber):
        """Update an existing subscription identified by its id."""
        zaqar.subscription_update(request, queue_name, {'id': subscriber},
                                  request.DATA)


@urls.register
class Pool(generic.View):
    """API for retrieving a single pool"""
    url_regex = r'zaqar/pools/(?P<pool_name>[^/]+)$'

    @rest_utils.ajax()
    def get(self, request, pool_name):
        """Get a specific pool"""
        pool = zaqar.pool_get(request, pool_name)
        pool['id'] = pool.get('name')
        pool['options'] = _convert_to_yaml(pool.get('options'))
        return pool

    @rest_utils.ajax(data_required=True)
    def post(self, request, pool_name):
        """Update a pool.

        Returns the updated pool object on success.
        """
        request.DATA["options"] = _load_yaml(request.DATA.get("options"))
        params = request.DATA
        pool_name = params.pop('name')
        new_pool = zaqar.pool_update(request, pool_name, params)
        location = '/api/zaqar/pools/%s' % new_pool.name
        response = {'name': new_pool.name,
                    'uri': new_pool.uri,
                    'weight': new_pool.weight,
                    'group': new_pool.group,
                    'options': new_pool.options}
        return rest_utils.CreatedResponse(location, response)


@urls.register
class Pools(generic.View):
    """API for pools"""
    url_regex = r'zaqar/pools/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of the Pools for admin.

        The returned result is an object with property 'items' and each
        item under this is a pool.
        """
        result = zaqar.pool_list(request)
        pools = []
        for p in result:
            options = _convert_to_yaml(p.options)
            pools.append({'id': p.name,
                          'name': p.name,
                          'uri': p.uri,
                          'weight': p.weight,
                          'group': p.group,
                          'options': options})
        return {'items': pools}

    @rest_utils.ajax(data_required=True)
    def delete(self, request):
        """Delete one or more pool by name.

        Returns HTTP 204 (no content) on successful deletion.
        """
        for pool_name in request.DATA:
            zaqar.pool_delete(request, pool_name)

    @rest_utils.ajax(data_required=True)
    def put(self, request):
        """Create a new pool.

        Returns the new pool object on success.
        """
        request.DATA['options'] = _load_yaml(request.DATA.get('options'))
        params = request.DATA
        pool_name = params.pop('name')
        new_pool = zaqar.pool_create(request, pool_name, params)
        location = '/api/zaqar/pools/%s' % new_pool.name
        response = {'name': new_pool.name,
                    'uri': new_pool.uri,
                    'weight': new_pool.weight,
                    'group': new_pool.group,
                    'options': new_pool.options}
        return rest_utils.CreatedResponse(location, response)


@urls.register
class Flavor(generic.View):
    """API for retrieving a single flavor"""
    url_regex = r'zaqar/flavors/(?P<flavor_name>[^/]+)$'

    @rest_utils.ajax()
    def get(self, request, flavor_name):
        """Get a specific flavor"""
        flavor = zaqar.flavor_get(request, flavor_name)
        flavor['id'] = flavor.get('name')
        flavor['capabilities'] = _convert_to_yaml(flavor.get('capabilities'))
        return flavor

    @rest_utils.ajax(data_required=True)
    def post(self, request, flavor_name):
        """Update a flavor.

        Returns the updated flavor object on success.
        """
        capabilities = request.DATA.get('capabilities')
        request.DATA['capabilities'] = _load_yaml(capabilities)
        params = request.DATA
        flavor_name = params.pop('name')
        new_flavor = zaqar.flavor_update(request, flavor_name, params)
        location = '/api/zaqar/flavors/%s' % new_flavor.name
        response = {'name': new_flavor.name,
                    'pool_group': new_flavor.pool_group,
                    'capabilities': new_flavor.capabilities}
        return rest_utils.CreatedResponse(location, response)


@urls.register
class Flavors(generic.View):
    """API for flavors"""
    url_regex = r'zaqar/flavors/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of the Flavors for admin.

        The returned result is an object with property 'items' and each
        item under this is a flavor.
        """
        result = zaqar.flavor_list(request)
        flavors = []
        for f in result:
            capabilities = _convert_to_yaml(f.capabilities)
            flavors.append({'id': f.name,
                            'name': f.name,
                            'pool_group': f.pool_group,
                            'capabilities': capabilities})
        return {'items': flavors}

    @rest_utils.ajax(data_required=True)
    def delete(self, request):
        """Delete one or more flavor by name.

        Returns HTTP 204 (no content) on successful deletion.
        """
        for flavor_name in request.DATA:
            zaqar.flavor_delete(request, flavor_name)

    @rest_utils.ajax(data_required=True)
    def put(self, request):
        """Create a new flavor.

        Returns the new flavor object on success.
        """
        capabilities = request.DATA.get('capabilities')
        request.DATA['capabilities'] = _load_yaml(capabilities)
        params = request.DATA
        flavor_name = params.pop('name')
        new_flavor = zaqar.flavor_create(request, flavor_name, params)
        location = '/api/zaqar/flavors/%s' % new_flavor.name
        response = {'name': new_flavor.name,
                    'pool_group': new_flavor.pool_group,
                    'capabilities': new_flavor.capabilities}
        return rest_utils.CreatedResponse(location, response)
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/api/rest/zaqar.py
zaqar.py
(function () {
  'use strict';

  angular
    .module('horizon.app.core.openstack-service-api')
    .factory('horizon.app.core.openstack-service-api.zaqar', ZaqarAPI);

  ZaqarAPI.$inject = [
    'horizon.framework.util.http.service',
    'horizon.framework.widgets.toast.service'
  ];

  /**
   * Thin client for the Horizon Zaqar REST proxy.
   * Every call returns the apiService promise; failures pop an error toast
   * via the shared error() helper (except getSubscriptions, which attaches
   * no error handler — callers handle failure themselves).
   */
  function ZaqarAPI(apiService, toast) {
    // %s placeholders are filled with the queue name via interpolate().
    var queuePath = '/api/zaqar/queues/';
    var msgPath = '/api/zaqar/queues/%s/messages/';
    var subPath = '/api/zaqar/queues/%s/subscriptions/';
    var poolPath = '/api/zaqar/pools/';
    var flavorPath = '/api/zaqar/flavors/';

    var service = {
      getQueues: getQueues,
      getQueue: getQueue,
      createQueue: createQueue,
      deleteQueue: deleteQueue,
      updateQueue: updateQueue,
      purgeQueue: purgeQueue,
      postMessages: postMessages,
      getMessages: getMessages,
      signedUrl: signedUrl,
      getSubscriptions: getSubscriptions,
      addSubscription: addSubscription,
      deleteSubscription: deleteSubscription,
      getPools: getPools,
      getPool: getPool,
      createPool: createPool,
      deletePool: deletePool,
      updatePool: updatePool,
      getFlavors: getFlavors,
      getFlavor: getFlavor,
      createFlavor: createFlavor,
      deleteFlavor: deleteFlavor,
      updateFlavor: updateFlavor
    };

    return service;

    //////////

    // List all queues visible to the current project.
    function getQueues() {
      var msg = gettext('Unable to retrieve the Queues.');
      return apiService.get(queuePath).error(error(msg));
    }

    // Fetch a single queue by name.
    function getQueue(queueName) {
      var msg = gettext('Unable to retrieve the Queue.');
      return apiService.get(queuePath + queueName).error(error(msg));
    }

    // Create a queue; newQueue carries queue_name and metadata.
    function createQueue(newQueue) {
      var msg = gettext('Unable to create the queue.');
      return apiService.put(queuePath, newQueue).error(error(msg));
    }

    // Delete by name; no error toast is attached here.
    function deleteQueue(queueName) {
      return apiService.delete(queuePath, [queueName]);
    }

    // Update a queue's metadata; the name itself is immutable.
    function updateQueue(queue) {
      var msg = gettext('Unable to update the queue.');
      var url = queuePath + queue.queue_name;
      var form = { metadata: queue.metadata };
      return apiService.post(url, form).error(error(msg));
    }

    // Purge messages and/or subscriptions from a queue.
    // resourceTypes is posted as-is (e.g. {resource_types: [...]}).
    function purgeQueue(queueName, resourceTypes) {
      var msg = gettext('Unable to purge the queue.');
      var url = queuePath + queueName + '/purge';
      var form = resourceTypes;
      return apiService.post(url, form).error(error(msg));
    }

    // List messages in a queue.
    function getMessages(queueName) {
      var msg = gettext('Unable to get messages.');
      var url = interpolate(msgPath, [queueName]);
      return apiService.get(url).error(error(msg));
    }

    // Post one or more messages to a queue.
    function postMessages(queueName, msgs) {
      var msg = gettext('Unable to post messages.');
      var url = interpolate(msgPath, [queueName]);
      return apiService.post(url, msgs).error(error(msg));
    }

    // Create a pre-signed URL granting limited access to the queue.
    function signedUrl(queueName, form) {
      var msg = gettext('Unable to create signed URL.');
      var url = queuePath + queueName + '/share';
      return apiService.post(url, form).error(error(msg));
    }

    // List subscriptions; intentionally no error toast — callers decide.
    function getSubscriptions(queue) {
      var url = interpolate(subPath, [queue.name]);
      return apiService.get(url);
    }

    // Add a subscription; sub.queueName selects the queue.
    function addSubscription(sub) {
      var msg = gettext('Unable to add subscription.');
      var url = interpolate(subPath, [sub.queueName]);
      return apiService.put(url, sub).error(error(msg));
    }

    // Remove a subscription from a queue.
    function deleteSubscription(queueName, subscription) {
      var msg = gettext('Unable to delete subscription.');
      var url = interpolate(subPath, [queueName]);
      return apiService.delete(url, subscription).error(error(msg));
    }

    // ---- Pools (admin) ----

    function getPools() {
      var msg = gettext('Unable to retrieve the pools.');
      return apiService.get(poolPath).error(error(msg));
    }

    function getPool(poolName) {
      var msg = gettext('Unable to retrieve the pool.');
      var url = poolPath + poolName;
      return apiService.get(url).error(error(msg));
    }

    function createPool(newPool) {
      var msg = gettext('Unable to create the pool.');
      return apiService.put(poolPath, newPool).error(error(msg));
    }

    function deletePool(poolName) {
      return apiService.delete(poolPath, [poolName]);
    }

    function updatePool(pool) {
      var msg = gettext('Unable to update the pool.');
      var url = poolPath + pool.name;
      return apiService.post(url, pool).error(error(msg));
    }

    // ---- Flavors (admin) ----

    function getFlavors() {
      var msg = gettext('Unable to retrieve the flavors.');
      return apiService.get(flavorPath).error(error(msg));
    }

    function getFlavor(flavorName) {
      var msg = gettext('Unable to retrieve the flavor.');
      var url = flavorPath + flavorName;
      return apiService.get(url).error(error(msg));
    }

    function createFlavor(newFlavor) {
      var msg = gettext('Unable to create the flavor.');
      return apiService.put(flavorPath, newFlavor).error(error(msg));
    }

    function deleteFlavor(flavorName) {
      return apiService.delete(flavorPath, [flavorName]);
    }

    function updateFlavor(flavor) {
      var msg = gettext('Unable to update the flavor.');
      var url = flavorPath + flavor.name;
      return apiService.post(url, flavor).error(error(msg));
    }

    // Returns a callback that toasts the given message on failure.
    function error(message) {
      return function() {
        toast.add('error', message);
      };
    }
  }
}());
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/app/core/openstack-service-api/zaqar.service.js
zaqar.service.js
(function() {
  'use strict';

  /**
   * @ngdoc overview
   * @name queueController
   * @ngController
   *
   * @description
   * Controller for the queues table. Keeps ctrl.queuesSrc (the unfiltered
   * source rows) and ctrl.queues (the displayed rows) in sync with events
   * emitted by the queue actions.
   */
  angular
    .module('horizon.dashboard.project.queues')
    .controller('horizon.dashboard.project.queues.table.queueController', queueController);

  queueController.$inject = [
    '$scope',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.project.queues.basePath',
    'horizon.dashboard.project.queues.events',
    'horizon.dashboard.project.queues.resourceType',
    'horizon.framework.conf.resource-type-registry.service'
  ];

  function queueController($scope, zaqar, base, events, type, registry) {
    var ctrl = this;
    ctrl.queues = [];
    ctrl.queuesSrc = [];
    ctrl.resourceType = registry.getResourceType(type);
    ctrl.subsTemplate = base + 'table/subscription.html';

    init();
    initScope();

    //////////

    function initScope() {
      // Deregister every listener on $destroy to avoid leaking handlers.
      var createWatcher = $scope.$on(events.CREATE_SUCCESS, onCreateSuccess);
      var deleteWatcher = $scope.$on(events.DELETE_SUCCESS, onDeleteSuccess);
      var updateWatcher = $scope.$on(events.UPDATE_SUCCESS, onUpdateSuccess);
      var purgeWatcher = $scope.$on(events.PURGE_SUCCESS, onPurgeSuccess);
      var postMessageWatcher = $scope.$on(events.POST_MESSAGE_SUCCESS, onPostMessageSuccess);
      var subWatcher = $scope.$on(events.SUBSCRIPTION_CREATE_SUCCESS, broadcastEvents);
      $scope.$on('$destroy', function destroy() {
        createWatcher();
        deleteWatcher();
        updateWatcher();
        purgeWatcher();
        postMessageWatcher();
        subWatcher();
      });
    }

    //////////

    function init() {
      ctrl.resourceType.initActions($scope);
      zaqar.getQueues().then(showQueues);
    }

    // Re-broadcast events that arrived from a different scope so nested
    // subscription tables can react to them.
    function broadcastEvents(event, data) {
      if (event.targetScope !== $scope) {
        $scope.$broadcast(event.name, data);
      }
    }

    function showQueues(response) {
      // hz-table expects all items to have the id field
      // so we need to manually add name as id here
      ctrl.queuesSrc = response.data;
      ctrl.queuesSrc.map(function addIdentifier(queue) {
        queue.id = queue.name;
      });
    }

    // Replace the element whose id matches item.id, in place.
    // BUG FIX: the original redeclared `var i` and `var queue` twice within
    // one function scope in refreshQueue; factoring the loop out removes
    // the duplicate declarations.
    function replaceById(list, item) {
      for (var i = 0; i < list.length; i++) {
        if (list[i].id === item.id) {
          list[i] = item;
        }
      }
    }

    // Re-fetch a single queue and swap the fresh row into both collections.
    function refreshQueue(queueName) {
      zaqar.getQueue(queueName).then(function(response) {
        response.data.id = queueName;
        replaceById(ctrl.queuesSrc, response.data);
        replaceById(ctrl.queues, response.data);
      });
    }

    // Re-fetch subscriptions then tell the table to re-render the drawer.
    function refreshSubscriptions(queueName) {
      var queue = { name: queueName };
      zaqar.getSubscriptions(queue).then(function() {
        $scope.tCtrl.broadcastExpansion(queue);
      });
    }

    function onCreateSuccess(e, newQueue) {
      e.stopPropagation();
      newQueue.id = newQueue.name;
      ctrl.queuesSrc.push(newQueue);
    }

    function onDeleteSuccess(e, deletedNames) {
      // remove existing item from table
      e.stopPropagation();
      for (var i = ctrl.queuesSrc.length - 1; i >= 0; i--) {
        var queue = ctrl.queuesSrc[i];
        if (deletedNames.indexOf(queue.name) >= 0) {
          ctrl.queuesSrc.splice(i, 1);
        }
      }
      // clear selections upon deletion
      $scope.$emit('hzTable:clearSelected');
    }

    function onUpdateSuccess(e, queue) {
      e.stopPropagation();
      queue.id = queue.name;
      // BUG FIX: the original did `ctrl.queuesSrc[queue.id] = queue`, which
      // indexes the array with the queue *name string* — that adds a stray
      // named property instead of updating the row, so the table never
      // refreshed. Replace the matching row by id instead.
      replaceById(ctrl.queuesSrc, queue);
    }

    function onPurgeSuccess(e, queueName) {
      e.stopPropagation();
      // purge queue
      refreshQueue(queueName);
      refreshSubscriptions(queueName);
    }

    function onPostMessageSuccess(e, queueName) {
      e.stopPropagation();
      refreshQueue(queueName);
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/table/queue.controller.js
queue.controller.js
(function() {
  'use strict';

  /**
   * @ngdoc overview
   * @name subscriptionController
   * @ngController
   *
   * @description
   * Controller backing the per-queue subscriptions table shown inside an
   * expanded queue row.
   */
  angular
    .module('horizon.dashboard.project.queues')
    .controller('horizon.dashboard.project.queues.table.subscriptionController',
      subscriptionController);

  subscriptionController.$inject = [
    '$scope',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.project.queues.events',
    'horizon.framework.widgets.toast.service'
  ];

  function subscriptionController($scope, zaqar, events, toast) {
    var ctrl = this;

    // queue name -> { subscriptions: [...] } for each expanded row
    ctrl.queuesMap = {};
    ctrl.deleteSubscription = deleteSubscription;

    init();
    initScope();

    //////////

    function initScope() {
      var unwatchExpand = $scope.$on('hzTable:rowExpanded', getSubscriptions);
      var unwatchCreate = $scope.$on(events.SUBSCRIPTION_CREATE_SUCCESS, addSubscription);
      $scope.$on('$destroy', function destroy() {
        unwatchExpand();
        unwatchCreate();
      });
    }

    function init() {}

    //////////

    // Lazily create the map entry for a queue the first time it is needed.
    function ensureEntry(name) {
      if (!ctrl.queuesMap.hasOwnProperty(name)) {
        ctrl.queuesMap[name] = { subscriptions: [] };
      }
    }

    // A subscription was created elsewhere; append it to the right queue.
    function addSubscription(event, sub) {
      ensureEntry(sub.queueName);
      ctrl.queuesMap[sub.queueName].subscriptions.push(sub);
    }

    // Delete a subscription server-side, then drop it from the local list.
    function deleteSubscription(queue, sub) {
      var msg = gettext('Removed %(subscriber)s subscriber from the %(queue)s queue.');
      var context = { subscriber: sub.subscriber, queue: queue.name };
      zaqar.deleteSubscription(queue.name, sub).success(onDeleted);

      function onDeleted() {
        toast.add('success', interpolate(msg, context, true));
        var subs = ctrl.queuesMap[queue.name].subscriptions;
        var pos = subs.indexOf(sub);
        if (pos >= 0) {
          subs.splice(pos, 1);
        }
      }
    }

    // Row expanded: (re)load that queue's subscriptions.
    function getSubscriptions(event, queue) {
      zaqar.getSubscriptions(queue).success(function (response) {
        ensureEntry(queue.name);
        ctrl.queuesMap[queue.name].subscriptions = response;
      });
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/table/subscription.controller.js
subscription.controller.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .factory('horizon.dashboard.project.queues.actions.createSubscriptionService',
      createSubscriptionService);

  createSubscriptionService.$inject = [
    '$q',
    'horizon.app.core.metadata.service',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.dashboard.project.queues.events',
    'horizon.dashboard.project.queues.actions.createSubscriptionWorkflow',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.framework.widgets.modal.wizard-modal.service',
    'horizon.framework.widgets.toast.service'
  ];

  /**
   * @ngDoc factory
   * @name horizon.dashboard.project.queues.actions.createSubscriptionService
   * @param {Object} $q
   * @param {Object} meta
   * @param {Object} policy
   * @param {Object} events
   * @param {Object} createSubWorkflow
   * @param {Object} zaqar
   * @param {Object} wizard
   * @param {Object} toast
   * @returns {Object} create subscription service
   * @description A service to open the subscriptions wizard.
   */
  function createSubscriptionService(
    $q, meta, policy, events, createSubWorkflow, zaqar, wizard, toast) {

    var message = {
      success: gettext('Subscription %s was successfully created.')
    };

    // Captured in perform() so success() can $emit on the calling scope.
    var scope;
    // Working model shared between perform() and submit().
    var model = null;

    var service = {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    return service;

    //////////////

    function initAction() {
    }

    // Open the create-subscription wizard for the given queue.
    function perform(queue, $scope) {
      scope = $scope;
      model = { subscriber: null, ttl: null, options: {} };
      model.queueName = queue.name;
      wizard.modal({
        workflow: createSubWorkflow,
        submit: submit
      });
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['queue', 'add_subscriptions']] });
    }

    // Merge the wizard form into the model and create the subscription.
    function submit(stepModels) {
      angular.extend(model, stepModels.subscriptionForm);
      return zaqar.addSubscription(model).then(success, error);
    }

    function success(response) {
      angular.extend(model, response.data);
      toast.add('success', interpolate(message.success, [model.subscriber]));
      scope.$emit(events.SUBSCRIPTION_CREATE_SUCCESS, model);
    }

    function error() {
      // TODO: Currently, when server throws an error
      // close the modal dialog and display the error message
      // In the future, display the error message inside the dialog
      // and allow user to continue with workflow
      return;
    }
  } // end of createSubscriptionService
})(); // end of IIFE
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/create-subscription.service.js
create-subscription.service.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .factory(
      'horizon.dashboard.project.queues.actions.postMessageService', postMessageService);

  postMessageService.$inject = [
    '$q',
    'horizon.dashboard.project.queues.basePath',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.project.queues.events',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.form.ModalFormService',
    'horizon.framework.widgets.toast.service'
  ];

  /**
   * @ngdoc factory
   * @name horizon.dashboard.project.queues.actions.postMessageService
   * @param {Object} $q
   * @param {String} basePath
   * @param {Object} policy
   * @param {Object} zaqar
   * @param {Object} events
   * @param {Object} gettext
   * @param {Object} $qExtensions
   * @param {Object} modal
   * @param {Object} toast
   * @returns {Object} post messages service
   * @description Brings up the post messages modal dialog.
   * On submit, post messages to given queues.
   * On cancel, do nothing.
   */
  function postMessageService(
    $q, basePath, policy, zaqar, events, gettext, $qExtensions, modal, toast
  ) {
    // schema
    var schema = {
      type: "object",
      properties: {
        postMessages: {
          title: gettext("Post Messages"),
          type: "string"
        }
      }
    };

    // form
    // NOTE(review): the form binds key 'messages' while the schema declares
    // 'postMessages' — the keys look inconsistent. Confirm which key the
    // Zaqar proxy expects before aligning them.
    var form = [
      {
        type: 'section',
        htmlClass: 'row',
        items: [
          {
            type: 'section',
            htmlClass: 'col-sm-6',
            items: [
              {
                key: 'messages',
                type: 'textarea'
              }
            ]
          },
          {
            type: 'template',
            templateUrl: basePath + 'actions/post-message.help.html'
          }
        ]
      }
    ];

    // model, rebuilt per perform() call
    var model = {};

    var message = {
      success: gettext('Messages has been posted to queue %s successfully.')
    };

    var service = {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    // Captured in perform() so submit() can $emit on the calling scope.
    var scope;

    return service;

    //////////////

    function initAction() {
    }

    function allowed() {
      return $qExtensions.booleanAsPromise(true);
    }

    // Open the post-messages dialog for the selected queue.
    function perform(selected, $scope) {
      scope = $scope;
      model = {
        id: selected.id,
        name: selected.name
      };
      // modal config
      var config = {
        // BUG FIX: the title read 'List Messages' — copy-pasted from the
        // list-message service. This dialog posts messages (submit button
        // is 'Post'), so title it accordingly.
        "title": gettext('Post Messages'),
        "submitText": gettext('Post'),
        "schema": schema,
        "form": form,
        "model": model
      };
      return modal.open(config).then(submit);
    }

    // Strip bookkeeping fields, post the remaining model as messages,
    // then toast and notify the table.
    function submit(context) {
      var id = context.model.id;
      var name = context.model.name;
      delete context.model.id;
      delete context.model.name;
      return zaqar.postMessages(id, context.model).then(function() {
        toast.add('success', interpolate(message.success, [name]));
        scope.$emit(events.POST_MESSAGE_SUCCESS, name);
      });
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/post-message.service.js
post-message.service.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .factory('horizon.dashboard.project.queues.actions.updateQueueService',
      updateQueueService);

  updateQueueService.$inject = [
    'horizon.app.core.metadata.service',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.dashboard.project.queues.events',
    'horizon.dashboard.project.queues.actions.updateQueueWorkflow',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.framework.widgets.modal.wizard-modal.service',
    'horizon.framework.widgets.toast.service'
  ];

  /**
   * @ngDoc factory
   * @name horizon.dashboard.project.queues.actions.updateQueueService
   * @param {Object} meta
   * @param {Object} policy
   * @param {Object} events
   * @param {Object} updateQueueWorkflow
   * @param {Object} zaqar
   * @param {Object} wizard
   * @param {Object} toast
   * @returns {Object} update queue service
   * @description Opens the update-queue wizard pre-filled with the selected
   * queue and submits the edited details/metadata to the API.
   */
  function updateQueueService(meta, policy, events, updateQueueWorkflow, zaqar, wizard, toast) {
    var message = {
      success: gettext('Queue %s was successfully updated.')
    };

    // Captured in perform() so onUpdated() can $emit on the calling scope.
    var scope;
    var model = {
      queue_name: null,
      metadata: {}
    };

    return {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    //////////////

    function initAction() {
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['queue', 'update_queue']] });
    }

    // Seed the model from the selected queue and launch the wizard.
    function perform(queue, $scope) {
      scope = $scope;
      model = queue;
      model.queue_name = queue.name;
      wizard.modal({
        data: {queue: model},
        workflow: updateQueueWorkflow,
        submit: submit
      });
    }

    // Fold both wizard steps into one payload and send the update.
    function submit(stepModels) {
      model = stepModels.queueDetailsForm;
      model.metadata = stepModels.queueMetadataForm;
      return zaqar.updateQueue(model).then(onUpdated);
    }

    function onUpdated(response) {
      toast.add('success', interpolate(message.success, [response.data.name]));
      scope.$emit(events.UPDATE_SUCCESS, response.data);
    }
  } // end of updateQueueService
})(); // end of IIFE
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/update-queue.service.js
update-queue.service.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .factory('horizon.dashboard.project.queues.actions.createQueueService',
      createQueueService);

  createQueueService.$inject = [
    'horizon.app.core.metadata.service',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.dashboard.project.queues.events',
    'horizon.dashboard.project.queues.actions.createQueueWorkflow',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.framework.widgets.modal.wizard-modal.service',
    'horizon.framework.widgets.toast.service'
  ];

  /**
   * @ngDoc factory
   * @name horizon.dashboard.project.queues.actions.createQueueService
   * @param {Object} meta
   * @param {Object} policy
   * @param {Object} events
   * @param {Object} createQueueWorkflow
   * @param {Object} zaqar
   * @param {Object} wizard
   * @param {Object} toast
   * @returns {Object} service
   * @description Opens the create-queue wizard and submits the new queue.
   */
  function createQueueService(meta, policy, events, createQueueWorkflow, zaqar, wizard, toast) {
    var message = {
      success: gettext('Queue %s was successfully created.')
    };

    // Captured in perform() so onCreated() can $emit on the calling scope.
    var scope;
    var model = {
      queue_name: null,
      metadata: {}
    };

    return {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    //////////////

    function initAction() {
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['queue', 'add_queue']] });
    }

    // Launch the wizard; resolve with the modal's result promise.
    function perform(selected, $scope) {
      scope = $scope;
      return wizard.modal({
        workflow: createQueueWorkflow,
        submit: submit
      }).result;
    }

    // Fold both wizard steps into one payload and create the queue.
    function submit(stepModels) {
      model = stepModels.queueDetailsForm;
      model.metadata = stepModels.queueMetadataForm;
      return zaqar.createQueue(model).then(onCreated);
    }

    function onCreated(response) {
      toast.add('success', interpolate(message.success, [response.data.name]));
      scope.$emit(events.CREATE_SUCCESS, response.data);
    }
  } // end of createQueueService
})(); // end of IIFE
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/create-queue.service.js
create-queue.service.js
(function() {
  'use strict';

  /**
   * @ngdoc factory
   * @name horizon.dashboard.project.queues.signed-url.service
   * @description
   * Service for the signed url for the queue
   */
  angular
    .module('horizon.dashboard.project.queues')
    .factory(
      'horizon.dashboard.project.queues.actions.signedUrlService', signedUrlService);

  signedUrlService.$inject = [
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.project.queues.basePath',
    'horizon.dashboard.project.queues.events',
    'horizon.dashboard.project.queues.resourceType',
    'horizon.framework.util.actions.action-result.service',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.form.ModalFormService',
    'horizon.framework.widgets.modal-wait-spinner.service',
    'horizon.framework.widgets.toast.service'
  ];

  function signedUrlService(
    policy,
    zaqar,
    basePath,
    events,
    resourceType,
    actionResult,
    gettext,
    $qExtensions,
    modal,
    waitSpinner,
    toast
  ) {
    // schema
    var schema = {
      type: "object",
      properties: {
        name: { },
        paths: { },
        ttl_seconds: { type: "number", minimum: 1 },
        methods: { }
      }
    };

    // form — a fresh angular.copy is taken per dialog because the help item
    // is mutated to display the previous result.
    var form = [
      {
        type: 'section',
        htmlClass: 'row',
        items: [
          {
            type: "section",
            htmlClass: "col-sm-12",
            items: [
              {
                // for result message
                type: "help",
                helpvalue: "",
                condition: true
              },
              {
                key: "paths",
                type: "checkboxes",
                title: gettext("Paths"),
                titleMap: [
                  {value: "messages", name: gettext("Messages")},
                  {value: "subscriptions", name: gettext("Subscriptions")},
                  {value: "claims", name: gettext("Claims")}
                ],
                htmlClass: "horizontal-checkboxes"
              },
              {
                key: "ttl_seconds",
                title: gettext("TTL Seconds")
              },
              {
                key: "methods",
                title: gettext("Methods"),
                type: "checkboxes",
                titleMap: [
                  {value: "GET", name: gettext("GET")},
                  {value: "HEAD", name: gettext("HEAD")},
                  {value: "OPTIONS", name: gettext("OPTIONS")},
                  {value: "POST", name: gettext("POST")},
                  {value: "PUT", name: gettext("PUT")},
                  {value: "DELETE", name: gettext("DELETE")}
                ],
                htmlClass: "horizontal-checkboxes"
              }
            ]
          }
        ]
      }
    ];

    // model
    var model = {
      id: '',
      name: '',
      paths: '',
      ttl_seconds: '',
      methods: ''
    };

    // BUG FIX: keep the pristine title template separate from config.title.
    // The original did `config.title = interpolate(config.title, ...)`,
    // interpolating in place — so every invocation after the first
    // interpolated an already-resolved string and the dialog kept showing
    // the first queue's name.
    var titleTemplate = gettext("Signed URL for %s");

    // modal config
    var config = {
      title: titleTemplate,
      schema: schema,
      form: angular.copy(form),
      model: model
    };

    var message = {
      success: gettext("Signed URL was successfully created for the queue %s with expires %s " +
        "and signature %s.")
    };

    var service = {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    return service;

    //////////////

    function initAction() {
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['queue', 'signed_url']] });
    }

    // Open the signed-URL dialog for the selected queue.
    function perform(selected) {
      config.model.id = selected.name;
      config.model.name = selected.name;
      config.model.paths = '';
      config.form = angular.copy(form);
      config.title = interpolate(titleTemplate, [selected.name]);
      modal.open(config).then(submit);
    }

    // Create the signed URL, then reopen the dialog with the result shown
    // in the help area so the user can generate another one.
    function submit(context) {
      var name = context.model.name;
      delete context.model.id;
      delete context.model.name;
      delete context.model.output;
      if (!context.model.ttl_seconds) {
        delete context.model.ttl_seconds;
      }
      waitSpinner.showModalSpinner(gettext('Creating Signed URL'));
      return zaqar.signedUrl(name, context.model).then(function(response) {
        config.model = {
          paths: context.model.paths,
          ttl_seconds: context.model.ttl_seconds,
          methods: context.model.methods
        };
        config.form = angular.copy(form);
        // for result message
        config.form[0].items[0].items[0].helpvalue = "<div class='alert alert-success'>" +
          interpolate(message.success,
            [name, response.data.expires, response.data.signature]
          ) + "</div>";
        config.form[0].items[0].items[0].condition = false;
        // display new dialog
        waitSpinner.hideModalSpinner();
        modal.open(config).then(submit);
        var result = actionResult.getActionResult().updated(resourceType, name);
        return result.results;
      }, function(response) {
        // close spinner and display toast
        waitSpinner.hideModalSpinner();
        toast.add('error', response.data.split("(")[0].trim() + ".");
        var result = actionResult.getActionResult().failed(resourceType, name);
        return result.results;
      });
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/signed-url.service.js
signed-url.service.js
(function() {
  'use strict';

  /**
   * @ngdoc overview
   * @ngname horizon.dashboard.project.queues.actions
   * @description Provides all of the actions for queues.
   */
  angular.module('horizon.dashboard.project.queues.actions', [
    'horizon.framework.conf',
    'horizon.app.core'])
    .run(registerActions);

  registerActions.$inject = [
    'horizon.framework.conf.resource-type-registry.service',
    'horizon.dashboard.project.queues.actions.createQueueService',
    'horizon.dashboard.project.queues.actions.deleteQueueService',
    'horizon.dashboard.project.queues.actions.updateQueueService',
    'horizon.dashboard.project.queues.actions.purgeQueueService',
    'horizon.dashboard.project.queues.actions.postMessageService',
    'horizon.dashboard.project.queues.actions.listMessageService',
    'horizon.dashboard.project.queues.actions.signedUrlService',
    'horizon.dashboard.project.queues.actions.createSubscriptionService',
    'horizon.dashboard.project.queues.resourceType'
  ];

  // Registers all row-level (item) and table-level (batch) actions for the
  // queue resource type. Append order here is the menu display order.
  function registerActions(
    registry,
    createQueueService,
    deleteQueueService,
    updateQueueService,
    purgeQueueService,
    postMessageService,
    listMessageService,
    signedUrlService,
    createSubscriptionService,
    resourceType
  ) {
    var queueResourceType = registry.getResourceType(resourceType);

    // Per-row actions, shown in each queue's action dropdown.
    queueResourceType.itemActions
      .append({
        id: 'messagesPost',
        service: postMessageService,
        template: { text: gettext('Post Messages') }
      })
      .append({
        id: 'messagesList',
        service: listMessageService,
        template: { text: gettext('View Messages') }
      })
      .append({
        id: 'queuesSignedUrl',
        service: signedUrlService,
        template: { text: gettext('Signed URL') }
      })
      .append({
        id: 'queuesItemUpdate',
        service: updateQueueService,
        template: { text: gettext('Update') }
      })
      .append({
        id: 'queuesItemPurge',
        service: purgeQueueService,
        template: { text: gettext('Purge') }
      })
      .append({
        id: 'subscriptionsCreate',
        service: createSubscriptionService,
        template: { text: gettext('Create Subscription') }
      })
      .append({
        id: 'queuesItemDelete',
        service: deleteQueueService,
        template: { type: 'delete', text: gettext('Delete') }
      });

    // Table-level actions (create button, delete-selected).
    queueResourceType.batchActions
      .append({
        id: 'queuesBatchCreate',
        service: createQueueService,
        template: { type: 'create', text: gettext('Create Queues') }
      })
      .append({
        id: 'queuesBatchDelete',
        service: deleteQueueService,
        template: { type: 'delete-selected', text: gettext('Delete Queues') }
      });
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/actions.module.js
actions.module.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .factory('horizon.dashboard.project.queues.actions.deleteQueueService', deleteQueueService);

  deleteQueueService.$inject = [
    '$q',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.project.queues.events',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.modal.deleteModalService',
    'horizon.framework.widgets.toast.service'
  ];

  /**
   * @ngDoc factory
   * @name horizon.dashboard.project.queues.actions.deleteQueueService
   * @param {Object} $q
   * @param {Object} policy
   * @param {Object} zaqar
   * @param {Object} events
   * @param {Object} gettext
   * @param {Object} $qExtensions
   * @param {Object} deleteModal
   * @param {Object} toast
   * @returns {Object} delete queue service
   * @description Brings up the delete queues confirmation modal dialog.
   * On submit, delete given queues.
   * On cancel, do nothing.
   */
  function deleteQueueService(
    $q, policy, zaqar, events, gettext, $qExtensions, deleteModal, toast) {

    // Shared delete-modal context; (re)built in initAction/perform.
    var context;
    var service = {
      initAction: initAction,
      allowed: allowed,
      perform: perform
    };

    return service;

    //////////////

    function initAction() {
      context = { successEvent: events.DELETE_SUCCESS };
    }

    // Accepts a single queue or an array; checks the policy for each, then
    // opens the confirmation modal for the ones that passed.
    function perform(items, $scope) {
      var queues = angular.isArray(items) ? items : [items];
      context.labels = labelize(queues.length);
      context.deleteEntity = deleteQueue;
      $qExtensions.allSettled(queues.map(checkPermission)).then(afterCheck);

      function afterCheck(result) {
        if (result.fail.length > 0) {
          toast.add('error', getMessage(result.fail));
        }
        if (result.pass.length > 0) {
          deleteModal.open($scope, result.pass.map(getEntity), context);
        }
      }
    }

    // NOTE(review): this rule targets 'zaqar' while the sibling actions use
    // 'queue' (e.g. [['queue', 'add_queue']]) — confirm which target the
    // policy files actually define.
    function allowed() {
      return policy.ifAllowed({ rules: [['zaqar', 'delete_queues']] });
    }

    function deleteQueue(queue) {
      return zaqar.deleteQueue(queue);
    }

    // NOTE(review): allowed() takes no parameter, so the per-queue argument
    // passed here is ignored — every queue gets the same global check.
    function checkPermission(queue) {
      return { promise: allowed(queue), context: queue };
    }

    function getMessage(entities) {
      var message = gettext("You are not allowed to delete queues: %s");
      return interpolate(message, [entities.map(getName).join(", ")]);
    }

    // Builds the singular/plural strings for the confirmation modal.
    function labelize(count) {
      return {
        title: ngettext(
          'Confirm Delete Queue',
          'Confirm Delete queues', count),
        message: ngettext(
          'You have selected "%s". Deleted queue is not recoverable.',
          'You have selected "%s". Deleted queues are not recoverable.', count),
        submit: ngettext(
          'Delete Queue',
          'Delete Queues', count),
        success: ngettext(
          'Deleted Queue: %s.',
          'Deleted Queues: %s.', count),
        error: ngettext(
          'Unable to delete Queue: %s.',
          'Unable to delete Queues: %s.', count)
      };
    }

    function getName(item) {
      return getEntity(item).name;
    }

    // allSettled wraps each queue as {promise, context}; unwrap it.
    function getEntity(item) {
      return item.context;
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/delete-queue.service.js
delete-queue.service.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .factory(
      'horizon.dashboard.project.queues.actions.purgeQueueService', purgeQueueService);

  purgeQueueService.$inject = [
    '$q',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.project.queues.events',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.form.ModalFormService',
    'horizon.framework.widgets.toast.service'
  ];

  /**
   * @ngdoc factory
   * @name horizon.dashboard.project.queues.actions.purgeQueueService
   * @param {Object} $q
   * @param {Object} policy
   * @param {Object} zaqar
   * @param {Object} events
   * @param {Object} gettext
   * @param {Object} $qExtensions
   * @param {Object} modal
   * @param {Object} toast
   * @returns {Object} purge queue service
   * @description Brings up the purge queues choices modal dialog.
   * On submit, purge given queues.
   * On cancel, do nothing.
   */
  function purgeQueueService(
    $q, policy, zaqar, events, gettext, $qExtensions, modal, toast
  ) {
    // schema
    var schema = {
      type: "object",
      properties: {
        resource_types: {
          title: gettext("Choose resource to purge"),
          type: "string",
          enum: ["messages", "subscriptions", "all"]
        }
      }
    };

    // form
    var form = [
      {
        type: 'section',
        htmlClass: 'row',
        items: [
          {
            type: 'section',
            htmlClass: 'col-sm-12',
            items: [
              {
                key: 'resource_types',
                type: 'radiobuttons',
                titleMap: [
                  {value: 'messages', name: gettext('Messages')},
                  {value: 'subscriptions', name: gettext('Subscriptions')},
                  // BUG FIX: "All" was the only label not wrapped in
                  // gettext(), so it could never be translated.
                  {value: "all", name: gettext("All")}
                ],
                required: true
              }
            ]
          }
        ]
      }
    ];

    // scope is captured in perform(); model is rebuilt per call.
    var scope, model;

    var message = {
      success: gettext('Queue %s has been purged successfully.')
    };

    var service = {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    return service;

    //////////////

    function initAction() {
    }

    function allowed() {
      return $qExtensions.booleanAsPromise(true);
    }

    // Open the purge dialog for the selected queue.
    function perform(selected, $scope) {
      scope = $scope;
      model = {
        id: selected.id,
        name: selected.name,
        resource_types: []
      };
      // modal config
      var config = {
        "title": gettext('Purge Queue'),
        "submitText": gettext('Purge'),
        "schema": schema,
        "form": form,
        "model": model
      };
      return modal.open(config).then(submit);
    }

    // Map the radio choice to the API shape ("all" means an empty list,
    // which the backend treats as "purge everything"), then purge.
    function submit(context) {
      var id = context.model.id;
      var name = context.model.name;
      delete context.model.id;
      delete context.model.name;
      context.model.resource_types = (context.model.resource_types === "all")
        ? [] : [context.model.resource_types];
      return zaqar.purgeQueue(id, context.model).then(function() {
        toast.add('success', interpolate(message.success, [name]));
        scope.$emit(events.PURGE_SUCCESS, name);
      });
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/purge-queue.service.js
purge-queue.service.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .factory(
      'horizon.dashboard.project.queues.actions.listMessageService', listMessageService);

  listMessageService.$inject = [
    '$q',
    'horizon.dashboard.project.queues.basePath',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.form.ModalFormService'
  ];

  /**
   * @ngdoc factory
   * @name horizon.dashboard.project.queues.actions.listMessageService
   * @param {Object} $q
   * @param {String} basePath
   * @param {Object} gettext
   * @param {Object} $qExtensions
   * @param {Object} modal
   * @returns {Object} list messages service
   * @description Brings up the polling messages modal dialog.
   * On submit, poll messages from given queues.
   * On cancel, do nothing.
   */
  function listMessageService(
    $q, basePath, gettext, $qExtensions, modal
  ) {
    // schema
    var schema = {
      type: "object",
      properties: {
        listMessages: {
          title: gettext("List Messages"),
          type: "string"
        }
      }
    };

    // form — the actual message polling lives in the embedded template,
    // not in this service.
    var form = [
      {
        type: 'section',
        htmlClass: 'row',
        items: [
          {
            type: 'section',
            htmlClass: 'col-sm-12',
            items: [
              {
                type: 'template',
                templateUrl: basePath + 'actions/list-message.html'
              }
            ]
          }
        ]
      }
    ];

    // model
    var model;

    var service = {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    // modal config
    var config = {
      "title": gettext('List Messages'),
      "submitText": gettext('List Messages'),
      "schema": schema,
      "form": form,
      "model": model
    };

    return service;

    //////////////

    function initAction() {
    }

    function allowed() {
      return $qExtensions.booleanAsPromise(true);
    }

    // Open the list-messages dialog for the selected queue.
    function perform(selected) {
      config.model = {
        id: selected.id,
        name: selected.name
      };
      return modal.open(config).then(submit);
    }

    // Submitting deliberately reopens the same dialog with a fresh model,
    // so the user can keep polling; closing via cancel ends the loop.
    function submit(context) {
      var id = context.model.id;
      var name = context.model.name;
      config.model = { id: id, name: name };
      // display new dialog
      modal.open(config).then(submit);
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/actions/list-message.service.js
list-message.service.js
(function() {
  'use strict';

  angular
    .module('horizon.dashboard.project.queues')
    .controller('horizon.dashboard.project.queues.steps.QueueMetadataController', controller);

  controller.$inject = [
    '$q',
    '$scope',
    'horizon.app.core.metadata.service',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.project.queues.events',
    'horizon.framework.widgets.metadata.tree.service'
  ];

  /**
   * @ngdoc controller
   * @name horizon.dashboard.project.queues.steps.QueueDetailsController
   * @param {Object} $q
   * @param {Object} $scope
   * @param {Object} metadata
   * @param {Object} zaqar
   * @param {Object} events
   * @param {Object} metaTree
   * @returns {undefined} Returns nothing
   * @description This controller is use for creating a queue.
   * Keeps $scope.stepModels.queueMetadataForm in sync with the metadata
   * tree widget so the wizard's submit step can read it.
   */
  function controller($q, $scope, metadata, zaqar, events, metaTree) {
    var ctrl = this;
    // An existing queue (update flow) or {} (create flow).
    var queue = $scope.queue ? $scope.queue : {};
    ctrl.tree = new metaTree.Tree([], []);

    /* eslint-disable angular/ng_controller_as */
    $scope.$watchCollection(getTree, onMetadataChanged);
    /* eslint-enable angular/ng_controller_as */

    init();

    ////////////////////////////////

    function init() {
      $q.all({
        available: standardDefinitions(queue),
        existing: getExistingMetdataPromise(queue)
      })
        .then(onMetadataGet);
    }

    // Rebuild the tree once both promises resolve.
    // NOTE(review): this reads response.available.data.items, but in the
    // update flow standardDefinitions resolves to {data: queue.metadata} —
    // confirm queue.metadata actually carries an `items` member.
    function onMetadataGet(response) {
      ctrl.tree = new metaTree.Tree(
        response.available.data.items,
        response.existing.data
      );
    }

    function getTree() {
      return ctrl.tree.getExisting();
    }

    function standardDefinitions(queue) {
      // TODO: currently, there is no standard metadefinitions
      // should add some reserved/fixed definition here
      // preferably it should come from zaqar and not hardcoded here
      // however available metadata is needed for showing to be updated,
      // so now we set existing metadata to available metadata.
      if (angular.isDefined(queue.id)) {
        return {data: queue.metadata};
      } else {
        var deferred = $q.defer();
        deferred.resolve({data: {}});
        return deferred.promise;
      }
    }

    // Resolves the queue's current metadata and seeds the wizard form.
    // (Function name keeps the original's spelling.)
    function getExistingMetdataPromise(queue) {
      if (angular.isDefined(queue.id)) {
        $scope.stepModels.queueMetadataForm = queue.metadata;
        return {data: queue.metadata};
      } else {
        var deferred = $q.defer();
        deferred.resolve({data: {}});
        $scope.stepModels.queueMetadataForm = {};
        return deferred.promise;
      }
    }

    // Push every tree edit into the wizard's step model.
    function onMetadataChanged(newValue, oldValue) {
      if (newValue !== oldValue) {
        $scope.stepModels.queueMetadataForm = newValue;
      }
    }
  } // end of controller
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/project/queues/steps/queue-metadata/queue-metadata.controller.js
queue-metadata.controller.js
(function () {
  'use strict';

  /**
   * @ngdoc overview
   * @name horizon.dashboard.admin.pools
   * @description Pools module for messaging.
   */
  angular
    .module('horizon.dashboard.admin.pools', [
      'ngRoute',
      'horizon.dashboard.admin.pools.actions'
    ])
    .constant('horizon.dashboard.admin.pools.resourceType', 'OS::Zaqar::Pools')
    .run(run)
    .config(config);

  run.$inject = [
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.admin.pools.basePath',
    'horizon.dashboard.admin.pools.resourceType',
    'horizon.dashboard.admin.pools.service',
    'horizon.framework.conf.resource-type-registry.service'
  ];

  // Register the Pools resource type: names, drawer, columns and facets.
  function run(zaqar, basePath, resourceType, poolsService, registry) {
    var poolResource = registry.getResourceType(resourceType);
    poolResource
      .setNames(gettext('Pool'), gettext('Pools'))
      .setSummaryTemplateUrl(basePath + 'drawer.html')
      .setProperties(poolProperties())
      .setListFunction(poolsService.getPoolsPromise)
      .tableColumns
      .append({
        id: 'name',
        priority: 1,
        sortDefault: true
      })
      .append({
        id: 'group',
        priority: 1
      })
      .append({
        id: 'weight',
        priority: 1
      })
      .append({
        id: 'uri',
        priority: 2
      });

    // for magic-search
    poolResource.filterFacets
      .append({
        label: gettext('Name'),
        name: 'name',
        singleton: true
      })
      .append({
        label: gettext('Group'),
        name: 'group',
        singleton: true
      })
      .append({
        label: gettext('Weight'),
        name: 'weight',
        singleton: true
      })
      .append({
        label: gettext('URI'),
        name: 'uri',
        singleton: true
      });
  }

  // Property metadata (labels and display filters) for pool fields.
  function poolProperties() {
    return {
      name: {label: gettext('Name'), filters: []},
      group: {label: gettext('Group'), filters: ['noName']},
      weight: {label: gettext('Weight'), filters: ['noValue']},
      uri: {label: gettext('URI'), filters: ['noValue']},
      options: {label: gettext('Options'), filters: ['noValue']}
    };
  }

  config.$inject = [
    '$provide',
    '$windowProvider',
    '$routeProvider'
  ];

  /**
   * @ndoc config
   * @name horizon.dashboard.admin.pools.basePath
   * @param {Object} $provide
   * @param {Object} $windowProvider
   * @param {Object} $routeProvider
   * @returns {undefined} Returns nothing
   * @description Base path for the pools panel
   */
  function config($provide, $windowProvider, $routeProvider) {
    var path = $windowProvider.$get().STATIC_URL + 'dashboard/admin/pools/';
    $provide.constant('horizon.dashboard.admin.pools.basePath', path);
    $routeProvider.when('/admin/pools', {
      templateUrl: path + 'panel.html'
    });
  }
}());
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pools/pools.module.js
pools.module.js
(function() {
  'use strict';

  /**
   * @ngdoc factory
   * @name horizon.dashboard.admin.pools.actions.delete.service
   * @Description
   * Brings up the delete pools confirmation modal dialog.
   * On submit, delete given pools.
   * On cancel, do nothing.
   */
  angular
    .module('horizon.dashboard.admin.pools.actions')
    .factory('horizon.dashboard.admin.pools.actions.delete.service', deleteService);

  deleteService.$inject = [
    '$q',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.admin.pools.resourceType',
    'horizon.framework.util.actions.action-result.service',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.modal.deleteModalService',
    'horizon.framework.widgets.toast.service'
  ];

  function deleteService(
    $q, policy, zaqar, resourceType, actionResult,
    gettext, $qExtensions, deleteModal, toast
  ) {
    var scope;
    var context;
    var notAllowedMessage = gettext("You are not allowed to delete pools: %s");

    return {
      initAction: initAction,
      allowed: allowed,
      perform: perform
    };

    //////////////

    function initAction() {
      context = { };
    }

    // Normalize the selection, build labels, then run permission checks.
    function perform(items, newScope) {
      scope = newScope;
      var pools = angular.isArray(items) ? items : [items];
      context.labels = labelize(pools.length);
      context.deleteEntity = deletePool;
      return $qExtensions.allSettled(pools.map(checkPermission)).then(afterCheck);
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['pool', 'delete_pool']] });
    }

    function checkPermission(pool) {
      return {promise: allowed(), context: pool};
    }

    // Warn about disallowed pools; open the modal for the allowed ones.
    function afterCheck(result) {
      var outcome = $q.reject().catch(angular.noop); // Reject the promise by default
      if (result.fail.length > 0) {
        toast.add('error', getMessage(notAllowedMessage, result.fail));
        outcome = $q.reject(result.fail).catch(angular.noop);
      }
      if (result.pass.length > 0) {
        outcome = deleteModal.open(scope, result.pass.map(getEntity), context)
          .then(createResult);
      }
      return outcome;
    }

    // Translate the modal outcome into an action-result object.
    function createResult(deleteModalResult) {
      var result = actionResult.getActionResult();
      deleteModalResult.pass.forEach(function markDeleted(item) {
        result.deleted(resourceType, getEntity(item).name);
      });
      deleteModalResult.fail.forEach(function markFailed(item) {
        result.failed(resourceType, getEntity(item).name);
      });
      return result.result;
    }

    // Pluralized labels for the confirmation dialog.
    function labelize(count) {
      return {
        title: ngettext(
          'Confirm Delete Pool',
          'Confirm Delete Pools', count),
        message: ngettext(
          'You have selected "%s". Deleted Pool is not recoverable.',
          'You have selected "%s". Deleted Pools are not recoverable.', count),
        submit: ngettext(
          'Delete Pool',
          'Delete Pools', count),
        success: ngettext(
          'Deleted Pool: %s.',
          'Deleted Pools: %s.', count),
        error: ngettext(
          'Unable to delete Pool: %s.',
          'Unable to delete Pools: %s.', count)
      };
    }

    function deletePool(pool) {
      return zaqar.deletePool(pool, true);
    }

    function getMessage(message, entities) {
      return interpolate(message, [entities.map(getName).join(", ")]);
    }

    function getName(result) {
      return getEntity(result).name;
    }

    function getEntity(result) {
      return result.context;
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pools/actions/delete.service.js
delete.service.js
(function() {
  'use strict';

  /**
   * @ngdoc factory
   * @name horizon.dashboard.admin.pools.actions.update.service
   * @description
   * Service for the storage pool update modal
   */
  angular
    .module('horizon.dashboard.admin.pools.actions')
    .factory('horizon.dashboard.admin.pools.actions.update.service', updateService);

  updateService.$inject = [
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.admin.pools.actions.workflow',
    'horizon.dashboard.admin.pools.resourceType',
    'horizon.framework.util.actions.action-result.service',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.form.ModalFormService',
    'horizon.framework.widgets.toast.service'
  ];

  function updateService(
    policy, zaqar, workflow, resourceType, actionResult,
    gettext, $qExtensions, modal, toast
  ) {
    var message = {
      success: gettext('Pool %s was successfully updated.')
    };

    var service = {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    return service;

    //////////////

    function initAction() {
    }

    // Open the update modal pre-populated with the pool's current data.
    function perform(selected) {
      var title, submitText;
      title = gettext('Update Pool');
      submitText = gettext('Update');
      var config = workflow.init('update', title, submitText);

      // load current data
      zaqar.getPool(selected.name).then(onLoad);
      function onLoad(response) {
        config.model.name = response.data.name;
        config.model.group = response.data.group;
        config.model.weight = response.data.weight;
        config.model.uri = response.data.uri;
        config.model.options = response.data.options;
      }

      return modal.open(config).then(submit);
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['pool', 'update_pool']] });
    }

    function submit(context) {
      // BUGFIX: the second argument to .then() must be a function; the
      // previous literal `true` was silently ignored by the promise API.
      return zaqar.updatePool(context.model, true).then(success);
    }

    // Toast a success message and report the updated resource.
    function success(response) {
      toast.add('success', interpolate(message.success, [response.data.name]));
      var result = actionResult.getActionResult().updated(resourceType, response.data.name);
      return result.result;
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pools/actions/update.service.js
update.service.js
(function() {
  'use strict';

  /**
   * @ngdoc factory
   * @name horizon.dashboard.admin.pools.actions.workflow
   * @description
   * Workflow for creating/updating storage pool
   */
  angular
    .module('horizon.dashboard.admin.pools.actions')
    .factory('horizon.dashboard.admin.pools.actions.workflow', workflow);

  workflow.$inject = [
    'horizon.framework.util.i18n.gettext'
  ];

  function workflow(gettext) {

    // Build the modal-form config (schema + layout + empty model) for the
    // create/update pool dialogs. The name field is read-only on update.
    function init(actionType, title, submitText) {
      var optionsPlaceholder = gettext(
        'An optional request component related to storage-specific options in YAML format.');

      // schema
      var schema = {
        type: 'object',
        properties: {
          name: {
            title: gettext('Name'),
            type: 'string'
          },
          group: {
            title: gettext('Group'),
            type: 'string'
          },
          weight: {
            title: gettext('Weight'),
            type: 'number'
          },
          uri: {
            title: gettext('URI'),
            type: 'string'
          },
          options: {
            title: gettext('Options'),
            type: 'string'
          }
        }
      };

      // form
      var form = [
        {
          type: 'section',
          htmlClass: 'row',
          items: [
            {
              type: 'section',
              htmlClass: 'col-sm-6',
              items: [
                {
                  key: 'name',
                  placeholder: gettext('Name of the pool.'),
                  required: true,
                  "readonly": actionType === 'update'
                },
                {
                  key: 'weight',
                  placeholder: gettext('Weight of the pool.'),
                  required: true
                },
                {
                  key: 'uri',
                  placeholder: gettext('URI for storage engine of this pool.'),
                  description: gettext('e.g. mongodb://127.0.0.1:27017'),
                  required: true
                }
              ]
            },
            {
              type: 'section',
              htmlClass: 'col-sm-6',
              items: [
                {
                  key: 'group',
                  placeholder: gettext('Group of the pool.')
                },
                {
                  key: 'options',
                  type: 'textarea',
                  placeholder: optionsPlaceholder
                }
              ]
            }
          ]
        }
      ];

      // empty model
      var model = {
        name: '',
        group: '',
        weight: 0,
        uri: '',
        options: ''
      };

      return {
        title: title,
        submitText: submitText,
        schema: schema,
        form: form,
        model: model
      };
    }

    return {
      init: init
    };
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pools/actions/workflow.service.js
workflow.service.js
(function () {
  'use strict';

  /**
   * @ngdoc overview
   * @name horizon.dashboard.admin.pool-flavors
   * @description Flavors module for messaging pool.
   */
  angular
    .module('horizon.dashboard.admin.pool-flavors', [
      'ngRoute',
      'horizon.dashboard.admin.pool-flavors.actions'
    ])
    .constant('horizon.dashboard.admin.pool-flavors.resourceType', 'OS::Zaqar::Flavors')
    .run(run)
    .config(config);

  run.$inject = [
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.admin.pool-flavors.basePath',
    'horizon.dashboard.admin.pool-flavors.resourceType',
    'horizon.dashboard.admin.pool-flavors.service',
    'horizon.framework.conf.resource-type-registry.service'
  ];

  // Register the Pool Flavors resource type: names, drawer, columns, facets.
  function run(zaqar, basePath, resourceType, flavorsService, registry) {
    var flavorResource = registry.getResourceType(resourceType);
    flavorResource
      .setNames(gettext('Pool Flavor'), gettext('Pool Flavors'))
      .setSummaryTemplateUrl(basePath + 'drawer.html')
      .setProperties(flavorProperties())
      .setListFunction(flavorsService.getFlavorsPromise)
      .tableColumns
      .append({
        id: 'name',
        priority: 1,
        sortDefault: true
      })
      .append({
        id: 'pool_group',
        priority: 1
      });

    // for magic-search
    flavorResource.filterFacets
      .append({
        label: gettext('Name'),
        name: 'name',
        singleton: true
      })
      .append({
        label: gettext('Pool Group'),
        name: 'pool_group',
        singleton: true
      });
  }

  // Property metadata (labels and display filters) for flavor fields.
  function flavorProperties() {
    return {
      name: {label: gettext('Name'), filters: []},
      pool_group: {label: gettext('Pool Group'), filters: ['noName']},
      capabilities: {label: gettext('Capabilities'), filters: ['noValue']}
    };
  }

  config.$inject = [
    '$provide',
    '$windowProvider',
    '$routeProvider'
  ];

  /**
   * @ndoc config
   * @name horizon.dashboard.admin.pool-flavors.basePath
   * @param {Object} $provide
   * @param {Object} $windowProvider
   * @param {Object} $routeProvider
   * @returns {undefined} Returns nothing
   * @description Base path for the pool-flavors panel
   */
  function config($provide, $windowProvider, $routeProvider) {
    var path = $windowProvider.$get().STATIC_URL + 'dashboard/admin/pool-flavors/';
    $provide.constant('horizon.dashboard.admin.pool-flavors.basePath', path);
    $routeProvider.when('/admin/pool_flavors', {
      templateUrl: path + 'panel.html'
    });
  }
}());
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pool-flavors/pool-flavors.module.js
pool-flavors.module.js
(function() {
  'use strict';

  /**
   * @ngdoc factory
   * @name horizon.dashboard.admin.pool-flavors.actions.delete.service
   * @Description
   * Brings up the delete pool flavors confirmation modal dialog.
   * On submit, delete given pool flavors.
   * On cancel, do nothing.
   */
  angular
    .module('horizon.dashboard.admin.pool-flavors.actions')
    .factory('horizon.dashboard.admin.pool-flavors.actions.delete.service', deleteService);

  deleteService.$inject = [
    '$q',
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.admin.pool-flavors.resourceType',
    'horizon.framework.util.actions.action-result.service',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.modal.deleteModalService',
    'horizon.framework.widgets.toast.service'
  ];

  function deleteService(
    $q, policy, zaqar, resourceType, actionResult,
    gettext, $qExtensions, deleteModal, toast
  ) {
    var scope;
    var context;
    var notAllowedMessage = gettext("You are not allowed to delete pool flavors: %s");

    return {
      initAction: initAction,
      allowed: allowed,
      perform: perform
    };

    //////////////

    function initAction() {
      context = { };
    }

    // Normalize the selection, build labels, then run permission checks.
    function perform(items, newScope) {
      scope = newScope;
      var flavors = angular.isArray(items) ? items : [items];
      context.labels = labelize(flavors.length);
      context.deleteEntity = deleteFlavor;
      return $qExtensions.allSettled(flavors.map(checkPermission)).then(afterCheck);
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['pool_flavor', 'delete_flavor']] });
    }

    function checkPermission(flavor) {
      return {promise: allowed(), context: flavor};
    }

    // Warn about disallowed flavors; open the modal for the allowed ones.
    function afterCheck(result) {
      var outcome = $q.reject().catch(angular.noop); // Reject the promise by default
      if (result.fail.length > 0) {
        toast.add('error', getMessage(notAllowedMessage, result.fail));
        outcome = $q.reject(result.fail).catch(angular.noop);
      }
      if (result.pass.length > 0) {
        outcome = deleteModal.open(scope, result.pass.map(getEntity), context)
          .then(createResult);
      }
      return outcome;
    }

    // Translate the modal outcome into an action-result object.
    function createResult(deleteModalResult) {
      var result = actionResult.getActionResult();
      deleteModalResult.pass.forEach(function markDeleted(item) {
        result.deleted(resourceType, getEntity(item).name);
      });
      deleteModalResult.fail.forEach(function markFailed(item) {
        result.failed(resourceType, getEntity(item).name);
      });
      return result.result;
    }

    // Pluralized labels for the confirmation dialog.
    function labelize(count) {
      return {
        title: ngettext(
          'Confirm Delete Pool Flavor',
          'Confirm Delete Pool Flavors', count),
        message: ngettext(
          'You have selected "%s". Deleted Pool Flavor is not recoverable.',
          'You have selected "%s". Deleted Pool Flavors are not recoverable.', count),
        submit: ngettext(
          'Delete Pool Flavor',
          'Delete Pool Flavors', count),
        success: ngettext(
          'Deleted Pool Flavor: %s.',
          'Deleted Pool Flavors: %s.', count),
        error: ngettext(
          'Unable to delete Pool Flavor: %s.',
          'Unable to delete Pool Flavors: %s.', count)
      };
    }

    function deleteFlavor(flavor) {
      return zaqar.deleteFlavor(flavor, true);
    }

    function getMessage(message, entities) {
      return interpolate(message, [entities.map(getName).join(", ")]);
    }

    function getName(result) {
      return getEntity(result).name;
    }

    function getEntity(result) {
      return result.context;
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pool-flavors/actions/delete.service.js
delete.service.js
(function() {
  'use strict';

  /**
   * @ngdoc factory
   * @name horizon.dashboard.admin.pool-flavors.actions.update.service
   * @description
   * Service for the pool flavor update modal
   */
  angular
    .module('horizon.dashboard.admin.pool-flavors.actions')
    .factory('horizon.dashboard.admin.pool-flavors.actions.update.service', updateService);

  updateService.$inject = [
    'horizon.app.core.openstack-service-api.policy',
    'horizon.app.core.openstack-service-api.zaqar',
    'horizon.dashboard.admin.pool-flavors.actions.workflow',
    'horizon.dashboard.admin.pool-flavors.resourceType',
    'horizon.framework.util.actions.action-result.service',
    'horizon.framework.util.i18n.gettext',
    'horizon.framework.util.q.extensions',
    'horizon.framework.widgets.form.ModalFormService',
    'horizon.framework.widgets.toast.service'
  ];

  function updateService(
    policy, zaqar, workflow, resourceType, actionResult,
    gettext, $qExtensions, modal, toast
  ) {
    var message = {
      success: gettext('Pool flavor %s was successfully updated.')
    };

    var service = {
      initAction: initAction,
      perform: perform,
      allowed: allowed
    };

    return service;

    //////////////

    function initAction() {
    }

    // Open the update modal pre-populated with the flavor's current data.
    function perform(selected) {
      var title, submitText;
      title = gettext('Update Pool Flavor');
      submitText = gettext('Update');
      var config = workflow.init('update', title, submitText);

      // load current data
      zaqar.getFlavor(selected.name).then(onLoad);
      function onLoad(response) {
        config.model.name = response.data.name;
        config.model.pool_group = response.data.pool_group;
        config.model.capabilities = response.data.capabilities;
      }

      return modal.open(config).then(submit);
    }

    function allowed() {
      return policy.ifAllowed({ rules: [['pool_flavor', 'update_flavor']] });
    }

    function submit(context) {
      // BUGFIX: the second argument to .then() must be a function; the
      // previous literal `true` was silently ignored by the promise API.
      return zaqar.updateFlavor(context.model, true).then(success);
    }

    // Toast a success message and report the updated resource.
    function success(response) {
      toast.add('success', interpolate(message.success, [response.data.name]));
      var result = actionResult.getActionResult().updated(resourceType, response.data.name);
      return result.result;
    }
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pool-flavors/actions/update.service.js
update.service.js
(function() {
  'use strict';

  /**
   * @ngdoc factory
   * @name horizon.dashboard.admin.pool-flavors.actions.workflow
   * @description
   * Workflow for creating/updating storage pool flavor
   */
  angular
    .module('horizon.dashboard.admin.pool-flavors.actions')
    .factory('horizon.dashboard.admin.pool-flavors.actions.workflow', workflow);

  workflow.$inject = [
    'horizon.framework.util.i18n.gettext'
  ];

  function workflow(gettext) {

    // Build the modal-form config (schema + layout + empty model) for the
    // create/update flavor dialogs. The name field is read-only on update.
    function init(actionType, title, submitText) {
      var capabilitiesPlaceholder = gettext(
        'Describes flavor-specific capabilities in YAML format.');

      // schema
      var schema = {
        type: 'object',
        properties: {
          name: {
            title: gettext('Name'),
            type: 'string'
          },
          pool_group: {
            title: gettext('Pool Group'),
            type: 'string'
          },
          capabilities: {
            title: gettext('Capabilities'),
            type: 'string'
          }
        }
      };

      // form
      var form = [
        {
          type: 'section',
          htmlClass: 'row',
          items: [
            {
              type: 'section',
              htmlClass: 'col-sm-6',
              items: [
                {
                  key: 'name',
                  placeholder: gettext('Name of the flavor.'),
                  required: true,
                  "readonly": actionType === 'update'
                },
                {
                  key: 'pool_group',
                  placeholder: gettext('Pool group for flavor.'),
                  /* eslint-disable max-len */
                  description: gettext('You must specify one of the pool groups that is configured in storage pools.'),
                  required: true
                }
              ]
            },
            {
              type: 'section',
              htmlClass: 'col-sm-6',
              items: [
                {
                  key: 'capabilities',
                  type: 'textarea',
                  placeholder: capabilitiesPlaceholder
                }
              ]
            }
          ]
        }
      ];

      // empty model
      var model = {
        name: '',
        pool_group: '',
        capabilities: ''
      };

      return {
        title: title,
        submitText: submitText,
        schema: schema,
        form: form,
        model: model
      };
    }

    return {
      init: init
    };
  }
})();
zaqar-ui
/zaqar_ui-14.0.0.0b1-py3-none-any.whl/zaqar_ui/static/dashboard/admin/pool-flavors/actions/workflow.service.js
workflow.service.js
======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/zaqar.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ===== Zaqar ===== Zaqar is a multi-tenant cloud messaging and notification service for web and mobile developers. It combines the ideas pioneered by Amazon's SQS product with additional semantics to support event broadcasting. The service features a fully RESTful API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Underlying this API is an efficient messaging engine designed with scalability and security in mind. Other OpenStack components can integrate with Zaqar to surface events to end users and to communicate with guest agents that run in the "over-cloud" layer. Cloud operators can leverage Zaqar to provide equivalents of SQS and SNS to their customers. 
General information is available in wiki: https://wiki.openstack.org/wiki/Zaqar The API v2.0 (stable) specification and documentation are available at: https://wiki.openstack.org/wiki/Zaqar/specs/api/v2.0 Zaqar's Documentation, the source of which is in ``doc/source/``, is available at: https://docs.openstack.org/zaqar/latest Zaqar's Release notes are available at: https://docs.openstack.org/releasenotes/zaqar/ Contributors are encouraged to join IRC (``#openstack-zaqar`` channel on ``OFTC``): https://wiki.openstack.org/wiki/IRC Information on how to run unit and functional tests is available at: https://docs.openstack.org/zaqar/latest/contributor/running_tests.html Information on how to run benchmarking tool is available at: https://docs.openstack.org/zaqar/latest/admin/running_benchmark.html Zaqar's design specifications is tracked at: https://specs.openstack.org/openstack/zaqar-specs/ Using Zaqar ----------- If you are new to Zaqar and just want to try it, you can set up Zaqar in the development environment. Using Zaqar in production environment: Coming soon! Using Zaqar in development environment: The instruction is available at: https://docs.openstack.org/zaqar/latest/contributor/development.environment.html This will allow you to run local Zaqar server with MongoDB as database. This way is the easiest, quickest and most suitable for beginners.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/README.rst
README.rst
The source repository for this project can be found at: https://opendev.org/openstack/zaqar Pull requests submitted through GitHub are not monitored. To start contributing to OpenStack, follow the steps in the contribution guide to set up and use Gerrit: https://docs.openstack.org/contributors/code-and-documentation/quick-start.html Bugs should be filed on Launchpad: https://bugs.launchpad.net/zaqar For more specific information about contributing to this repository, see the zaqar contributor guide: https://docs.openstack.org/zaqar/latest/contributor/contributing.html
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/CONTRIBUTING.rst
CONTRIBUTING.rst
======================== Zaqar style commandments ======================== - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on for Zaqar specific commandments General ------- - Optimize for readability; whitespace is your friend. - Use blank lines to group related logic. - All classes must inherit from ``object`` (explicitly). - Use single-quotes for strings unless the string contains a single-quote. - Use the double-quote character for blockquotes (``"""``, not ``'''``) - USE_ALL_CAPS_FOR_GLOBAL_CONSTANTS Comments -------- - In general use comments as "memory pegs" for those coming after you up the trail. - Guide the reader though long functions with a comments introducing different sections of the code. - Choose clean, descriptive names for functions and variables to make them self-documenting. - Add ``# NOTE(termie): blah blah...`` comments to clarify your intent, or to explain a tricky algorithm, when it isn't obvious from just reading the code. Identifiers ----------- - Don't use single characters in identifiers except in trivial loop variables and mathematical algorithms. - Avoid abbreviations, especially if they are ambiguous or their meaning would not be immediately clear to the casual reader or newcomer. Wrapping -------- Wrap long lines by using Python's implied line continuation inside parentheses, brackets and braces. Make sure to indent the continued line appropriately. The preferred place to break around a binary operator is after the operator, not before it. Example:: class Rectangle(Blob): def __init__(self, width, height, color='black', emphasis=None, highlight=0): # More indentation included to distinguish this from the rest. 
if (width == 0 and height == 0 and color == 'red' and emphasis == 'strong' or highlight > 100): raise ValueError('sorry, you lose') if width == 0 and height == 0 and (color == 'red' or emphasis is None): raise ValueError("I don't think so -- values are {0}, {1}".format( width, height)) msg = ('this is a very long string that goes on and on and on and' 'on and on and on...') super(Rectangle, self).__init__(width, height, color, emphasis, highlight) Imports ------- - Classes and functions may be hoisted into a package namespace, via __init__ files, with some discretion. More Import Examples -------------------- **INCORRECT** :: import zaqar.transport.wsgi as wsgi **CORRECT** :: from zaqar.transport import wsgi Docstrings ---------- Docstrings are required for all functions and methods. Docstrings should ONLY use triple-double-quotes (``"""``) Single-line docstrings should NEVER have extraneous whitespace between enclosing triple-double-quotes. **INCORRECT** :: """ There is some whitespace between the enclosing quotes :( """ **CORRECT** :: """There is no whitespace between the enclosing quotes :)""" Docstrings should document default values for named arguments if they're not None Docstrings that span more than one line should look like this: Example:: """Single-line summary, right after the opening triple-double-quote. If you are going to describe parameters and return values, use Sphinx; the appropriate syntax is as follows. :param foo: the foo parameter :param bar: (Default True) the bar parameter :param foo_long_bar: the foo parameter description is very long so we have to split it in multiple lines in order to keep things ordered :returns: return_type -- description of the return value :returns: description of the return value :raises ValueError: if the message_body exceeds 160 characters :raises TypeError: if the message_body is not a basestring """ **DO NOT** leave an extra newline before the closing triple-double-quote. 
Creating Unit Tests ------------------- NOTE: 100% coverage is required Logging ------- Use __name__ as the name of your logger and name your module-level logger objects 'LOG':: LOG = logging.getLogger(__name__)
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/HACKING.rst
HACKING.rst
=======================
Rally job related files
=======================

This directory contains rally tasks and plugins that are run by OpenStack CI.

Structure:

* zaqar-zaqar.yaml is a rally task that will be run in gates

* plugins - directory where you can add rally plugins. Almost everything in
  Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks,
  Generic cleanup resources, ....

* extra - all files from this directory will be copied to gates, so you are
  able to use absolute paths in rally tasks. Files will be in ~/.rally/extra/*

Useful links:

* More about rally: https://rally.readthedocs.org/en/latest/

* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html

* About plugins: https://rally.readthedocs.org/en/latest/plugins.html

* Plugin samples: https://git.openstack.org/cgit/openstack/rally/tree/samples/plugins
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/rally-jobs/README.rst
README.rst
===== Zaqar ===== ====================== Enabling in Devstack ====================== 1. Download DevStack -------------------- For more info on devstack installation follow the below link: .. code-block:: ini https://docs.openstack.org/devstack/latest/ 2. Add this repo as an external repository ------------------------------------------ .. code-block:: ini cat > /opt/stack/devstack/local.conf << END [[local|localrc]] enable_plugin zaqar https://git.openstack.org/openstack/zaqar END 3. Run devstack -------------------- .. code-block:: ini cd /opt/stack/devstack ./stack.sh
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/devstack/README.rst
README.rst
# To enable a minimal set of Zaqar services, add the following to localrc:
#
#   enable_service zaqar-websocket zaqar-wsgi
#
# Dependencies:
# - functions
# - OS_AUTH_URL for auth in api
# - DEST set to the destination directory
# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
# - STACK_USER service user

# stack.sh
# ---------
# install_zaqar
# install_zaqarui
# configure_zaqar
# init_zaqar
# start_zaqar
# stop_zaqar
# cleanup_zaqar
# cleanup_zaqar_mongodb

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Functions
# ---------

# Test if any Zaqar services are enabled
# is_zaqar_enabled
function is_zaqar_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"zaqar" ]] && return 0
    return 1
}

# cleanup_zaqar() - Cleans up general things from previous
# runs and storage specific left overs.
function cleanup_zaqar {
    if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then
        cleanup_zaqar_mongodb
    fi
}

# cleanup_zaqar_mongodb() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
# After mongodb 6.0, the mongo shell has been remove, now using mongosh.
function cleanup_zaqar_mongodb {
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongosh zaqar --eval 'db.dropDatabase();'; do sleep 1; done"; then
        die $LINENO "Mongo DB did not start"
    else
        full_version=$(mongosh zaqar --eval 'db.dropDatabase();')
        mongo_version=`echo $full_version | cut -d' ' -f11`
        required_mongo_version='6.0'
        if [[ $mongo_version < $required_mongo_version ]]; then
            die $LINENO "Zaqar needs Mongo DB version >= 6.0 to run."
        fi
    fi
}

# configure_zaqarclient() - Set config files, create data dirs, etc
function configure_zaqarclient {
    setup_develop $ZAQARCLIENT_DIR
}

# configure_zaqar() - Set config files, create data dirs, etc
function configure_zaqar {
    setup_develop $ZAQAR_DIR

    # Configuration and log directories owned by the current user.
    [ ! -d $ZAQAR_CONF_DIR ] && sudo mkdir -m 755 -p $ZAQAR_CONF_DIR
    sudo chown $USER $ZAQAR_CONF_DIR

    [ ! -d $ZAQAR_API_LOG_DIR ] && sudo mkdir -m 755 -p $ZAQAR_API_LOG_DIR
    sudo chown $USER $ZAQAR_API_LOG_DIR

    # Base server settings.
    iniset $ZAQAR_CONF DEFAULT debug True
    iniset $ZAQAR_CONF DEFAULT unreliable True
    iniset $ZAQAR_CONF DEFAULT admin_mode True
    iniset $ZAQAR_CONF DEFAULT enable_deprecated_api_versions 1,1.1
    iniset $ZAQAR_CONF signed_url secret_key notreallysecret

    if is_service_enabled key; then
        iniset $ZAQAR_CONF DEFAULT auth_strategy keystone
    fi

    iniset $ZAQAR_CONF storage message_pipeline zaqar.notification.notifier

    # Enable pooling by default for now
    iniset $ZAQAR_CONF DEFAULT admin_mode True

    # Websocket transport settings.
    iniset $ZAQAR_CONF 'drivers:transport:websocket' bind $(ipv6_unquote $ZAQAR_SERVICE_HOST)
    iniset $ZAQAR_CONF 'drivers:transport:websocket' port $ZAQAR_WEBSOCKET_PORT
    iniset $ZAQAR_CONF drivers transport websocket

    configure_auth_token_middleware $ZAQAR_CONF zaqar $ZAQAR_AUTH_CACHE_DIR

    # Trustee credentials used for signed/pre-signed URL support.
    iniset $ZAQAR_CONF trustee auth_type password
    iniset $ZAQAR_CONF trustee auth_url $KEYSTONE_AUTH_URI
    iniset $ZAQAR_CONF trustee username $ZAQAR_TRUSTEE_USER
    iniset $ZAQAR_CONF trustee password $ZAQAR_TRUSTEE_PASSWORD
    iniset $ZAQAR_CONF trustee user_domain_id $ZAQAR_TRUSTEE_DOMAIN

    iniset $ZAQAR_CONF DEFAULT pooling True
    iniset $ZAQAR_CONF 'pooling:catalog' enable_virtual_pool True

    # NOTE(flaper87): Configure mongodb regardless so we can use it as a pool
    # in tests.
    configure_mongodb

    # Backend-specific message/management store configuration.
    if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then
        iniset $ZAQAR_CONF drivers message_store mongodb
        iniset $ZAQAR_CONF 'drivers:message_store:mongodb' uri mongodb://localhost:27017/zaqar
        iniset $ZAQAR_CONF 'drivers:message_store:mongodb' database zaqar

        iniset $ZAQAR_CONF drivers management_store mongodb
        iniset $ZAQAR_CONF 'drivers:management_store:mongodb' uri mongodb://localhost:27017/zaqar_mgmt
        iniset $ZAQAR_CONF 'drivers:management_store:mongodb' database zaqar_mgmt
    elif [ "$ZAQAR_BACKEND" = 'redis' ] ; then
        recreate_database zaqar
        iniset $ZAQAR_CONF drivers management_store sqlalchemy
        iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' uri `database_connection_url zaqar`
        iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' database zaqar_mgmt
        zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head

        iniset $ZAQAR_CONF drivers message_store redis
        iniset $ZAQAR_CONF 'drivers:message_store:redis' uri redis://localhost:6379
        iniset $ZAQAR_CONF 'drivers:message_store:redis' database zaqar
        configure_redis
    elif [ "$ZAQAR_BACKEND" = 'swift' ] ; then
        recreate_database zaqar
        iniset $ZAQAR_CONF drivers management_store sqlalchemy
        iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' uri `database_connection_url zaqar`
        iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' database zaqar_mgmt
        zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head

        iniset $ZAQAR_CONF drivers message_store swift
        iniset $ZAQAR_CONF 'drivers:message_store:swift' auth_url $KEYSTONE_AUTH_URI
        iniset $ZAQAR_CONF 'drivers:message_store:swift' uri swift://zaqar:$SERVICE_PASSWORD@/service
    fi

    # Notifications over the message bus, when one is available.
    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
        iniset $ZAQAR_CONF DEFAULT notification_driver messaging
        iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
    fi
    iniset_rpc_backend zaqar $ZAQAR_CONF DEFAULT

    # uwsgi configuration for the WSGI transport.
    pip_install uwsgi
    iniset $ZAQAR_UWSGI_CONF uwsgi master true
    iniset $ZAQAR_UWSGI_CONF uwsgi die-on-term true
    iniset $ZAQAR_UWSGI_CONF uwsgi exit-on-reload true
    iniset $ZAQAR_UWSGI_CONF uwsgi http $ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT
    iniset $ZAQAR_UWSGI_CONF uwsgi processes $API_WORKERS
    iniset $ZAQAR_UWSGI_CONF uwsgi enable_threads true
    iniset $ZAQAR_UWSGI_CONF uwsgi threads 4
    iniset $ZAQAR_UWSGI_CONF uwsgi thunder-lock true
    iniset $ZAQAR_UWSGI_CONF uwsgi buffer-size 65535
    iniset $ZAQAR_UWSGI_CONF uwsgi wsgi-file $ZAQAR_DIR/zaqar/transport/wsgi/app.py
    iniset $ZAQAR_UWSGI_CONF uwsgi master true
    iniset $ZAQAR_UWSGI_CONF uwsgi add-header "Connection: close"
    iniset $ZAQAR_UWSGI_CONF uwsgi lazy-apps true

    cleanup_zaqar
}

# Install and start redis, plus its python client.
function configure_redis {
    if is_ubuntu; then
        install_package redis-server
        pip_install redis
    elif is_fedora; then
        install_package redis
        pip_install redis
    else
        exit_distro_not_supported "redis installation"
    fi
}

# Install and start mongodb, plus pymongo.
function configure_mongodb {
    # Set nssize to 2GB. This increases the number of namespaces supported
    # per database.
    pip_install pymongo
    if is_ubuntu; then
        # NOTE: To fix the mongodb's issue in ubuntu 22.04 LTS
        ubuntu_version=`cat /etc/issue | cut -d " " -f2`
        if [[ $ubuntu_version > '22' ]]; then
            wget -qO - https://www.mongodb.org/static/pgp/server-6.0.asc | sudo apt-key add -
            echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu focal/mongodb-org/6.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-6.0.list
            sudo apt update
            curl -LO http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1-1ubuntu2.1~18.04.20_amd64.deb
            sudo dpkg -i ./libssl1.1_1.1.1-1ubuntu2.1~18.04.20_amd64.deb
            install_package mongodb-org
            restart_service mongod
        else
            install_package mongodb-server
            restart_service mongodb
        fi
    elif is_fedora; then
        install_package mongodb
        install_package mongodb-server
        restart_service mongod
    fi
}

# init_zaqar() - Initialize etc.
# init_zaqar() - Create and reset the keystonemiddleware auth-token cache dir.
function init_zaqar {
    # Create cache dir
    sudo mkdir -p $ZAQAR_AUTH_CACHE_DIR
    sudo chown $STACK_USER $ZAQAR_AUTH_CACHE_DIR
    rm -f $ZAQAR_AUTH_CACHE_DIR/*
}

# install_zaqar() - Collect source and prepare
function install_zaqar {
    setup_develop $ZAQAR_DIR
    # Zaqar dashboard plugin only makes sense when horizon is running.
    if is_service_enabled horizon; then
        install_zaqarui
    fi
}

# install_zaqarui() - Clone and install the zaqar-ui Horizon plugin and wire
# its panels into the dashboard.
function install_zaqarui {
    git_clone $ZAQARUI_REPO $ZAQARUI_DIR $ZAQARUI_BRANCH
    # NOTE(flwang): Workaround for devstack bug: 1540328
    # where devstack install 'test-requirements' but should not do it
    # for zaqar-ui project as it installs Horizon from url.
    # Remove following two 'mv' commands when mentioned bug is fixed.
    mv $ZAQARUI_DIR/test-requirements.txt $ZAQARUI_DIR/_test-requirements.txt
    setup_develop $ZAQARUI_DIR
    mv $ZAQARUI_DIR/_test-requirements.txt $ZAQARUI_DIR/test-requirements.txt
    cp -a $ZAQARUI_DIR/zaqar_ui/enabled/* $HORIZON_DIR/openstack_dashboard/local/enabled/
    # Compile translations if the plugin ships locale files.
    if [ -d $ZAQARUI_DIR/zaqar-ui/locale ]; then
        (cd $ZAQARUI_DIR/zaqar-ui; DJANGO_SETTINGS_MODULE=openstack_dashboard.settings ../manage.py compilemessages)
    fi
}

# install_zaqarclient() - Collect source and prepare
function install_zaqarclient {
    git_clone $ZAQARCLIENT_REPO $ZAQARCLIENT_DIR $ZAQARCLIENT_BRANCH
    # NOTE(flaper87): Ideally, this should be developed, but apparently
    # there's a bug in devstack that skips test-requirements when using
    # setup_develop
    setup_install $ZAQARCLIENT_DIR
}

# start_zaqar() - Start running processes, including screen
function start_zaqar {
    cat $ZAQAR_UWSGI_CONF
    run_process zaqar-wsgi "$ZAQAR_BIN_DIR/uwsgi --ini $ZAQAR_UWSGI_CONF --pidfile2 $ZAQAR_UWSGI_MASTER_PIDFILE"
    run_process zaqar-websocket "$ZAQAR_BIN_DIR/zaqar-server --config-file $ZAQAR_CONF"

    echo "Waiting for Zaqar to start..."
    local www_authenticate_uri=http://${ZAQAR_SERVICE_HOST}/identity
    # NOTE(review): token is deliberately not 'local' here? It is only used in
    # the wget probe below — confirm global scope is not relied on elsewhere.
    token=$(openstack token issue -c id -f value --os-auth-url ${www_authenticate_uri})
    # Poll the /v2/ping endpoint until it answers or SERVICE_TIMEOUT expires.
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q --header=\"Client-ID:$(uuidgen)\" --header=\"X-Auth-Token:$token\" -O- $ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT/v2/ping; do sleep 1; done"; then
        die $LINENO "Zaqar did not start"
    fi
}

# stop_zaqar() - Stop running processes
function stop_zaqar {
    local serv
    # Kill the zaqar screen windows
    for serv in zaqar-wsgi zaqar-websocket; do
        screen -S $SCREEN_NAME -p $serv -X kill
    done
    # Also stop the uWSGI master started by start_zaqar.
    uwsgi --stop $ZAQAR_UWSGI_MASTER_PIDFILE
}

# create_zaqar_accounts() - Create the service user, services and endpoints
# in keystone; grant ResellerAdmin for the swift message store.
function create_zaqar_accounts {
    create_service_user "zaqar"

    if [[ "$KEYSTONE_IDENTITY_BACKEND" = 'sql' ]]; then
        local zaqar_service=$(get_or_create_service "zaqar" \
            "messaging" "Zaqar Service")
        get_or_create_endpoint $zaqar_service \
            "$REGION_NAME" \
            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT"

        local zaqar_ws_service=$(get_or_create_service "zaqar-websocket" \
            "messaging-websocket" "Zaqar Websocket Service")
        get_or_create_endpoint $zaqar_ws_service \
            "$REGION_NAME" \
            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_WEBSOCKET_PORT" \
            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_WEBSOCKET_PORT" \
            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_WEBSOCKET_PORT"
    fi

    if [ "$ZAQAR_BACKEND" = 'swift' ] ; then
        get_or_add_user_project_role ResellerAdmin zaqar service
    fi
}

# DevStack plugin dispatcher: called with (stack|unstack, phase) from
# stack.sh/unstack.sh.
if is_service_enabled zaqar-websocket || is_service_enabled zaqar-wsgi; then
    if [[ "$1" == "stack" && "$2" == "install" ]]; then
        echo_summary "Installing Zaqar"
        install_zaqarclient
        install_zaqar
    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        echo_summary "Configuring Zaqar"
        configure_zaqar
        configure_zaqarclient

        if is_service_enabled key; then
            create_zaqar_accounts
        fi
    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        echo_summary "Initializing Zaqar"
        init_zaqar
        start_zaqar
    fi

    if [[ "$1" == "unstack" ]]; then
        stop_zaqar
    fi
fi

# Restore xtrace
$XTRACE

# Local variables:
# mode: shell-script
# End:
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/devstack/plugin.sh
plugin.sh
# ``upgrade-zaqar`` - grenade upgrade script: snapshot the old release's data
# stores, install the target release of zaqar, migrate the DB, restart the
# services and verify they come up, then snapshot the stores again.

echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"

# Clean up any resources that may be in use
cleanup() {
    set +o errexit

    echo "*********************************************************************"
    echo "ERROR: Abort $0"
    echo "*********************************************************************"

    # Kill ourselves to signal any calling process
    trap 2; kill -2 $$
}

trap cleanup SIGHUP SIGINT SIGTERM

# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)

# Source params
source $GRENADE_DIR/grenaderc
source $TOP_DIR/openrc admin admin

# Import common functions
source $GRENADE_DIR/functions

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Dump the pre-upgrade state of each configured store so a failed upgrade can
# be diagnosed/restored.
if grep -q '_store *= *mongodb' /etc/zaqar/zaqar.conf; then
    # mongo-tools is the name of the package which includes mongodump on
    # basically all distributions (Ubuntu, Debian, Fedora, CentOS and
    # openSUSE).
    install_package mongo-tools
fi

if grep -q 'management_store *= *mongodb' /etc/zaqar/zaqar.conf; then
    mongodump --db zaqar_mgmt --out $SAVE_DIR/zaqar-mongodb-mgmt-dump.$BASE_RELEASE
fi

if grep -q 'message_store *= *mongodb' /etc/zaqar/zaqar.conf; then
    mongodump --db zaqar --out $SAVE_DIR/zaqar-mongodb-message-dump.$BASE_RELEASE
fi

if grep -q 'message_store *= *redis' /etc/zaqar/zaqar.conf; then
    redis-cli save
    sudo cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$BASE_RELEASE.rdb
fi

# Upgrade Zaqar
# =============

# Duplicate some setup bits from target DevStack
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/tls

# Keep track of the DevStack directory
ZAQAR_DEVSTACK_DIR=$(dirname "$0")/..
source $ZAQAR_DEVSTACK_DIR/settings
source $ZAQAR_DEVSTACK_DIR/plugin.sh

# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace

# wait_for_keystone() - Block until the keystone endpoint answers or die.
function wait_for_keystone {
    local www_authenticate_uri=http://${ZAQAR_SERVICE_HOST}/identity
    if ! wait_for_service $SERVICE_TIMEOUT ${www_authenticate_uri}/v$IDENTITY_API_VERSION/; then
        die $LINENO "keystone did not start"
    fi
}

# Save current config files for posterity
[[ -d $SAVE_DIR/etc.zaqar ]] || cp -pr $ZAQAR_CONF_DIR $SAVE_DIR/etc.zaqar

stack_install_service zaqar

if grep -q 'management_store *= *sqlalchemy' /etc/zaqar/zaqar.conf; then
    zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head || die $LINENO "DB sync error"
fi

# calls upgrade-zaqar for specific release
upgrade_project zaqar $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH

wait_for_keystone
start_zaqar

# Don't succeed unless the services come up
ensure_services_started zaqar-server

# Dump the post-upgrade state of each configured store for comparison.
if grep -q 'management_store *= *mongodb' /etc/zaqar/zaqar.conf; then
    mongodump --db zaqar_mgmt --out $SAVE_DIR/zaqar-mongodb-mgmt-dump.$TARGET_RELEASE
fi

if grep -q 'message_store *= *mongodb' /etc/zaqar/zaqar.conf; then
    mongodump --db zaqar --out $SAVE_DIR/zaqar-mongodb-message-dump.$TARGET_RELEASE
fi

if grep -q 'message_store *= *redis' /etc/zaqar/zaqar.conf; then
    redis-cli save
    sudo cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$TARGET_RELEASE.rdb
fi

set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/devstack/upgrade/upgrade.sh
upgrade.sh
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======== Glossary ======== Messaging Service Concepts ========================== The Messaging Service is a multi-tenant, message queue implementation that utilizes a RESTful HTTP interface to provide an asynchronous communications protocol, which is one of the main requirements in today's scalable applications. .. glossary:: Queue Queue is a logical entity that groups messages. Ideally a queue is created per work type. For example, if you want to compress files, you would create a queue dedicated for this job. Any application that reads from this queue would only compress files. Message Message is sent through a queue and exists until it is deleted by a recipient or automatically by the system based on a TTL (time-to-live) value. Claim Claim is a mechanism to mark messages so that other workers will not process the same message. Worker Worker is an application that reads one or multiple messages from the queue. Producer Producer is an application that creates messages in one or multiple queues. Publish - Subscribe Publish - Subscribe is a pattern where all worker applications have access to all messages in the queue. Workers can not delete or update messages. Producer - Consumer Producer - Consumer is a pattern where each worker application that reads the queue has to claim the message in order to prevent duplicate processing. Later, when the work is done, the worker is responsible for deleting the message. 
If message is not deleted in a predefined time (claim TTL), it can be claimed by other workers. Message TTL Message TTL is time-to-live value and defines how long a message will be accessible. Claim TTL Claim TTL is time-to-live value and defines how long a message will be in claimed state. A message can be claimed by one worker at a time. Queues Database Queues database stores the information about the queues and the messages within these queues. Storage layer has to guarantee durability and availability of the data. Pooling If pooling enabled, queuing service uses multiple queues databases in order to scale horizontally. A pool (queues database) can be added anytime without stopping the service. Each pool has a weight that is assigned during the creation time but can be changed later. Pooling is done by queue which indicates that all messages for a particular queue can be found in the same pool (queues database). Catalog Database If pooling is enabled, catalog database has to be created. Catalog database maintains ``queues`` to ``queues database`` mapping. Storage layer has to guarantee durability and availability of data.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/glossary.rst
glossary.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===================================== Welcome to the Zaqar's Documentation! ===================================== Zaqar is a multi-tenant cloud messaging and notification service for web and mobile developers. The service features a REST API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Underlying this API is an efficient messaging engine designed with scalability and security in mind. The Websocket API is also available. Other OpenStack components can integrate with Zaqar to surface events to end users and to communicate with guest agents that run in the "over-cloud" layer. Key features ------------ Zaqar provides the following key features: * Choice between two communication transports. Both with Keystone support: * Firewall-friendly, **HTTP-based RESTful API**. Many of today's developers prefer a more web-friendly HTTP API. They value the simplicity and transparency of the protocol, its firewall-friendly nature, and its huge ecosystem of tools, load balancers and proxies. In addition, cloud operators appreciate the scalability aspects of the REST architectural style. * **Websocket-based API** for persistent connections. Websocket protocol provides communication over persistent connections. 
Unlike HTTP, where new connections are opened for each request/response pair, Websocket can transfer multiple requests/responses over single TCP connection. It saves much network traffic and minimizes delays. * Multi-tenant queues based on Keystone project IDs. * Support for several common patterns including event broadcasting, task distribution, and point-to-point messaging. * Component-based architecture with support for custom backends and message filters. * Efficient reference implementation with an eye toward low latency and high throughput (dependent on backend). * Highly-available and horizontally scalable. * Support for subscriptions to queues. Several notification types are available: * Email notifications. * Webhook notifications. * Websocket notifications. Project scope ------------- The Zaqar API is data-oriented. That is, it does not provision message brokers and expose those directly to clients. Instead, the API acts as a bridge between the client and one or more backends. A provisioning service for message brokers, however useful, serves a somewhat different market from what Zaqar is targeting today. With that in mind, if users are interested in a broker provisioning service, the community should consider starting a new project to address that need. Design principles ----------------- Zaqar, as with all OpenStack projects, is designed with the following guidelines in mind: * **Component-based architecture.** Quickly add new behaviors * **Highly available and scalable.** Scale to very serious workloads * **Fault tolerant.** Isolated processes avoid cascading failures * **Recoverable.** Failures should be easy to diagnose, debug, and rectify * **Open standards.** Be a reference implementation for a community-driven Contents -------- .. toctree:: :maxdepth: 2 user/index admin/index install/index configuration/index contributor/contributing contributor/index cli/index .. toctree:: :maxdepth: 1 glossary
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/index.rst
index.rst
============ zaqar-status ============ Synopsis ======== :: zaqar-status <category> <command> [<args>] Description =========== :program:`zaqar-status` is a tool that provides routines for checking the status of a Zaqar deployment. Options ======= The standard pattern for executing a :program:`zaqar-status` command is:: zaqar-status <category> <command> [<args>] Run without arguments to see a list of available command categories:: zaqar-status Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: zaqar-status upgrade These sections describe the available categories and arguments for :program:`zaqar-status`. Upgrade ~~~~~~~ .. _zaqar-status-checks: ``zaqar-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **8.0.0 (Stein)** * Placeholder to be filled in with checks as they are added in Stein.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/cli/zaqar-status.rst
zaqar-status.rst
.. _install: Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service, code-named zaqar. This section assumes that you already have a working OpenStack environment with at least Identity service installed. Note that installation and configuration vary by distribution. .. toctree:: install-obs.rst install-rdo.rst install-ubuntu.rst Possible Minimum Scalable HA Setup ---------------------------------- Scalable HA (High availability) setup is out of scope in this chapter. For a HA setup, a load balancer has to be placed in front of the web servers. To provide high availability with minimum administration overhead for storage use ``MongoDB`` driver and for transport use ``wsgi`` driver. To have a small footprint while providing HA, you can use two web servers which will host the application and three ``MongoDB`` servers (configured as replica-set) which will host Messaging service's management store and message store databases. At larger scale, the management store database and the message store database are advised to be hosted on different ``MongoDB`` replica sets.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/install/install.rst
install.rst
========================== Messaging service overview ========================== The Message service is multi-tenant, fast, reliable, and scalable. It allows developers to share data between distributed application components performing different tasks, without losing messages or requiring each component to be always available. The service features a RESTful API and a Websocket API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Key features ~~~~~~~~~~~~ The Messaging service provides the following key features: * Choice between two communication transports. Both with Identity service support: * Firewall-friendly, **HTTP-based RESTful API**. Many of today's developers prefer a more web-friendly HTTP API. They value the simplicity and transparency of the protocol, its firewall-friendly nature, and its huge ecosystem of tools, load balancers and proxies. In addition, cloud operators appreciate the scalability aspects of the REST architectural style. * **Websocket-based API** for persistent connections. Websocket protocol provides communication over persistent connections. Unlike HTTP, where new connections are opened for each request/response pair, Websocket can transfer multiple requests/responses over single TCP connection. It saves much network traffic and minimizes delays. * Multi-tenant queues based on Identity service IDs. * Support for several common patterns including event broadcasting, task distribution, and point-to-point messaging. * Component-based architecture with support for custom back ends and message filters. * Efficient reference implementation with an eye toward low latency and high throughput (dependent on back end). * Highly-available and horizontally scalable. * Support for subscriptions to queues. 
Several notification types are available: * Email notifications * Webhook notifications * Websocket notifications Layers of the Messaging service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Messaging service has following layers: * The transport layer (Messaging application) which can provide these APIs: * HTTP RESTful API (via ``wsgi`` driver). * Websocket API (via ``websocket`` driver). * The storage layer which keeps all the data and metadata about queues and messages. It has two sub-layers: * The management store database (Catalog). Can be ``MongoDB`` database (or ``MongoDB`` replica-set) or SQL database. * The message store databases (Pools). Can be ``MongoDB`` database (or ``MongoDB`` replica-set) or ``Redis`` database.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/install/get_started.rst
get_started.rst
================== Installation Guide ================== .. toctree:: get_started.rst install.rst verify.rst next-steps.rst The Messaging service is multi-tenant, fast, reliable, and scalable. It allows developers to share data between distributed application components performing different tasks, without losing messages or requiring each component to be always available. The service features a RESTful API and a Websocket API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. This chapter assumes a working setup of OpenStack following the base Installation Guide. Ocata ~~~~~ To install Zaqar, see the Ocata Messaging service install guide for each distribution: - `Ubuntu <https://docs.openstack.org/project-install-guide/messaging/ocata/install-ubuntu.html>`__ - `CentOS and RHEL <https://docs.openstack.org/project-install-guide/messaging/ocata/install-rdo.html>`__ - `openSUSE and SUSE Linux Enterprise <https://docs.openstack.org/project-install-guide/messaging/ocata/install-obs.html>`__ Newton ~~~~~~ To install Zaqar, see the Newton Messaging service install guide for each distribution: - `Ubuntu <https://docs.openstack.org/project-install-guide/messaging/newton/install-ubuntu.html>`__ - `CentOS and RHEL <https://docs.openstack.org/project-install-guide/messaging/newton/install-rdo.html>`__ - `openSUSE and SUSE Linux Enterprise <https://docs.openstack.org/project-install-guide/messaging/newton/install-obs.html>`__
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/install/index.rst
index.rst
.. _install-rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service, code-named ``zaqar`` for Red Hat Enterprise Linux 7 and CentOS 7. This section assumes that you already have a working OpenStack environment with at least Identity service installed. Here you can find instructions and recommended settings for installing Messaging service in small configuration: one web server with Messaging service configured to use replica-set of three ``MongoDB`` database servers. Because only one web server is used, the Messaging service installed by using these instructions can't be considered as high available, see :doc:`install`. In this tutorial these server names are used as examples: * Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``. * Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``, ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``. * Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``. Prerequisites ------------- Before you install Messaging service, you must meet the following system requirements: * Installed Identity service for user and project management. * Python 2.7. Before you install and configure Messaging, you must create a ``MongoDB`` replica-set of three database servers. Also you need to create service credentials and API endpoints in Identity. #. Install and configure ``MongoDB`` replica-set on database servers: #. Install ``MongoDB`` on the database servers: On each database server follow the official `MongoDB installation instructions`_. .. note:: Messaging service works with ``MongoDB`` versions >= 2.4 #. Configure ``MongoDB`` on the database servers: On each database server edit configuration file: ``/etc/mongod.conf`` and modify as needed: .. code-block:: ini # MongoDB sample configuration for Messaging service. 
# (For MongoDB version >= 2.6) # Edit according to your needs. systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log storage: dbPath: /var/lib/mongo journal: enabled: false processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile net: port: 27017 # bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces. operationProfiling: slowOpThresholdMs: 200 mode: slowOp replication: oplogSizeMB: 2048 replSetName: catalog .. note:: In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration file should be written in different format. For information about format for different versions see the official `MongoDB configuration reference`_. .. warning:: Additional steps are required to secure ``MongoDB`` installation. You should modify this configuration for your security requirements. See the official `MongoDB security reference`_. #. Start ``MongoDB`` on the database servers: Start ``MongoDB`` service on all database servers: .. code-block:: console # systemctl start mongod Make ``MongoDB`` service start automatically after reboot: .. code-block:: console # systemctl enable mongod #. Configure ``MongoDB`` Replica Set on the database servers: Once you've installed ``MongoDB`` on three servers and assuming that the primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go to ``MYDB0`` and run these commands: .. code-block:: console # mongo local --eval "printjson(rs.initiate())" # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" .. note:: The database servers must have access to each other and also be accessible from the Messaging service web server. Configure firewalls on all database servers to accept incoming connections to port ``27017`` from the needed source. To check if the replica-set is established see the output of this command: .. 
code-block:: console # mongo local --eval "printjson(rs.status())" #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create the ``zaqar`` user: .. code-block:: console $ openstack user create --domain default --password-prompt zaqar User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | 7b0ffc83097148dab6ecbef6ddcc46bf | | name | zaqar | +-----------+----------------------------------+ #. Add the ``admin`` role to the ``zaqar`` user: .. code-block:: console $ openstack role add --project service --user zaqar admin .. note:: This command provides no output. #. Create the ``zaqar`` service entity: .. code-block:: console $ openstack service create --name zaqar --description "Messaging" messaging +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Messaging | | enabled | True | | id | b39c22818be5425ba2315dd4b10cd57c | | name | zaqar | | type | messaging | +-------------+----------------------------------+ #. Create the Messaging service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | aabca78860e74c4db0bcb36167bfe106 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 07f9524613de4fd3905e13a87f81fd3f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 686f7b19428f4b5aa1425667dfe4f49d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ Install and configure Messaging web server ------------------------------------------ Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server ``WEB0.EXAMPLE-MESSAGES.NET``. #. 
Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order to cache Identity service tokens and catalog mappings: .. code-block:: console # yum install memcached Start ``memcached`` service: .. code-block:: console # systemctl start memcached Make ``memcached`` service start automatically after reboot: .. code-block:: console # systemctl enable memcached #. Install Messaging service and ``uWSGI``: .. code-block:: console # yum install python-pip # git clone https://git.openstack.org/openstack/zaqar.git # cd zaqar # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log # pip install --upgrade pymongo gevent uwsgi #. Create Zaqar configiration directory ``/etc/zaqar/``: .. code-block:: console # mkdir /etc/zaqar #. Customize the policy file: .. code-block:: console # oslopolicy-sample-generator --config-file etc/zaqar-policy-generator.conf # cp etc/zaqar.policy.yaml.sample /etc/zaqar/policy.yaml Edit any item as needed in policy.yaml. .. note:: By default, if you do not need custom policy file, you do not need to perform the above steps, then zaqar will use the code's default policy. #. Create log file: .. code-block:: console # touch /var/log/zaqar-server.log # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log # chmod 600 /var/log/zaqar-server.log Replace ``ZAQARUSER`` with the name of the user in system under which the Messaging service will run. #. Create ``/srv/zaqar`` folder to store ``uWSGI`` configuration files: .. code-block:: console # mkdir /srv/zaqar #. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: .. code-block:: python from keystonemiddleware import auth_token from zaqar.transport.wsgi import app app = auth_token.AuthProtocol(app.app, {}) #. Increase backlog listen limit from default (128): .. code-block:: console # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf #. Create ``/srv/zaqar/uwsgi.ini`` file with the following content and modify as needed: .. 
code-block:: ini [uwsgi] https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY pidfile = /var/run/zaqar.pid gevent = 2000 gevent-monkey-patch = true listen = 1024 enable-threads = true chdir = /srv/zaqar module = zaqar_uwsgi:app workers = 4 harakiri = 60 add-header = Connection: close Replace ``PATH_TO_SERVER_CRT`` with path to the server's certificate (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with path to the server's private key (``*.key``). .. note:: The ``uWSGI`` configuration options above can be modified for different security and performance requirements including load balancing. See the official `uWSGI configuration reference`_. #. Create pid file: .. code-block:: console # touch /var/run/zaqar.pid # chown ZAQARUSER:ZAQARUSER /var/run/zaqar.pid Replace ``ZAQARUSER`` with the name of the user in system under which the Messaging service will run. #. Create Messaging service's configuration file ``/etc/zaqar/zaqar.conf`` with the following content: .. code-block:: ini [DEFAULT] # Show debugging output in logs (sets DEBUG log level output) #debug = False # Pooling and admin mode configs pooling = True admin_mode = True # Log to file log_file = /var/log/zaqar-server.log # This is taken care of in our custom app.py, so disable here ;auth_strategy = keystone # Modify to make it work with your Identity service. [keystone_authtoken] project_domain_name = Default user_domain_name = Default project_domain_id = default project_name = service user_domain_id = default # File path to a PEM encoded Certificate Authority to use when verifying # HTTPs connections. Defaults to system CAs if commented. cafile = PATH_TO_CA_FILE # Messaging service user name in Identity service. username = ZAQARIDENTITYUSER # Messaging service password in Identity service. password = ZAQARIDENTITYPASSWORD # Complete public Identity API endpoint (HTTPS protocol is more preferable # than HTTP). 
www_authenticate_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Complete admin Identity API endpoint (HTTPS protocol is more preferable # than HTTP). identity_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Token cache time in seconds. token_cache_time = TOKEN_CACHE_TIME memcached_servers = 127.0.0.1:11211 [cache] # Dogpile.cache backend module. It is recommended that Memcache with # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be # used in production deployments. Small workloads (single process) # like devstack can use the dogpile.cache.memory backend. (string # value) backend = dogpile.cache.memory memcache_servers = 127.0.0.1:11211 [drivers] transport = wsgi message_store = mongodb management_store = mongodb [drivers:management_store:mongodb] # Mongodb Connection URI. If ssl connection enabled, then ssl_keyfile, # ssl_certfile, ssl_cert_reqs, ssl_ca_certs options need to be set # accordingly. uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred # Name for the database on mongodb server. database = zaqarmanagementstore # Number of databases across which to partition message data, in order # to reduce writer lock %. DO NOT change this setting after initial # deployment. It MUST remain static. Also, you should not need a large # number of partitions to improve performance, esp. if deploying # MongoDB on SSD storage. (integer value) partitions = 8 # Uncomment any options below if needed. # Maximum number of times to retry a failed operation. Currently # only used for retrying a message post. ;max_attempts = 1000 # Maximum sleep interval between retries (actual sleep time # increases linearly according to number of attempts performed). ;max_retry_sleep = 0.1 # Maximum jitter interval, to be added to the sleep interval, in # order to decrease probability that parallel requests will retry # at the same instant. 
;max_retry_jitter = 0.005 # Frequency of message garbage collections, in seconds ;gc_interval = 5 * 60 # Threshold of number of expired messages to reach in a given # queue, before performing the GC. Useful for reducing frequent # locks on the DB for non-busy queues, or for worker queues # which process jobs quickly enough to keep the number of in- # flight messages low. # # Note: The higher this number, the larger the memory-mapped DB # files will be. ;gc_threshold = 1000 [drivers:message_store:mongodb] # This section has same set of available options as # "[drivers:management_store:mongodb]" section. # # If pooling is enabled, all pools inherit values from options in these # settings unless overridden in pool creation request. Also "uri" option # value isn't used in case of pooling. # # If ssl connection enabled, then ssl_keyfile, ssl_certfile, ssl_cert_reqs, # ssl_ca_certs options need to be set accordingly. # Name for the database on MongoDB server. database = zaqarmessagestore [transport] max_queues_per_page = 1000 max_queue_metadata = 262144 max_messages_per_page = 10 max_messages_post_size = 262144 max_message_ttl = 1209600 max_claim_ttl = 43200 max_claim_grace = 43200 [signed_url] # Secret key used to encrypt pre-signed URLs. (string value) secret_key = SOMELONGSECRETKEY Edit any options as needed, especially the options with capitalized values. #. Create a service file for Messaging service ``/etc/systemd/system/zaqar.uwsgi.service``: .. code-block:: ini [Unit] Description=uWSGI Zaqar After=syslog.target [Service] ExecStart=/usr/bin/uwsgi --ini /srv/zaqar/uwsgi.ini # Requires systemd version 211 or newer RuntimeDirectory=uwsgi Restart=always KillSignal=SIGQUIT Type=notify StandardError=syslog NotifyAccess=all User=ZAQARUSER Group=ZAQARUSER [Install] WantedBy=multi-user.target Replace ``ZAQARUSER`` with the name of the user in the system under which the Messaging service will run. 
Finalize installation --------------------- Now after you have configured the web server and the database servers to have a functional Messaging service, you need to start the service, make the service automatically start with the system and define the created ``MongoDB`` replica-set as Messaging's pool. #. Start Messaging service on the web server: .. code-block:: console # systemctl start zaqar.uwsgi.service #. Make Messaging service start automatically after reboot on the web server: .. code-block:: console # systemctl enable zaqar.uwsgi.service #. Configure pool: .. code-block:: console # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \ -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" \ -H "Content-type: application/json" \ Replace ``POOL1`` variable with the desired name of a pool. Replace ``CLIENT_ID`` variable with the universally unique identifier (UUID) which can be generated by, for example, ``uuidgen`` utility. Replace ``TOKEN`` variable with the authentication token retrieved from Identity service. If you choose not to enable Keystone authentication you won't have to pass a token. .. note:: The ``options`` key in curl request above overrides any options (specified in configuration file or default) in ``[drivers:message_store:mongodb]`` Messaging service configuration file's section. .. tip:: In larger deployments, there should be many load balanced web servers. Also the management store databases and the message store databases (pools) should be on different ``MongoDB`` replica-sets. .. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-red-hat/ .. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/ .. 
_`MongoDB security reference`: https://docs.mongodb.org/manual/security/ .. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/install/install-rdo.rst
install-rdo.rst
.. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Messaging service by creating messages via curl utility: .. code-block:: console $ curl -i -X POST http://ZAQAR_ENDPOINT:8888/v2/queues/samplequeue/messages \ -d '{"messages": [{"body": {"event": 1}, "ttl": 600}, {"body": {"event": 2}, "ttl": 600}]}' \ -H "Content-type: application/json" \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" Replace ``CLIENT_ID`` variable with the universally unique identifier (UUID) which can be generated by, for example, ``uuidgen`` utility. Replace ``TOKEN`` variable with the authentication token retrieved from Identity service. If you choose not to enable Keystone authentication you won't have to pass a token. Replace ``ZAQAR_ENDPOINT`` variable with the endpoint of Messaging service. The normal response would be with status code 201 and look something like this: .. code-block:: console HTTP/1.1 201 Created content-length: 135 content-type: application/json; charset=UTF-8 location: http://ZAQAR_ENDPOINT:8888/v2/queues/samplequeue/messages?ids=575f6f2515e5c87d779a9b20,575f6f2515e5c87d779a9b21 Connection: close {"resources": ["/v2/queues/samplequeue/messages/575f6f2515e5c87d779a9b20", "/v2/queues/samplequeue/messages/575f6f2515e5c87d779a9b21"]}
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/install/verify.rst
verify.rst
.. _install-ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service for Ubuntu 14.04 (LTS). This section assumes that you already have a working OpenStack environment with at least Identity service installed. Here you can find instructions and recommended settings for installing Messaging service in small configuration: one web server with Messaging service configured to use replica-set of three ``MongoDB`` database servers. Because only one web server is used, the Messaging service installed by using these instructions can't be considered highly available, see :doc:`install`. In this tutorial these server names are used as examples: * Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``. * Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``, ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``. * Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``. Prerequisites ------------- Before you install Messaging service, you must meet the following system requirements: * Installed Identity service for user and project management. * Python 2.7. Before you install and configure Messaging, you must create a ``MongoDB`` replica-set of three database servers. Also you need to create service credentials and API endpoints in Identity. #. Install and configure ``MongoDB`` replica-set on database servers: #. Install ``MongoDB`` on the database servers: On each database server follow the official `MongoDB installation instructions`_. .. note:: Messaging service works with ``MongoDB`` versions >= 2.4 #. Configure ``MongoDB`` on the database servers: On each database server edit configuration file: ``/etc/mongod.conf`` and modify as needed: .. code-block:: ini # MongoDB sample configuration for Messaging service. # (For MongoDB version >= 2.6) # Edit according to your needs. 
systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log storage: dbPath: /var/lib/mongo journal: enabled: false processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile net: port: 27017 # bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces. operationProfiling: slowOpThresholdMs: 200 mode: slowOp replication: oplogSizeMB: 2048 replSetName: catalog .. note:: In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration file should be written in different format. For information about format for different versions see the official `MongoDB configuration reference`_. .. warning:: Additional steps are required to secure ``MongoDB`` installation. You should modify this configuration for your security requirements. See the official `MongoDB security reference`_. #. Start ``MongoDB`` on the database servers: Start ``MongoDB`` service on all database servers: .. code-block:: console # service mongodb start #. Configure ``MongoDB`` Replica Set on the database servers: Once you've installed ``MongoDB`` on three servers and assuming that the primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go to ``MYDB0`` and run these commands: .. code-block:: console # mongo local --eval "printjson(rs.initiate())" # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" .. note:: The database servers must have access to each other and also be accessible from the Messaging service web server. Configure firewalls on all database servers to accept incoming connections to port ``27017`` from the needed source. To check if the replica-set is established see the output of this command: .. code-block:: console # mongo local --eval "printjson(rs.status())" #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . 
admin-openrc #. To create the service credentials, complete these steps: #. Create the ``zaqar`` user: .. code-block:: console $ openstack user create --domain default --password-prompt zaqar User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | 7b0ffc83097148dab6ecbef6ddcc46bf | | name | zaqar | +-----------+----------------------------------+ #. Add the ``admin`` role to the ``zaqar`` user: .. code-block:: console $ openstack role add --project service --user zaqar admin .. note:: This command provides no output. #. Create the ``zaqar`` service entity: .. code-block:: console $ openstack service create --name zaqar --description "Messaging" messaging +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Messaging | | enabled | True | | id | b39c22818be5425ba2315dd4b10cd57c | | name | zaqar | | type | messaging | +-------------+----------------------------------+ #. Create the Messaging service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | aabca78860e74c4db0bcb36167bfe106 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 07f9524613de4fd3905e13a87f81fd3f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 686f7b19428f4b5aa1425667dfe4f49d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ Install and configure Messaging web server ------------------------------------------ Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server ``WEB0.EXAMPLE-MESSAGES.NET``. #. 
Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order to cache Identity service tokens and catalog mappings: .. code-block:: console # apt-get install memcached Start ``memcached`` service: .. code-block:: console # service memcached start #. Install Messaging service and ``uWSGI``: .. code-block:: console # apt-get install python-pip # git clone https://git.openstack.org/openstack/zaqar.git # cd zaqar # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log # pip install --upgrade pymongo gevent uwsgi #. Create Zaqar configuration directory ``/etc/zaqar/``: .. code-block:: console # mkdir /etc/zaqar #. Customize the policy file: .. code-block:: console # oslopolicy-sample-generator --config-file etc/zaqar-policy-generator.conf # cp etc/zaqar.policy.yaml.sample /etc/zaqar/policy.yaml Edit any item as needed in policy.yaml. .. note:: By default, if you do not need a custom policy file, you do not need to perform the above steps, then zaqar will use the code's default policy. #. Create log file: .. code-block:: console # touch /var/log/zaqar-server.log # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log # chmod 600 /var/log/zaqar-server.log Replace ``ZAQARUSER`` with the name of the user in the system under which the Messaging service will run. #. Create ``/srv/zaqar`` folder to store ``uWSGI`` configuration files: .. code-block:: console # mkdir /srv/zaqar #. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: .. code-block:: python from keystonemiddleware import auth_token from zaqar.transport.wsgi import app app = auth_token.AuthProtocol(app.app, {}) #. Increase backlog listen limit from default (128): .. code-block:: console # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf #. Create ``/srv/zaqar/uwsgi.ini`` file with the following content and modify as needed: .. 
code-block:: ini [uwsgi] https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY pidfile = /var/run/zaqar.pid gevent = 2000 gevent-monkey-patch = true listen = 1024 enable-threads = true chdir = /srv/zaqar module = zaqar_uwsgi:app workers = 4 harakiri = 60 add-header = Connection: close Replace ``PATH_TO_SERVER_CRT`` with path to the server's certificate (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with path to the server's private key (``*.key``). .. note:: The ``uWSGI`` configuration options above can be modified for different security and performance requirements including load balancing. See the official `uWSGI configuration reference`_. #. Create pid file: .. code-block:: console # touch /var/run/zaqar.pid # chown ZAQARUSER:ZAQARUSER /var/run/zaqar.pid Replace ``ZAQARUSER`` with the name of the user in system under which the Messaging service will run. #. Create Messaging service's configuration file ``/etc/zaqar/zaqar.conf`` with the following content: .. code-block:: ini [DEFAULT] # Show debugging output in logs (sets DEBUG log level output) #debug = False # Pooling and admin mode configs pooling = True admin_mode = True # Log to file log_file = /var/log/zaqar-server.log # This is taken care of in our custom app.py, so disable here ;auth_strategy = keystone # Modify to make it work with your Identity service. [keystone_authtoken] project_domain_name = Default user_domain_name = Default project_domain_id = default project_name = service user_domain_id = default # File path to a PEM encoded Certificate Authority to use when verifying # HTTPs connections. Defaults to system CAs if commented. cafile = PATH_TO_CA_FILE # Messaging service user name in Identity service. username = ZAQARIDENTITYUSER # Messaging service password in Identity service. password = ZAQARIDENTITYPASSWORD # Complete public Identity API endpoint (HTTPS protocol is more preferable # than HTTP). 
www_authenticate_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Complete admin Identity API endpoint (HTTPS protocol is more preferable # than HTTP). identity_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Token cache time in seconds. token_cache_time = TOKEN_CACHE_TIME memcached_servers = 127.0.0.1:11211 [cache] # Dogpile.cache backend module. It is recommended that Memcache with # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be # used in production deployments. Small workloads (single process) # like devstack can use the dogpile.cache.memory backend. (string # value) backend = dogpile.cache.memory memcache_servers = 127.0.0.1:11211 [drivers] transport = wsgi message_store = mongodb management_store = mongodb [drivers:management_store:mongodb] # Mongodb Connection URI. If ssl connection enabled, then ssl_keyfile, # ssl_certfile, ssl_cert_reqs, ssl_ca_certs options need to be set # accordingly. uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred # Name for the database on mongodb server. database = zaqarmanagementstore # Number of databases across which to partition message data, in order # to reduce writer lock %. DO NOT change this setting after initial # deployment. It MUST remain static. Also, you should not need a large # number of partitions to improve performance, esp. if deploying # MongoDB on SSD storage. (integer value) partitions = 8 # Uncomment any options below if needed. # Maximum number of times to retry a failed operation. Currently # only used for retrying a message post. ;max_attempts = 1000 # Maximum sleep interval between retries (actual sleep time # increases linearly according to number of attempts performed). ;max_retry_sleep = 0.1 # Maximum jitter interval, to be added to the sleep interval, in # order to decrease probability that parallel requests will retry # at the same instant. 
;max_retry_jitter = 0.005 # Frequency of message garbage collections, in seconds ;gc_interval = 5 * 60 # Threshold of number of expired messages to reach in a given # queue, before performing the GC. Useful for reducing frequent # locks on the DB for non-busy queues, or for worker queues # which process jobs quickly enough to keep the number of in- # flight messages low. # # Note: The higher this number, the larger the memory-mapped DB # files will be. ;gc_threshold = 1000 [drivers:message_store:mongodb] # This section has same set of available options as # "[drivers:management_store:mongodb]" section. # # If pooling is enabled, all pools inherit values from options in these # settings unless overridden in pool creation request. Also "uri" option # value isn't used in case of pooling. # # If ssl connection enabled, then ssl_keyfile, ssl_certfile, ssl_cert_reqs, # ssl_ca_certs options need to be set accordingly. # Name for the database on MongoDB server. database = zaqarmessagestore [transport] max_queues_per_page = 1000 max_queue_metadata = 262144 max_messages_per_page = 10 max_messages_post_size = 262144 max_message_ttl = 1209600 max_claim_ttl = 43200 max_claim_grace = 43200 [signed_url] # Secret key used to encrypt pre-signed URLs. (string value) secret_key = SOMELONGSECRETKEY Edit any options as needed, especially the options with capitalized values. #. Create an upstart config, it could be named as ``/etc/init/zaqar.conf``: .. 
code-block:: bash description "Zaqar api server" author "Your Name <[email protected]>" start on runlevel [2345] stop on runlevel [!2345] chdir /var/run pre-start script mkdir -p /var/run/zaqar chown zaqar:zaqar /var/run/zaqar mkdir -p /var/lock/zaqar chown zaqar:root /var/lock/zaqar end script exec /usr/bin/uwsgi --master --emperor /etc/zaqar/uwsgi Finalize installation --------------------- Now after you have configured the web server and the database servers to have a functional Messaging service, you need to start the service, make the service automatically start with the system and define the created ``MongoDB`` replica-set as Messaging's pool. #. Start Messaging service on the web server: .. code-block:: console # systemctl start zaqar.uwsgi.service #. Make Messaging service start automatically after reboot on the web server: .. code-block:: console # systemctl enable zaqar.uwsgi.service #. Configure pool: .. code-block:: console # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \ -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" \ -H "Content-type: application/json" \ Replace ``POOL1`` variable with the desired name of a pool. Replace ``CLIENT_ID`` variable with the universally unique identifier (UUID) which can be generated by, for example, ``uuidgen`` utility. Replace ``TOKEN`` variable with the authentication token retrieved from Identity service. If you choose not to enable Keystone authentication you won't have to pass a token. .. note:: The ``options`` key in curl request above overrides any options (specified in configuration file or default) in ``[drivers:message_store:mongodb]`` Messaging service configuration file's section. .. tip:: In larger deployments, there should be many load balanced web servers. 
Also the management store databases and the message store databases (pools) should be on different ``MongoDB`` replica-sets. .. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ .. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/ .. _`MongoDB security reference`: https://docs.mongodb.org/manual/security/ .. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/install/install-ubuntu.rst
install-ubuntu.rst
.. _install-obs: Install and configure for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1. This section assumes that you already have a working OpenStack environment with at least Identity service installed. Here you can find instructions and recommended settings for installing Messaging service in small configuration: one web server with Messaging service configured to use replica-set of three ``MongoDB`` database servers. Because only one web server is used, the Messaging service installed by using these instructions can't be considered highly available, see :doc:`install`. In this tutorial these server names are used as examples: * Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``. * Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``, ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``. * Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``. Prerequisites ------------- Before you install Messaging service, you must meet the following system requirements: * Installed Identity service for user and project management. * Python 2.7. Before you install and configure Messaging, you must create a ``MongoDB`` replica-set of three database servers. Also you need to create service credentials and API endpoints in Identity. #. Install and configure ``MongoDB`` replica-set on database servers: #. Install ``MongoDB`` on the database servers: On each database server follow the official `MongoDB installation instructions`_. .. note:: Messaging service works with ``MongoDB`` versions >= 2.4 #. Configure ``MongoDB`` on the database servers: On each database server edit configuration file: ``/etc/mongod.conf`` and modify as needed: .. code-block:: ini # MongoDB sample configuration for Messaging service. # (For MongoDB version >= 2.6) # Edit according to your needs. 
systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log storage: dbPath: /var/lib/mongo journal: enabled: false processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile net: port: 27017 # bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces. operationProfiling: slowOpThresholdMs: 200 mode: slowOp replication: oplogSizeMB: 2048 replSetName: catalog .. note:: In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration file should be written in different format. For information about format for different versions see the official `MongoDB configuration reference`_. .. warning:: Additional steps are required to secure ``MongoDB`` installation. You should modify this configuration for your security requirements. See the official `MongoDB security reference`_. #. Start ``MongoDB`` on the database servers: Start ``MongoDB`` service on all database servers: .. code-block:: console # service mongod start Make ``MongoDB`` service start automatically after reboot: .. code-block:: console # chkconfig mongod on #. Configure ``MongoDB`` Replica Set on the database servers: Once you've installed ``MongoDB`` on three servers and assuming that the primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go to ``MYDB0`` and run these commands: .. code-block:: console # mongo local --eval "printjson(rs.initiate())" # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" .. note:: The database servers must have access to each other and also be accessible from the Messaging service web server. Configure firewalls on all database servers to accept incoming connections to port ``27017`` from the needed source. To check if the replica-set is established see the output of this command: .. code-block:: console # mongo local --eval "printjson(rs.status())" #. 
Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create the ``zaqar`` user: .. code-block:: console $ openstack user create --domain default --password-prompt zaqar User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | 7b0ffc83097148dab6ecbef6ddcc46bf | | name | zaqar | +-----------+----------------------------------+ #. Add the ``admin`` role to the ``zaqar`` user: .. code-block:: console $ openstack role add --project service --user zaqar admin .. note:: This command provides no output. #. Create the ``zaqar`` service entity: .. code-block:: console $ openstack service create --name zaqar --description "Messaging" messaging +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Messaging | | enabled | True | | id | b39c22818be5425ba2315dd4b10cd57c | | name | zaqar | | type | messaging | +-------------+----------------------------------+ #. Create the Messaging service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | aabca78860e74c4db0bcb36167bfe106 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 07f9524613de4fd3905e13a87f81fd3f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 686f7b19428f4b5aa1425667dfe4f49d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ Install and configure Messaging web server ------------------------------------------ Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server ``WEB0.EXAMPLE-MESSAGES.NET``. #. 
Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order to cache Identity service tokens and catalog mappings: .. code-block:: console # zypper install memcached Start ``memcached`` service: .. code-block:: console # /etc/init.d/memcached start Make ``memcached`` service start automatically after reboot: .. code-block:: console # chkconfig memcached on #. Install Messaging service and ``uWSGI``: .. code-block:: console # zypper install python-pip # git clone https://git.openstack.org/openstack/zaqar.git # cd zaqar # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log # pip install --upgrade pymongo gevent uwsgi #. Create Zaqar configuration directory ``/etc/zaqar/``: .. code-block:: console # mkdir /etc/zaqar #. Customize the policy file: .. code-block:: console # oslopolicy-sample-generator --config-file etc/zaqar-policy-generator.conf # cp etc/zaqar.policy.yaml.sample /etc/zaqar/policy.yaml Edit any item as needed in policy.yaml. .. note:: By default, if you do not need a custom policy file, you do not need to perform the above steps, then zaqar will use the code's default policy. #. Create log file: .. code-block:: console # touch /var/log/zaqar-server.log # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log # chmod 600 /var/log/zaqar-server.log Replace ``ZAQARUSER`` with the name of the user in the system under which the Messaging service will run. #. Create ``/srv/zaqar`` folder to store ``uWSGI`` configuration files: .. code-block:: console # mkdir /srv/zaqar #. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: .. code-block:: python from keystonemiddleware import auth_token from zaqar.transport.wsgi import app app = auth_token.AuthProtocol(app.app, {}) #. Increase backlog listen limit from default (128): .. code-block:: console # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf #. Create ``/srv/zaqar/uwsgi.ini`` file with the following content and modify as needed: .. 
code-block:: ini [uwsgi] https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY pidfile = /var/run/zaqar.pid gevent = 2000 gevent-monkey-patch = true listen = 1024 enable-threads = true chdir = /srv/zaqar module = zaqar_uwsgi:app workers = 4 harakiri = 60 add-header = Connection: close Replace ``PATH_TO_SERVER_CRT`` with path to the server's certificate (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with path to the server's private key (``*.key``). .. note:: The ``uWSGI`` configuration options above can be modified for different security and performance requirements including load balancing. See the official `uWSGI configuration reference`_. #. Create pid file: .. code-block:: console # touch /var/run/zaqar.pid # chown ZAQARUSER:ZAQARUSER /var/run/zaqar.pid Replace ``ZAQARUSER`` with the name of the user in system under which the Messaging service will run. #. Create Messaging service's configuration file ``/etc/zaqar/zaqar.conf`` with the following content: .. code-block:: ini [DEFAULT] # Show debugging output in logs (sets DEBUG log level output) #debug = False # Pooling and admin mode configs pooling = True admin_mode = True # Log to file log_file = /var/log/zaqar-server.log # This is taken care of in our custom app.py, so disable here ;auth_strategy = keystone # Modify to make it work with your Identity service. [keystone_authtoken] project_domain_name = Default user_domain_name = Default project_domain_id = default project_name = service user_domain_id = default # File path to a PEM encoded Certificate Authority to use when verifying # HTTPs connections. Defaults to system CAs if commented. cafile = PATH_TO_CA_FILE # Messaging service user name in Identity service. username = ZAQARIDENTITYUSER # Messaging service password in Identity service. password = ZAQARIDENTITYPASSWORD # Complete public Identity API endpoint (HTTPS protocol is more preferable # than HTTP). 
www_authenticate_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Complete admin Identity API endpoint (HTTPS protocol is more preferable # than HTTP). auth_url = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Token cache time in seconds. token_cache_time = TOKEN_CACHE_TIME memcached_servers = 127.0.0.1:11211 [cache] # Dogpile.cache backend module. It is recommended that Memcache with # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be # used in production deployments. Small workloads (single process) # like devstack can use the dogpile.cache.memory backend. (string # value) backend = dogpile.cache.memory memcache_servers = 127.0.0.1:11211 [drivers] transport = wsgi message_store = mongodb management_store = mongodb [drivers:management_store:mongodb] # Mongodb Connection URI. If ssl connection enabled, then ssl_keyfile, # ssl_certfile, ssl_cert_reqs, ssl_ca_certs options need to be set # accordingly. uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred # Name for the database on mongodb server. database = zaqarmanagementstore # Number of databases across which to partition message data, in order # to reduce writer lock %. DO NOT change this setting after initial # deployment. It MUST remain static. Also, you should not need a large # number of partitions to improve performance, esp. if deploying # MongoDB on SSD storage. (integer value) partitions = 8 # Uncomment any options below if needed. # Maximum number of times to retry a failed operation. Currently # only used for retrying a message post. ;max_attempts = 1000 # Maximum sleep interval between retries (actual sleep time # increases linearly according to number of attempts performed). ;max_retry_sleep = 0.1 # Maximum jitter interval, to be added to the sleep interval, in # order to decrease probability that parallel requests will retry # at the same instant. 
;max_retry_jitter = 0.005 # Frequency of message garbage collections, in seconds ;gc_interval = 5 * 60 # Threshold of number of expired messages to reach in a given # queue, before performing the GC. Useful for reducing frequent # locks on the DB for non-busy queues, or for worker queues # which process jobs quickly enough to keep the number of in- # flight messages low. # # Note: The higher this number, the larger the memory-mapped DB # files will be. ;gc_threshold = 1000 [drivers:message_store:mongodb] # This section has same set of available options as # "[drivers:management_store:mongodb]" section. # # If pooling is enabled, all pools inherit values from options in these # settings unless overridden in pool creation request. Also "uri" option # value isn't used in case of pooling. # # If ssl connection enabled, then ssl_keyfile, ssl_certfile, ssl_cert_reqs, # ssl_ca_certs options need to be set accordingly. # Name for the database on MondoDB server. database = zaqarmessagestore [transport] max_queues_per_page = 1000 max_queue_metadata = 262144 max_mesages_per_page = 10 max_messages_post_size = 262144 max_message_ttl = 1209600 max_claim_ttl = 43200 max_claim_grace = 43200 [signed_url] # Secret key used to encrypt pre-signed URLs. (string value) secret_key = SOMELONGSECRETKEY Edit any options as needed, especially the options with capitalized values. #. Create a service file for Messaging service ``/etc/systemd/system/zaqar-uwsgi.service``: .. code-block:: ini [Unit] Description=uWSGI Zaqar After=syslog.target [Service] ExecStart=/usr/bin/uwsgi --ini /srv/zaqar/uwsgi.ini # Requires systemd version 211 or newer RuntimeDirectory=uwsgi Restart=always KillSignal=SIGQUIT Type=notify StandardError=syslog NotifyAccess=all User=ZAQARUSER Group=ZAQARUSER [Install] WantedBy=multi-user.target Replace ``ZAQARUSER`` with the name of the user in system under which the Messaging service will run. 
Finalize installation --------------------- Now after you have configured the web server and the database servers to have a functional Messaging service, you need to start the service, make the service automatically start with the system and define the created ``MongoDB`` replica-set as Messaging's pool. #. Start Messaging service on the web server: .. code-block:: console # systemctl start zaqar-uwsgi.service #. Make Messaging service start automatically after reboot on the web server: .. code-block:: console # systemctl enable zaqar-uwsgi.service #. Configure pool: .. code-block:: console # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \ -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" \ -H "Content-type: application/json" \ Replace ``POOL1`` variable with the desired name of a pool. Replace ``CLIENT_ID`` variable with the universally unique identifier (UUID) which can be generated by, for example, ``uuidgen`` utility. Replace ``TOKEN`` variable with the authentication token retrieved from Identity service. If you choose not to enable Keystone authentication you won't have to pass a token. .. note:: The ``options`` key in curl request above overrides any options (specified in configuration file or default) in ``[drivers:message_store:mongodb]`` Messaging service configuration file's section. .. tip:: In larger deployments, there should be many load balanced web servers. Also the management store databases and the message store databases (pools) should be on different ``MongoDB`` replica-sets. .. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-suse/ .. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/ .. 
_`MongoDB security reference`: https://docs.mongodb.org/manual/security/ .. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/install/install-obs.rst
install-obs.rst
======================================== Writing stages for the storage pipelines ======================================== Introduction ~~~~~~~~~~~~ A pipeline is a set of stages needed to process a request. When a new request comes to Zaqar, first the message goes through the transport layer pipeline and then through one of the storage layer pipelines depending on the type of operation of each particular request. For example, if Zaqar receives a request to make a queue-related operation, the storage layer pipeline will be ``queue pipeline``. Zaqar always has the actual storage controller as the final storage layer pipeline stage. By setting the options in the ``[storage]`` section of ``zaqar.conf`` you can add additional stages to these storage layer pipelines: * **Claim pipeline** * **Message pipeline** with built-in stage available to use: * ``zaqar.notification.notifier`` - sends notifications to the queue subscribers on each incoming message to the queue, i.e. enables notifications functionality. * **Queue pipeline** * **Subscription pipeline** The storage layer pipelines options are empty by default, because additional stages can affect the performance of Zaqar. Depending on the stages, the sequence in which the option values are listed does matter or not. You can add your own external stages to the storage layer pipelines. Things to know before writing the stage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Stages in the pipeline must implement storage controller methods they need to hook. You can find all available to hook methods in the abstract classes in ``zaqar/storage/base.py``. For example, if you're looking for all methods available to hook for the queue storage layer pipeline, see ``Queue`` class in ``zaqar/storage/base.py``. As you can see, Zaqar's built-in stage ``zaqar.notification.notifier`` implements ``post`` method of ``zaqar.storage.base.Message`` abstract class. 
A stage can halt the pipeline immediately by returning
code-block:: python from setuptools import setup, find_packages setup( name='ubershystages', version='1.0', description='Demonstration package for Zaqar with plugin pipeline stage', author='Ubershy', author_email='[email protected]', url='', classifiers=['Development Status :: 3 - Alpha', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Intended Audience :: Developers', 'Environment :: Console', ], platforms=['Any'], scripts=[], packages=find_packages(), include_package_data=True, entry_points={ 'zaqar.storage.stages': [ 'ubershy.lovelyplugin = ubershystages.queues.lovely:LovelyStage', ], }, zip_safe=False, ) ``lovely.py``: .. code-block:: python class LovelyStage(object): """This stage: 1. Prints 'Lovely stage is processing request...' on each queue creation or deletion request. 2. Prints 'Oh, what a lovely day!' on each creation request of a queue named 'lovely'. 3. Prevents deletion of a queue named 'lovely' and prints 'Secretly keeping lovely queue' on such attempt. """ def __init__(self, *args, **kwargs): print("Lovely stage is loaded!") def create(self, name, metadata=None, project=None): """Stage's method which processes queue creation request. :param name: The queue name :param project: Project id """ self.printprocessing() if name == 'lovely': print("Oh, what a lovely day!") def delete(self, name, project=None): """Stage's method which processes queue deletion request. :param name: The queue name :param project: Project id :returns: Something non-None, if the queue has a name 'lovely'. It will stop further processing through the other stages of the pipeline, and the request will not reach the storage controller driver, preventing queue deletion from the database. 
""" self.printprocessing() if name == 'lovely': print('Secretly keeping lovely queue') something = "shhh... it's a bad practice" return something def printprocessing(self): print('Lovely stage is processing request...') To install the package to the system in the root directory of the package run: .. code-block:: console # pip install -e . In ``zaqar.conf`` add ``ubershy.lovelyplugin`` to the ``queue_pipeline`` option: .. code-block:: ini [storage] queue_pipeline = ubershy.lovelyplugin Start Zaqar: .. code-block:: console $ zaqar-server If the stage has successfully loaded to Zaqar you will see amongst terminal output lines the ``Lovely stage is loaded!`` line. Then you can try to perform queue create and queue delete operations with the queue 'lovely' and see what will happen in Zaqar's database. .. note:: You can hold multiple stages in one package, just be sure that all stages will be registered as entry points. For example, in the ``setup.py`` you can register additional ``ubershy.nastyplugin`` stage: .. code-block:: python entry_points={ 'zaqar.storage.stages': [ 'ubershy.lovelyplugin = ubershystages.queues.lovely:LovelyStage', 'ubershy.nastyplugin = ubershystages.messages.nasty:NastyStage', ], }, .. _`Stevedore - Creating Plugins`: https://docs.openstack.org/stevedore/latest/user/tutorial/creating_plugins.html .. _`Stevedore - Loading the Plugins`: https://docs.openstack.org/stevedore/latest/user/tutorial/loading.html
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/admin/writing_pipeline_stages.rst
writing_pipeline_stages.rst
.. Copyright (c) 2017 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Guru Meditation Reports ======================= Zaqar contains a mechanism whereby developers and system administrators can generate a report about the state of a running Zaqar executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- For wsgi and websocket mode, a *GMR* can be generated by sending the *USR2* signal to any Zaqar process with support (see below). The *GMR* will then be outputted standard error for that particular process. For example, suppose that ``zaqar-server`` has process id ``8675``, and was run with ``2>/var/log/zaqar/zaqar-server-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/zaqar/zaqar-server-err.log``. For uwsgi mode, user should add a configuration in Zaqar's conf file:: [oslo_reports] file_event_handler=['The path to a file to watch for changes to trigger ' 'the reports, instead of signals. Setting this option ' 'disables the signal trigger for the reports.'] file_event_handler_interval=['How many seconds to wait between polls when ' 'file_event_handler is set, default value ' 'is 1'] For example, you can specify "file_event_handler=/tmp/guru_report" and "file_event_handler_interval=1" in Zaqar's conf file. A *GMR* can be generated by "touch"ing the file which was specified in file_event_handler. 
The *GMR* will then output to standard error for that particular process. For example, suppose that ``zaqar-server`` was run with ``2>/var/log/zaqar/zaqar-server-err.log``, and the file path is ``/tmp/guru_report``. Then, ``touch /tmp/guru_report`` will trigger the Guru Meditation report to be printed to ``/var/log/zaqar/zaqar-server-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation about oslo.reports: `oslo.reports <https://docs.openstack.org/oslo.reports/latest/>`_
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/admin/gmr.rst
gmr.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================ OSprofiler Guide ================ OSprofiler is a library from oslo. It's used for performance analysis. Please see `Official Doc`_ for more detail. Preparation ----------- OSprofiler now supports some kind of backends, such as Ceilometer, ElasticSearch , Messaging and MongoDB. .. note:: 1. Ceilometer is only used for data collection, and Messaging is only used for data transfer. So Ceilometer only works when Messaging is enabled. 2. ElasticSearch and MongoDB support both data collection and transfer. So they can be used standalone. In this guide, we take MongoDB for example. There are some new config options. **enabled** Enables the profiling for all services on this node. Default value is False (fully disable the profiling feature). This function may bring down Zaqar's performance, so please set to disable in production environment. **connection_string** Connection string for a notifier backend. Default value is messaging:// which sets the notifier to oslo_messaging. Here we set it to "mongodb://localhost:27017" **hmac_keys** Secret key(s) to use for encrypting context data for performance profiling. This string value should have the following format: <key1>[,<key2>,...<keyn>], where each key is some random string. A user who triggers the profiling via the REST API has to set one of these keys in the headers of the REST API call to include profiling results of this node for this particular project. 
**trace_wsgi_transport**, **trace_message_store** and **trace_management_store** The three layers during a user's request flow. Set to True to enable tracing for each layer. So In this example, we should add the following config options:: [profiler] enabled = True connection_string = mongodb://localhost:27017 hmac_keys = 123 trace_wsgi_transport = True trace_message_store = True trace_management_store = True .. note:: If you want to use MQ and Ceilometer, please leave the **connection_string** empty or indicate the MQ information. And please make sure that the following config options have be set in Ceilometer.conf :: [DEFAULT] event_dispatchers = database [oslo_messaging_notifications] topics = notifications, profiler Then restart Zaqar service Command Line ------------ we can use OpenStack Client to analyse the user request now. For example, if we want to know the performance for "queue list", we can do like this: 1. OpenStack Client now supports OSprofiler by default. Only thing we need to do is adding ``--os-profile {hmac_keys}`` in the command:: openstack queue list --os-profile 123 "123" here is what we set in Zaqar config file. After the request is done, OpenStack Client will return a trace ID like:: Trace ID: 2902c7a3-ee18-4b08-aae7-4e34388f9352 Display trace with command: osprofiler trace show --html 2902c7a3-ee18-4b08-aae7-4e34388f9352 Now the trace information has been stored in MongoDB already. 2. Use the command from the openstack client return information. The osprofiler command uses Ceilometer for data collection by default, so we need use ``--connection-string`` to change it to mongoDB here:: osprofiler trace show --html 2902c7a3-ee18-4b08-aae7-4e34388f9352 --connection-string mongodb://localhost:27017 Then you can see the analysis information in html format now. 
It also supports json format:: osprofiler trace show --json 2902c7a3-ee18-4b08-aae7-4e34388f9352 --connection-string mongodb://localhost:27017 Of course it supports to save the result to a file:: osprofiler trace show --json 2902c7a3-ee18-4b08-aae7-4e34388f9352 --out list_test --connection-string mongodb://localhost:27017 Then you can open the file "list_test" to get the result. .. note:: If you used MQ for data transfer, the "--connection-string" here could be ignored or set it to your Ceilometer endpoint. .. _Official Doc: https://docs.openstack.org/osprofiler/latest/user/background.html
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/admin/OSprofiler.rst
OSprofiler.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Running benchmark ================= Introduction ------------ This document describes how to run benchmarking tool. Zaqar Contributors can use this tool to test how the particular code change affects Zaqar's performance. Usage ----- 1. First install and run zaqar-server. For example, you can setup Zaqar in development environment. See :doc:`../contributor/development.environment`. 2. In your terminal cd into your local Zaqar repo and install additional requirements: .. code-block:: console $ pip install -r bench-requirements.txt 3. Copy the configuration file to ~/.zaqar: .. code-block:: console $ cp etc/zaqar-benchmark.conf.sample ~/.zaqar/zaqar-benchmark.conf 4. In this configuration file specify where zaqar-server can be found: .. code-block:: ini server_url = http://localhost:8888 5. The benchmarking tool needs a set of messages to work with. Specify the path to the file with messages in the configuration file. Alternatively, put it in the directory with the configuration file and name it ``zaqar-benchmark-messages.json``. As a starting point, you can use the sample file from the etc directory: .. code-block:: console $ cp etc/zaqar-benchmark-messages.json ~/.zaqar/ If the file is not found or no file is specified, a single hard-coded message is used for all requests. 6. Run the benchmarking tool using the following command: .. 
code-block:: console $ zaqar-bench By default, the command will run a performance test for 5 seconds, using one producer process with 10 greenlet workers, and one observer process with 5 workers. The consumer role is disabled by default. You can override these defaults in the config file or on the command line using a variety of options. For example, the following command runs a performance test for 30 seconds using 4 producer processes with 20 workers each, plus 4 consumer processes with 20 workers each. Note that the observer role is also disabled in this example by setting its number of workers to zero: .. code-block:: console $ zaqar-bench -pp 4 -pw 10 -cp 4 -cw 20 -ow 0 -t 30 By default, the results are in human-readable format. For JSON output add the ``--noverbose`` flag. The non-verbose output looks similar to the following: .. code-block:: console $ zaqar-bench --noverbose Using 'envvars' credentials Using 'keystone' authentication method Benchmarking Zaqar API v2... {"params": {"consumer": {"processes": 1, "workers": 0}, "observer": {"processes": 1, "workers": 5}, "producer": {"processes": 1, "workers": 10}}, "consumer": {"claim_total_requests": 0, "ms_per_claim": 0, "total_reqs": 0, "reqs_per_sec": 0, "successful_reqs": 0, "duration_sec": 0, "ms_per_delete": 0, "messages_processed": 0}, "producer": {"duration_sec": 8.569170951843262, "ms_per_req": 201.715140507139, "total_reqs": 29, "successful_reqs": 29, "reqs_per_sec": 3.384224700729303}, "observer": {"duration_sec": 8.481178045272827, "ms_per_req": 407.40778711107043, "total_reqs": 18, "successful_reqs": 18, "reqs_per_sec": 2.122346672115049}} By default, zaqar-bench is benchmarking Zaqar API version 2. To run benchmark against other API versions use ``-api`` parameter. For example: .. 
code-block:: console $ zaqar-bench -api 1.1 Configuring zaqar-bench to use Keystone authentication ###################################################### It's possible to use zaqar-bench with Keystone authentication, if your Zaqar is configured to use Keystone authentication method and the Keystone service is running. For example, this is always true when running DevStack_ with unmodified ``zaqar.conf``. Let's configure zaqar-bench too to use Keystone: #. Set zaqar-bench's authentication method to Keystone. By default zaqar-bench is using ``noauth`` method. This can be changed by setting the environment variable ``OS_AUTH_STRATEGY`` to ``keystone``. To set this environment variable: * temporarily, run: .. code-block:: console $ export OS_AUTH_STRATEGY=keystone * permanently, add this line to your ``~/bashrc`` file: .. code-block:: bash export OS_AUTH_STRATEGY=keystone Reboot your computer or just run in the terminal where you will start zaqar-bench: .. code-block:: console $ source ~/.bashrc #. Set Keystone credentials for zaqar-bench. * If you're running Zaqar under DevStack, **you can omit this step**, because zaqar-bench will automatically get administrator or user credentials from the one of the files created by DevStack: either from ``/etc/openstack/clouds.yaml`` file or from ``~/.config/openstack/clouds.yaml`` file, if it exists. * If you're running manually configured Zaqar with manually configured Keystone (not under DevStack): Add these lines to your ``~/.bashrc`` file and specify the valid Keystone credentials: .. code-block:: bash export OS_AUTH_URL="http://<your keystone endpoint>/v2.0" export OS_USERNAME="<keystone user name>" export OS_PASSWORD="<the user's password>" export OS_PROJECT_NAME="<keystone project name for the user>" Reboot your computer or just run in the terminal where you will start zaqar-bench: .. code-block:: console $ source ~/.bashrc #. Run zaqar-bench as usual, for example: .. 
code-block:: console $ zaqar-bench If everything is properly configured, zaqar-bench must show the line ``Using 'keystone' authentication method`` and execute without authentication errors. .. _DevStack: https://docs.openstack.org/devstack/latest/
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/admin/running_benchmark.rst
running_benchmark.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========== CORS Guide ========== Zaqar supports Cross-Origin Resource Sharing (CORS) now. The function is provided by oslo.middleware. Please see `Official Doc`_ and `OpenStack Spec`_ for more detail. This guide is mainly tell users how to use it in Zaqar. New Config Options ------------------ There are some new config options. **allowed_origin** Indicate whether this resource may be shared with the domain received in the requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing slash. Example: https://horizon.example.com'. **allow_credentials** Indicate that the actual request can include user credentials. The default value is True. **expose_headers** Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers. The default value is []. **max_age** Maximum cache age of CORS preflight requests. The default value is 3600. **allow_methods** Indicate which methods can be used during the actual request. The default value is ['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'PATCH']. **allow_headers** Indicate which header field names may be used during the actual request. The default value is []. Request and Response example ---------------------------- The CORS feature is enabled by default in Zaqar. Here is a config example::: [cors] allowed_origin = http://example allow_methods = GET the above example config options mean that Zaqar only receive the GET request from http://example domain. 
Here are some example request: 1. Zaqar will do nothing if the request doesn't contain "Origin" header:: # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" HTTP/1.1 300 Multiple Choices content-length: 668 content-type: application/json; charset=UTF-8 Connection: close 2. Zaqar will return nothing in response headers if the "Origin" is not in ``allowed_origin``:: # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://" HTTP/1.1 300 Multiple Choices content-length: 668 content-type: application/json; charset=UTF-8 Connection: close In the Zaqar log, we can see a message:: CORS request from origin 'http://' not permitted. 3. Zaqar will return CORS information if the "Origin" header is in ``allowed_origin``:: # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://example" HTTP/1.1 300 Multiple Choices content-length: 668 content-type: application/json; charset=UTF-8 Vary: Origin Access-Control-Allow-Origin: http://example Access-Control-Allow-Credentials: true Connection: close 4. Zaqar will return more information if the request doesn't follow Zaqar's\ CORS rule:: # curl -I -X PUT http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://example" HTTP/1.1 405 Method Not Allowed content-length: 0 content-type: application/json; charset=UTF-8 allow: GET, OPTIONS Vary: Origin Access-Control-Allow-Origin: http://example Access-Control-Allow-Credentials: true Connection: close .. _Official Doc: https://docs.openstack.org/oslo.middleware/latest/reference/cors.html .. _OpenStack Spec: https://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/admin/CORS.rst
CORS.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================== The subscription Confirm Guide ============================== The subscription confirm feature now supports webhook and email with both the MongoDB and Redis backends. This guide shows how to use this feature: Webhook ------- .. note:: You should make sure that the message notification is enabled. By default, the ``message_pipeline`` config option in the [storage] section should be set like: message_pipeline = zaqar.notification.notifier 1. Set the config option "require_confirmation" and add the policy to the policy.yaml file. Then restart the Zaqar-wsgi service:: In the config file: [notification] require_confirmation = True In the policy.yaml file: "subscription:confirm": "" 2. Create a subscription. Here we use zaqar/samples/zaqar/subscriber_service_sample.py as the subscriber endpoint, for example. So before step 2, you should start the subscriber service first. The service could be started simply by the command:: python zaqar/samples/zaqar/subscriber_service_sample.py The service's default port is 5678. If you want to use a new port, the command will be like:: python zaqar/samples/zaqar/subscriber_service_sample.py new_port_number The service will not confirm the subscription automatically by default. 
If you want to do that, the command will be like:: python zaqar/samples/zaqar/subscriber_service_sample.py --auto-confirm Then create a subscription:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/subscriptions \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"subscriber":"http://10.229.47.217:5678", "ttl":3600, "options":{}}' The response:: HTTP/1.1 201 Created content-length: 47 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/subscriptions Connection: close {"subscription_id": "576256b03990b480617b4063"} At the same time, If the subscriber sample service is not start by "--auto confirm", you will receive a POST request in the subscriber sample service, the request is like:: WARNING:root:{"UnsubscribeBody": {"confirmed": false}, "URL-Methods": "PUT", "X-Project-ID": "51be2c72393e457ebf0a22a668e10a64", "URL-Paths": "/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm", "URL-Expires": "2016-07-06T04:35:56", "queue_name": "test", "SubscribeURL": ["/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm"], "SubscribeBody": {"confirmed": true}, "URL-Signature": "d4038a40589cdb61cd13d5a6997472f5be779db441dd8fe0c597a6e465f30c41", "Message": "You have chosen to subscribe to the queue: test", "Message_Type": "SubscriptionConfirmation"} 10.229.47.217 - - [06/Jul/2016 11:35:56] "POST / HTTP/1.1" 200 - If you start the sample service with "--auto confirm", please go to step 6 directly, because the step 5 will be done by the service automatically. 3. Get the subscription. 
The request:: curl -i -X GET http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063 \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" The response:: HTTP/1.1 200 OK content-length: 154 content-type: application/json; charset=UTF-8 Connection: close {"confirmed": false, "age": 73, "id": "576256b03990b480617b4063", "subscriber": "http://10.229.47.217:5678", "source": "test", "ttl": 3600, "options": {}} You can find that the "confirmed" property is false by default. 4. Post a message to the subscription's queue The request:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/messages \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"messages": [{"ttl": 3600,"body": "test123"}]}' The response:: HTTP/1.1 201 Created content-length: 68 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/messages?ids=57624dee3990b4634d71bb4a Connection: close {"resources": ["/v2/queues/test/messages/57624dee3990b4634d71bb4a"]} The subscriber received nothing and you will find a log info in zaqar-wsgi.:: 2016-07-06 11:37:57.929 98400 INFO zaqar.notification.notifier [(None,)2473911afe2642c0b74d7e1200d9bba7 51be2c72393e457ebf0a22a668e10a64 - - -] The subscriber http://10.229.47.217:5678 is not confirmed. 5. 
Use the information showed in step3 to confirm the subscription The request:: curl -i -X PUT http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "URL-Methods: PUT" -H "X-Project-ID: 51be2c72393e457ebf0a22a668e10a64" \ -H "URL-Signature: d28dced4eabbb09878a73d9a7a651df3a3ce5434fcdb6c3727decf6c7078b282" \ -H "URL-Paths: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm" \ -H "URL-Expires: 2016-06-16T08:35:12" -d '{"confirmed": true}' The response:: HTTP/1.1 204 No Content location: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm Connection: close 6. Repeat step3 to get the subscription The request:: curl -i -X GET http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063 \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" The response:: HTTP/1.1 200 OK content-length: 155 content-type: application/json; charset=UTF-8 Connection: close {"confirmed": true, "age": 1370, "id": "576256b03990b480617b4063", "subscriber": "http://10.229.47.217:5678", "source": "test", "ttl": 3600, "options": {}} The subscription is confirmed now. 7. Repeat step4 to post a new message. 
The request:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/messages \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"messages": [{"ttl": 3600,"body": "test123"}]}' The response:: HTTP/1.1 201 Created content-length: 68 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/messages?ids=5762526d3990b474c80d5483 Connection: close {"resources": ["/v2/queues/test/messages/5762526d3990b474c80d5483"]} Then in subscriber sample service, you will receive a request:: WARNING:root:{"body": {"event": "BackupStarted"}, "queue_name": "test", "Message_Type": "Notification", "ttl": 3600} 10.229.47.217 - - [06/Jul/2016 13:19:07] "POST / HTTP/1.1" 200 - 8. Unsubscription. The request:: curl -i -X PUT http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "URL-Methods: PUT" -H "X-Project-ID: 51be2c72393e457ebf0a22a668e10a64" \ -H "URL-Signature: d28dced4eabbb09878a73d9a7a651df3a3ce5434fcdb6c3727decf6c7078b282" \ -H "URL-Paths: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm" \ -H "URL-Expires: 2016-06-16T08:35:12" -d '{"confirmed": false}' The response:: HTTP/1.1 204 No Content location: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm Connection: close Then try to post a message. The subscriber will not receive the notification any more. Email ----- 1. For the email confirmation way, also need to set the config option "external_confirmation_url", "subscription_confirmation_email_template" and "unsubscribe_confirmation_email_template". The confirmation page url that will be used in email subscription confirmation before notification, this page is not hosted in Zaqar server, user should build their own web service to provide this web page. 
The subscription_confirmation_email_template lets the user customize the subscription confirmation email content, including topic, body and sender. The unsubscribe_confirmation_email_template lets the user customize the unsubscribe confirmation email content, including topic, body and sender too:: In the config file: [notification] require_confirmation = True external_confirmation_url = http://web_service_url/ subscription_confirmation_email_template = topic:Zaqar Notification - Subscription Confirmation,\ body:'You have chosen to subscribe to the queue: {0}. This queue belongs to project: {1}. To confirm this subscription, click or visit this link below: {2}',\ sender:Zaqar Notifications <[email protected]> unsubscribe_confirmation_email_template = topic: Zaqar Notification - Unsubscribe Confirmation,\ body:'You have unsubscribed successfully to the queue: {0}. This queue belongs to project: {1}. To resubscribe this subscription, click or visit this link below: {2}',\ sender:Zaqar Notifications <[email protected]> In the policy.yaml file: "subscription:confirm": "" 2. Create a subscription. For email confirmation, you should create a subscription like this:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/subscriptions \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"subscriber":"your email address", "ttl":3600, "options":{}}' The response:: HTTP/1.1 201 Created content-length: 47 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/subscriptions Connection: close {"subscription_id": "576256b03990b480617b4063"} After the subscription is created, Zaqar will send an email to the subscriber's email address. The email specifies how to confirm the subscription. 3. Click the confirmation page link in the email body 4. The confirmation page will send the subscription confirmation request to the Zaqar server automatically. 
The user can also choose to unsubscribe by clicking the unsubscription link on this page, which will cause Zaqar to cancel this subscription and send another email to notify this unsubscription action. Zaqar provides two examples of those web pages that will help users build their own pages:: zaqar/sample/html/subscriptionConfirmation.html zaqar/sample/html/unsubscriptionConfirmation.html Users can place those pages in a web server like Apache to access them via a browser, so the external_confirmation_url will be like this:: http://127.0.0.1:8080/subscriptionConfirmation.html For CORS, here we use zaqar/samples/html/confirmation_web_service_sample.py as a simple web service for example; it will relay the confirmation request to the Zaqar Server. So before Step 3, you should start the web service first. The service could be started simply by the command:: python zaqar/samples/html/confirmation_web_service_sample.py The service's default port is 5678. If you want to use a new port, the command will be like:: python zaqar/samples/html/confirmation_web_service_sample.py new_port_number
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/admin/subscription_confirm.rst
subscription_confirm.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================================== Continuous integration with Jenkins =================================== Zaqar uses a `Jenkins`_ server to automate development tasks. The Jenkins front-end is at http://jenkins.openstack.org. You must have an account on `Launchpad`_ to be able to access the OpenStack Jenkins site. Jenkins performs tasks such as running static code analysis, running unit tests, and running functional tests. For more details on the jobs being run by Jenkins, see the code reviews on https://review.opendev.org. Tests are run automatically and comments are put on the reviews automatically with the results. You can also get a view of the jobs that are currently running from the Zuul status dashboard, http://zuul.openstack.org/. .. _Jenkins: http://jenkins-ci.org .. _Launchpad: http://launchpad.net
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/jenkins.rst
jenkins.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================== Setting up a development environment ==================================== This section describes how to setup a working Python development environment that you can use in developing Zaqar on Ubuntu or Fedora. These instructions assume that you are familiar with Git. Refer to GettingTheCode_ for additional information. .. _GettingTheCode: https://wiki.openstack.org/wiki/Getting_The_Code Virtual environments -------------------- Use virtualenv_ to track and manage Python dependencies for developing and testing Zaqar. Using virtualenv_ enables you to install Python dependencies in an isolated virtual environment, instead of installing the packages at the system level. .. _virtualenv: https://pypi.org/project/virtualenv .. note:: Virtualenv is useful for development purposes, but is not typically used for full integration testing or production usage. If you want to learn about production best practices, check out the `OpenStack Operations Guide`_. .. _`OpenStack Operations Guide`: https://wiki.openstack.org/wiki/OpsGuide Install GNU/Linux system dependencies ##################################### .. note:: This section is tested for Zaqar on Ubuntu 14.04 (Trusty) and Fedora-based (RHEL 6.1) distributions. Feel free to add notes and change according to your experiences or operating system. Learn more about contributing to Zaqar documentation in the :doc:`welcome` manual. Install the prerequisite packages. On Ubuntu: .. 
code-block:: console $ sudo apt-get install gcc python-pip libxml2-dev libxslt1-dev python-dev zlib1g-dev On Fedora-based distributions (e.g., Fedora/RHEL/CentOS): .. code-block:: console $ sudo yum install gcc python-pip libxml2-devel libxslt-devel python-devel Install MongoDB ############### You also need to have MongoDB_ installed and running. .. _MongoDB: http://www.mongodb.org On Ubuntu, follow the instructions in the `MongoDB on Ubuntu Installation Guide`_. .. _`MongoDB on Ubuntu installation guide`: http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ On Fedora-based distributions, follow the instructions in the `MongoDB on Red Hat Enterprise, CentOS, Fedora, or Amazon Linux Installation Guide`_. .. _`MongoDB on Red Hat Enterprise, CentOS, Fedora, or Amazon Linux installation guide`: http://docs.mongodb.org/manual/tutorial/install-mongodb-on-red-hat-centos-or-fedora-linux/ .. note:: If you are Contributor and plan to run Unit tests on Zaqar, you may want to add this line to mongodb configuration file (``etc/mongod.conf`` or ``etc/mongodb.conf`` depending on distribution): .. code-block:: ini smallfiles = true Many Zaqar's Unit tests do not clean up their testing databases after executing. And database files consume much disk space even if they do not contain any records. This behavior will be fixed soon. Getting the code ################ Get the code from git.openstack.org to create a local repository with Zaqar: .. code-block:: console $ git clone https://git.openstack.org/openstack/zaqar.git Configuration ############# #. From your home folder create the ``~/.zaqar`` folder. This directory holds the configuration files for Zaqar: .. code-block:: console $ mkdir ~/.zaqar #. Generate the sample configuration file ``zaqar/etc/zaqar.conf.sample``: .. code-block:: console $ pip install tox $ cd zaqar $ tox -e genconfig #. Copy the Zaqar configuration samples to the directory ``~/.zaqar/``: .. 
code-block:: console $ cp etc/zaqar.conf.sample ~/.zaqar/zaqar.conf $ cp etc/logging.conf.sample ~/.zaqar/logging.conf #. Find the ``[drivers]`` section in ``~/.zaqar/zaqar.conf`` and specify ``mongodb`` as the message store: .. code-block:: ini message_store = mongodb management_store = mongodb #. Then find ``[drivers:message_store:mongodb]`` and ``[drivers:management_store:mongodb]`` sections and specify the :samp:`{URI}` to point to your local mongodb instance by adding this line to both the sections: .. code-block:: ini uri = mongodb://$MONGODB_HOST:$MONGODB_PORT By default you will have: .. code-block:: ini uri = mongodb://127.0.0.1:27017 This :samp:`{URI}` points to single mongodb node which of course is not reliable, so you need to set in the ``[default]`` section of configuration file: .. code-block:: ini unreliable = True For your reference, you can omit this parameter or set it to False only if the provided :samp:`{URI}` to your mongodb is actually the URI to mongodb Replica Set or Mongos. Also it must have "Write concern" parameter set to ``majority`` or to a number more than ``1``. For example, :samp:`{URI}` to reliable mongodb can look like this: .. code-block:: ini uri = mongodb://mydb0,mydb1,mydb2:27017/?replicaSet=foo&w=2 Where ``mydb0``, ``mydb1``, ``mydb2`` are addresses of the configured mongodb Replica Set nodes, ``replicaSet`` (Replica Set name) parameter is set to ``foo``, ``w`` (Write concern) parameter is set to ``2``. #. For logging, find the ``[handler_file]`` section in ``~/.zaqar/logging.conf`` and modify as desired: .. code-block:: ini args=('zaqar.log', 'w') Installing and using virtualenv ############################### #. Install virtualenv by running: .. code-block:: console $ pip install virtualenv #. Create and activate a virtual environment: .. code-block:: console $ virtualenv zaqarenv $ source zaqarenv/bin/activate #. Install Zaqar: .. code-block:: console $ pip install -e . #. 
Install the required Python binding for MongoDB: .. code-block:: console $ pip install pymongo #. Start Zaqar server in ``info`` logging mode: .. code-block:: console $ zaqar-server -v Or you can start Zaqar server in ``debug`` logging mode: .. code-block:: console $ zaqar-server -d #. Verify Zaqar is running by creating a queue via curl. In a separate terminal run: .. code-block:: console $ curl -i -X PUT http://localhost:8888/v2/queues/samplequeue -H "Content-type: application/json" -H 'Client-ID: 123e4567-e89b-12d3-a456-426655440000' -H 'X-PROJECT-ID: 12345' .. note:: ``Client-ID`` expects a valid UUID. ``X-PROJECT-ID`` expects a user-defined project identifier. #. Get ready to code! .. note:: You can run the Zaqar server in the background by passing the ``--daemon`` flag: .. code-block:: console $ zaqar-server -v --daemon But with this method you will not get immediate visual feedback and it will be harder to kill and restart the process. Troubleshooting ^^^^^^^^^^^^^^^ No handlers found for zaqar.client (...) """""""""""""""""""""""""""""""""""""""" This happens because the current user cannot create the log file (for the default configuration in ``/var/log/zaqar/server.log``). To solve it, create the folder: .. code-block:: console $ sudo mkdir /var/log/zaqar Create the file: .. code-block:: console $ sudo touch /var/log/zaqar/server.log And try running the server again. DevStack -------- If you want to use Zaqar in an integrated OpenStack developing environment, you can add it to your DevStack_ deployment. To do this, you first need to add the following setting to your ``local.conf``: .. code-block:: bash enable_plugin zaqar https://git.openstack.org/openstack/zaqar Then run the ``stack.sh`` script as usual. .. _DevStack: https://docs.openstack.org/devstack/latest/ Running tests ------------- See :doc:`running_tests` for details. Running the benchmarking tool ----------------------------- See :doc:`../admin/running_benchmark` for details. 
Contributing your work ---------------------- See :doc:`welcome` and :doc:`first_patch` for details.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/development.environment.rst
development.environment.rst
============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide <https://docs.openstack.org/contributors/>`_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with Zaqar. Communication ~~~~~~~~~~~~~ * IRC channel #openstack-zaqar at OFTC * Mailing list (prefix subjects with ``[zaqar]`` for faster responses) http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ Please refer the `zaqar Core Team <https://review.opendev.org/admin/groups/b082df89771ed409e9ce06fd9487aefd9e4fc868,members>`_ contacts. New Feature Planning ~~~~~~~~~~~~~~~~~~~~ zaqar features are tracked on `Launchpad <https://bugs.launchpad.net/zaqar>`_. Task Tracking ~~~~~~~~~~~~~ We track our tasks in `Launchpad <https://bugs.launchpad.net/zaqar>`_. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag. Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so on `Launchpad <https://bugs.launchpad.net/zaqar>`_. Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ All changes proposed to the zaqar project require one or two +2 votes from zaqar core reviewers before one of the core reviewers can approve patch by giving ``Workflow +1`` vote. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ All common PTL duties are enumerated in the `PTL guide <https://docs.openstack.org/project-team-guide/ptl.html>`_.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/contributing.rst
contributing.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _project_info: Project Info ============ Maintainers ----------- Project Team Lead (PTL) ~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------+---------------------------------------------+ | Contact | Area of interest | +------------------------------+---------------------------------------------+ | | Feilong Wang | * Chief Architect | | | flwang (irc) | * Release management | | | [email protected] | * Community management | | | [email protected] | * Core team management | | | | * Road Map | +------------------------------+---------------------------------------------+ | *If you would like to refactor whole Zaqar or have UX/community/other issues please contact me.* Project Core maintainers ~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------+---------------------------------------------+ | Contact | Area of interest | +------------------------------+---------------------------------------------+ | | Xiyuan Wang | * Zaqar related questions or bugs. | | | wxy (irc) UTC0200-UTC1000 | * Production problem for Zaqar. | | | [email protected] | * Integration with container. 
| | | | | +------------------------------+---------------------------------------------+ | | Hao Wang | * Introduce interesting and helpful | | | | features | | | wanghao (irc) | * Bug Fix and Code Optimization | | | [email protected] | * Notification Service | | | | | +------------------------------+---------------------------------------------+ | | Thomas Herve | * Websocket | | | therve (irc) | * Swift backend | | | [email protected] | | | | | | +------------------------------+---------------------------------------------+ | *All cores from this list are reviewing all changes that are proposed to Zaqar. To avoid duplication of efforts, please contact them before starting work on your code.* Storage Backend Maintainers reviewers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------+---------------------------------------------+ | Contact | Area of interest | +------------------------------+---------------------------------------------+ | | Hao Wang | * MongoDB | | | wanghao (irc) | | | | [email protected] | | +------------------------------+---------------------------------------------+ | | gecong | * Swift | | | gecong (irc) | | | | [email protected] | | | | | | +------------------------------+---------------------------------------------+ | | gengchc2 | * Redis | | | gengchc (irc) | | | | [email protected] | | | | | | +------------------------------+---------------------------------------------+ | | Feilong Wang | * SqlAlchemy | | | flwang (irc) | | | | [email protected] | | | | | | +------------------------------+---------------------------------------------+ | *All cores from this list are responsible for maintaining the storage backend. To avoid duplication of efforts, please contact them before starting working on your own backends.* Useful links ------------ - `Source code`_ - `Project space`_ - `Bugs`_ - `Patches on review`_ Where can I discuss and propose changes? 
---------------------------------------- - Our IRC channel: **#openstack-zaqar** on **OFTC**; - Bi-Weekly Zaqar team meeting (in IRC): **#openstack-zaqar** on **OFTC**, held on Monday at 14:00 UTC; - OpenStack mailing list: **[email protected]** (see `Subscription and usage instructions`_); - `Zaqar team on Launchpad`_: Answers/Bugs/Blueprints. .. references: .. _Source code: https://github.com/openstack/zaqar .. _Project space: https://launchpad.net/zaqar .. _Bugs: https://bugs.launchpad.net/zaqar .. _Patches on review: https://review.opendev.org/#/q/status:open+zaqar,n,z .. _IRC logs: http://irclog.perlgeek.de/openstack-zaqar .. _Subscription and usage instructions: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _Zaqar team on Launchpad: https://launchpad.net/zaqar
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/project_info.rst
project_info.rst
================== Contribution Guide ================== .. toctree:: :maxdepth: 2 welcome project_info development.environment first_patch first_review launchpad gerrit jenkins reviewer_guide running_tests test_suite Modules reference ~~~~~~~~~~~~~~~~~ Zaqar is composed of two layers: .. toctree:: :maxdepth: 1 transport storage The **transport drivers** are responsible for interacting with Zaqar clients. Every query made by clients is processed by the transport layer, which is in charge of passing this information to the backend and then returning the response in a format understandable by the client. The **storage drivers** are responsible for interacting with the storage backends and, that way, store or retrieve the data coming from the transport layer. In order to keep these layers decoupled, we have established that **checks should be performed in the appropriate layer**. In other words, transport drivers must guarantee that the incoming data is well-formed and storage drivers must enforce their data model stays consistent.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/index.rst
index.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============== Reviewer Guide ============== Overview -------- Our program follows the usual OpenStack review process, albeit with some important additions (see below). See also: :doc:`first_review`. Be Professional --------------- The PTL, with the support of the core reviewers, is ultimately responsible for holding contributors accountable for creating a positive, constructive, and productive culture. Inappropriate behavior will not be tolerated. (`Why this is important?`_) Do This: * Act professionally. * Treat others as friends and family. * Seek first to understand. * Be honest, transparent, and constructive. * Use clear, concise language. * Use prefixes to clarify the tone and intent of your comments. Don't Do This: * Use indecent, profane, or degrading language of any kind. * Hold a patch hostage for an ulterior motive, political or otherwise. * Abuse the review system to discuss big issues that would be better hashed out on the mailing list, in IRC, or during OpenStack Summit design sessions. 
* Engage in bullying behaviors, including but not limited to: * Belittling others' opinions * Persistent teasing or sarcasm * Insulting, threatening, or yelling at someone * Accusing someone of being incompetent * Setting someone up to fail * Humiliating someone * Isolating someone from others * Withholding information to gain an advantage * Falsely accusing someone of errors * Sabotaging someone's work Reviewing Docs -------------- When possible, enlist the help of a professional technical writer to help review each doc patch. All reviewers should familiarize themselves with `OpenStack Documentation Contributor Guide`_. When reviewing user guide patches, please run them through Maven and proof the resulting docs before giving your ``+1`` or ``+2``. Reviewing Code -------------- When reviewing code patches, use your best judgment and seek to provide constructive feedback to the author. Compliment them on things they have done well, and highlight possible improvements. Also, dedicate as much time as necessary in order to provide a careful analysis of the code. Don't assume that someone else will catch any issues you yourself miss; in other words, pretend you are the only person reviewing a given patch. Remember, "given enough eyeballs, all bugs are shallow" ceases to be true the moment individual reviewers become complacent. Some things to check when reviewing code: * Patch aligns with project goals, and is ideally associated with a bp or bug. * Commit message is formatted appropriately and contains external references as needed. * Coding style matches guidelines given in ``HACKING.rst``. * Patch is cohesive and not too big to be reviewed in a timely manner (some patches may need to be split to improve cohesion and/or reduce size). * Patch does what the commit message promises. * Algorithms are implemented correctly, and chosen appropriately. * Data schemas follow best practices. * Unit and functional tests have been included and/or updated. 
* Code contains no bugs (pay special attention to edge cases that tests may have missed). Use Prefixes ------------ We encourage the use of prefixes to clarify the tone and intent of your review comments. This is one way we try to mitigate misunderstandings that can lead to bad designs, bad code, and bad blood. .. list-table:: **Prefixes** :widths: 6 80 8 :header-rows: 1 * - Prefix - What the reviewer is saying - Blocker? * - KUDO - You did a nice job here, and I wanted to point that out. Keep up the good work! - No * - TEST - I think you are missing a test for this feature, code branch, specific data input, etc. - Yes * - BUG - I don't think this code does what it was intended to do, or I think there is a general design flaw here that we need to discuss. - Yes * - SEC - This is a serious security vulnerability and we better address it before merging the code. - Yes * - PERF - I have a concern that this won't be fast enough or won't scale. Let's discuss the issue and benchmark alternatives. - Yes * - DSQ - I think there is something critical here that we need to discuss this in IRC or on the mailing list before moving forward. - Yes * - STYLE - This doesn't seem to be consistent with other code and with ``HACKING.rst`` - Yes * - Q - I don't understand something. Can you clarify? - Yes * - DRY - This could be modified to reduce duplication of code, data, etc. See also: `Wikipedia: Don't repeat yourself`_ - Maybe * - YAGNI - This feature or flexibility probably isn't needed, or isn't worth the added complexity; if it is, we can always add the feature later. See also: `Wikipedia: You aren't gonna need it`_ - Maybe * - NIT - This is a nitpick that I can live with if we want to merge without addressing it. - No * - IMO - I'm chiming in with my opinion in response to someone else's comment, or I just wanted to share an observation. Please take what I say with a grain of salt. - No * - FYI - I just wanted to share some useful information. - No .. 
_`Why this is important?` : https://thoughtstreams.io/kgriffs/technical-communities/5060/ .. _`OpenStack Documentation Contributor Guide` : https://docs.openstack.org/contributor-guide/index.html .. _`Wikipedia: Don't repeat yourself` : https://en.wikipedia.org/wiki/Don't_repeat_yourself .. _`Wikipedia: You aren't gonna need it` : https://en.wikipedia.org/wiki/Don't_repeat_yourself
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/reviewer_guide.rst
reviewer_guide.rst
============================== Project hosting with Launchpad ============================== `Launchpad`_ hosts the Zaqar project. The Zaqar project homepage on Launchpad is http://launchpad.net/zaqar. Launchpad credentials --------------------- Creating a login on Launchpad is important even if you don't use the Launchpad site itself, since Launchpad credentials are used for logging in on several OpenStack-related sites. These sites include: * `Wiki`_ * Gerrit (see :doc:`gerrit`) * Jenkins (see :doc:`jenkins`) Mailing list ------------ The mailing list address is ``[email protected]``. This is a common mailing list across all OpenStack projects. To participate in the mailing list: Subscribe at http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss The mailing list archives are at http://lists.openstack.org/pipermail/openstack-dev (pre December 2018) and http://lists.openstack.org/pipermail/openstack-discuss for current. Bug tracking ------------ Report Zaqar bugs at https://bugs.launchpad.net/zaqar Feature requests (Blueprints) ----------------------------- Zaqar uses Launchpad Blueprints to track feature requests. Blueprints are at https://blueprints.launchpad.net/zaqar. Technical support (Answers) --------------------------- Zaqar uses Launchpad Answers to track Zaqar technical support questions. The Zaqar Answers page is at https://answers.launchpad.net/zaqar. Note that `Ask OpenStack`_ (which is not hosted on Launchpad) can also be used for technical support requests. You can also reach us in ``#openstack-zaqar`` IRC channel at ``OFTC``. .. _Launchpad: https://launchpad.net .. _Wiki: https://wiki.openstack.org .. _Zaqar Team: https://launchpad.net/zaqar .. _Ask OpenStack: https://ask.openstack.org/
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/launchpad.rst
launchpad.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Your first review ================= The review stage is a very important part in the development process. Following are some of the reasons this stage is important: * Getting other developers feedback minimizes the risk of adding regressions to the code base and ensures the quality of the code being merged. * Building the community encourages everyone to review code. Everyone appreciates having their code reviewed. * Since developers are always learning from being exposed to the points of view of others, reviews help developers to improve their coding skills. * Providing a review is a great way to become familiar with the code. Everyone is encourages to review code. You don't need to know every detail of the code base. You need to understand only what the code related to the fix does. Step by step ------------ Go to ``review.opendev.org`` and filter by `Open Zaqar fixes`_. Select a fix from the list to review. Try to select an easy patch for your first review. That will help you to gain some confidence. Download the patch to your local repository and test it: .. code-block:: console $ git review -d [review-id] The :samp:`{review-id}` is the number in the URL (check the screenshot for more details). Example: .. code-block:: console $ git review -d 92979 .. image:: images/zaqar_review_id.png :alt: Zaqar review id This git command creates a branch with the author's name and enables you to test the patch in your local environment. 
* Inspect the code. Use all of the best programming practices you know as you review the code. * Give code location feedback. Do you consider that some code should be better located in another place within the file, or maybe in another file? If so, suggest this in the review comment and score with a ``-1`` if you think that it's that important. * Give code-style feedback. Do you think that the code structure could be improved? Keep the DRY, YAGNI and KISS principles in mind. * Give grammar and orthography feedback. Many of our contributors are not native English speakers, so it is common to find some errors of this type. * Make sure that: * The commit message is formatted appropriately. Check `Git Commit Messages`_ for more information on how you should write a git commit message. * The coding style matches guidelines given in ``HACKING.rst``. * The patch is not too big. You might need to split some patches to improve cohesion and/or reduce size. * The patch does what the commit message promises. * Unit and functional tests are included and/or updated. * If during the inspection you see a specific line you would like to bring up to discussion in the final review, leave feedback as an inline comment in Gerrit. This will make the review process easier. You can also use prefixes described in :doc:`reviewer_guide` for Zaqar inline comments. * Keep in mind the :doc:`reviewer_guide` and be respectful when leaving feedback. * Hit the :guilabel:`Review` button in the web UI to publish your comments and assign a score. * Things to consider when leaving a score: * You can score with a ``-1`` if you think that there are things to fix. We have to be careful to not stall the cycle just because a few nits, so downvoting also depends on the current stage of the development cycle and the severity of the flaw you see. 
* You can score with a "0" if you are the author of the fix and you want to respond to the reviewers comments, or if you are a reviewer and you want to point out some reminder for future developing (e.g. the deadline is the next day and the fix needs to be merged, but you want something to be improved). * You can score with ``+1`` if the fix works and you think that the code looks good, upvoting is your choice. * Remember to leave any comment that you think is important in the comment form. When you are done, click :guilabel:`Publish Comments`. For more details on how to do a review, check out the `Gerrit Workflow Review section`_ document. .. _`Open Zaqar fixes`: https://review.opendev.org/#/q/status:open+zaqar,n,z .. _`Git Commit Messages`: https://wiki.openstack.org/wiki/GitCommitMessages .. _`Gerrit Workflow Review section`: https://docs.openstack.org/infra/manual/developers.html#code-review
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/first_review.rst
first_review.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================ Your first patch ================ This section describes how to create your first patch and upload it to Gerrit_ for reviewing. Create your contributor accounts and set up your code environment ----------------------------------------------------------------- Accounts setup ############## You will need to create a Launchpad_ account to login to the Gerrit_ review system dashboard. This is also useful for automatically crediting bug fixes to you when you address them with your code commits. You will also have to sign the `Contributors License Agreement`_ and `join the OpenStack Foundation`_. It is a good idea to use the same email all of these accounts to avoid hooks errors. Visit the `Gerrit Workflow's account setup`_ section in the wiki to get more information on setting up your accounts. .. _Launchpad: https://launchpad.net/ .. _Gerrit: https://review.opendev.org/ .. _`Contributors License Agreement`: https://docs.openstack.org/infra/manual/developers.html#account-setup .. _`join the OpenStack Foundation`: https://www.openstack.org/join/ .. _`Gerrit Workflow's account setup`: https://docs.openstack.org/infra/manual/developers.html#account-setup SSH setup ######### You are going to need to create and upload an SSH key to Gerrit to be able to commit changes for review. To create an SSH key: .. code-block:: console $ ssh-keygen –t rsa You can optionally enter a password to enhance security. View and copy your SSH key: .. 
code-block:: console $ less ~/.ssh/id_rsa.pub Now you can `upload the SSH key to Gerrit`_. .. _`upload the SSH key to Gerrit`: https://review.opendev.org/#/settings/ssh-keys Git Review installation ####################### Before you start working, make sure you have ``git-review`` installed on your system. You can install it with the following command: .. code-block:: console $ pip install git-review ``Git-review`` checks if you can authenticate to Gerrit with your SSH key. It will ask you for your username. You can configure your Gerrit username so you don't have to keep re-entering it every time you want to use ``git-review``: .. code-block:: console $ git config --global gitreview.username yourgerritusername You can also save some time by entering your email and your name: .. code-block:: console $ git config --global gitreview.email "yourgerritemail" $ git config --global gitreview.name "Firstname Lastname" You can view your Gerrit user name in the `settings page`_. .. _`settings page`: https://review.opendev.org/#/settings/ Project setup ############# Clone the Zaqar repository with the following git command: .. code-block:: console $ git clone https://git.openstack.org/openstack/zaqar.git For information on how to set up the Zaqar development environment see :doc:`development.environment`. Before writing code, you will have to do some configurations to connect your local repository with Gerrit. You will only need to do this your first time setting up the development environment. You can set ``git-review`` to configure the project and install the Gerrit change-id commit hook with the following command: .. code-block:: console $ cd zaqar $ git review -s If you get the error "We don't know where your Gerrit is", you will need to add a new git remote. The URL should be in the error message. Copy that and create the new remote. It looks something like: .. 
code-block:: console $ git remote add gerrit ssh://<username>@review.opendev.org:29418/openstack/zaqar.git In the project directory you have a hidden ``.git`` directory and a ``.gitreview`` file. You can view them with the following command: .. code-block:: console $ ls -la Making a patch -------------- Pick or report a bug #################### You can start tackling some bugs from the `bugs list in Launchpad`_. If you find a bug you want to work on, assign yourself. Make sure to read the bug report. If you need more information, ask the reporter to provide more details through a comment on Launchpad or through IRC or email. If you find a bug, look through Launchpad to see if it has been reported. If it hasn't, report the bug, and ask for another developer to confirm it. You can start working on it if another developer confirms the bug. Here are some details you might want to include when filling out a bug report: * The release, or milestone, or commit ID corresponding to the software that you are running * The operating system and version where you've identified the bug * Steps to reproduce the bug, including what went wrong * Description of the expected results instead of what you saw * Portions of your log files so that you include only relevant excerpts In the bug comments, you can contribute instructions on how to fix a given bug, and set the status to "Triaged". You can read more about `Launchpad bugs`_ in the official guide. .. _`bugs list in Launchpad`: https://bugs.launchpad.net/zaqar .. _`Launchpad bugs`: https://docs.openstack.org/project-team-guide/bugs.html Workflow ######## Make sure your repo is up to date. You can update it with the following git commands: .. code-block:: console $ git remote update $ git checkout master $ git pull --ff-only origin master Create a topic branch. You can create one with the following git command: .. 
code-block:: console $ git checkout -b TOPIC-BRANCH If you are working on a blueprint, name your :samp:`{TOPIC-BRANCH}` ``bp/BLUEPRINT`` where :samp:`{BLUEPRINT}` is the name of a blueprint in Launchpad (for example, "bp/authentication"). The general convention when working on bugs is to name the branch ``bug/BUG-NUMBER`` (for example, "bug/1234567"). Read more about the commit syntax in the `Gerrit workflow`_ wiki. .. _`Gerrit workflow`: https://docs.openstack.org/infra/manual/developers.html#development-workflow Common problems ^^^^^^^^^^^^^^^ #. You realized that you were working in master and you haven't made any commits. Solution: .. code-block:: console $ git checkout -b newbranch $ git commit -a -m "Edited" If you already created the branch, omit the ``-b``. You put all your changes to :samp:`{newbranch}`. Problem solved. #. You realized that you were working in master and you have made commits to master. Solution: .. code-block:: console $ git branch newbranch $ git reset --hard HEAD~x $ git checkout newbranch Where ``x`` is the number of commits you have made to master. And remember, you will lose any uncommitted work. You put your commits in :samp:`{newbranch}`. Problem solved. #. You made multiple commits and realized that Gerrit requires one commit per patch. Solution: * You need to squash your previous commits. Make sure you are in your branch and follow `squashing guide`_. Then fill commit message properly. You squashed your commits. Problem solved. Design principles ################# Zaqar lives by the following design principles: * `DRY`_ * `YAGNI`_ * `KISS`_ .. _`DRY`: https://en.wikipedia.org/wiki/Don%27t_repeat_yourself .. _`YAGNI`: https://en.wikipedia.org/wiki/YAGNI .. _`KISS`: https://en.wikipedia.org/wiki/KISS_principle Try to stick to these design principles when working on your patch. Test your code ############## It is important to test your code and follow the python code style guidelines. See :doc:`running_tests` for details on testing. 
Submitting a patch ------------------ Once you finished coding your fix, add and commit your final changes. Your commit message should: * Provide a brief description of the change in the first line. * Insert a single blank line after the first line. * Provide a detailed description of the change in the following lines, breaking paragraphs where needed. * The first line should be limited to 50 characters and should not end with a period. * Subsequent lines should be wrapped at 72 characters. * Put the 'Change-id', 'Closes-Bug #NNNNN' and 'blueprint NNNNNNNNNNN' lines at the very end. Read more about `making a good commit message`_. To submit it for review use the following git command: .. code-block:: console $ git review You will see the URL of your review page once it is successfully sent. You can also see your reviews in :guilabel:`My Changes` in Gerrit. The first thing to watch for is a ``+1`` in the :guilabel:`Verified` column next to your patch in the server and/or client list of pending patches. If the "Jenkins" user gives you a ``-1``, you'll need to check the log it posts to find out what gate test failed, update your patch, and resubmit. You can set your patch as a :guilabel:`work in progress` if your patch is not ready to be merged, but you would still like some feedback from other developers. To do this leave a review on your patch setting :guilabel:`Workflow` to ``-1``. Once the gate has verified your patch, other Zaqar developers will take a look and submit their comments. When you get two or more ``+2``'s from core reviewers, the patch will be approved and merged. Don't be discouraged if a reviewer submits their comments with a ``-1``. Patches iterate through several updates and reviews before they are ready for merging. To reply to feedback save all your comments as draft, then click on the :guilabel:`Review` button. When replying to feedback, you as the patch author can use the score of ``0``. 
The only exception to using the score of ``0`` is when you discover a blocking issue and you don't want your patch to be merged. In which case, you can review your own patch with a ``-2``, while you decide whether to keep, refactor, or withdraw the patch. Professional conduct -------------------- The Zaqar team holds reviewers accountable for promoting a positive, constructive culture within our program. If you ever feel that a reviewer is not acting professionally or is violating the OpenStack community code of conduct, please let the PTL know immediately so that he or she can help resolve the issue. .. _`making a good commit message`: https://wiki.openstack.org/wiki/GitCommitMessages .. _`squashing guide` : http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/first_patch.rst
first_patch.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================== Code reviews with Gerrit ======================== Zaqar uses the `Gerrit`_ tool to review proposed code changes. The review site is https://review.opendev.org. Gerrit is a complete replacement for GitHub pull requests. `All GitHub pull requests to the Zaqar repository will be ignored`. See `Development Workflow with Gerrit`_ for more detailed documentation on how to work with Gerrit. .. _Gerrit: https://www.gerritcodereview.com/ .. _Development Workflow with Gerrit: https://docs.openstack.org/infra/manual/developers.html#development-workflow
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/gerrit.rst
gerrit.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================== Welcome new contributors ======================== First Steps =========== It's very great that you're interested in contributing to Zaqar. First of all, make sure you join Zaqar communication forums: * Subscribe to Zaqar `mailing lists`_. * Join Zaqar team on IRC. You can chat with us directly in the ``#openstack-zaqar`` channel on ``OFTC``. If you don't know how to use IRC, you can find some directions in `OpenStack IRC wiki`_. * Answer and ask questions on `Ask OpenStack`_. How can I contribute? ===================== There are many ways you can contribute to Zaqar. Of course coding is one, but you can also join Zaqar as a tester, documenter, designer or translator. Coding ------ Bug fixing ^^^^^^^^^^ The first area where you can help is bug fixing. ``Confirmed`` bugs are usually your best choice. ``Triaged`` bugs should even contain tips on how they should be fixed. You can find both of them in `Zaqar's Confirmed and Triaged bugs`_ web page. Once you selected the bug you want to work on, go ahead and assign it to yourself, branch the code, implement the fix, and propose your change for review. You can find information on how to do it in :doc:`first_patch` manual. Some easy-to-fix bugs may be marked with the ``low-hanging-fruit`` tag: those are good targets for a beginner. Bug triaging ^^^^^^^^^^^^ You can also help Zaqar with bug triaging. 
Reported bugs need care: prioritizing them correctly, confirming them, making sure they don't go stale. All those tasks help immensely. If you want to start contributing in coding, but you are not a hardcore developer, consider helping in this area. Bugs can be marked with different tags according to their status: * ``New`` bugs are those bugs that have been reported by a user but haven't been verified by the community yet. * ``Confirmed`` bugs are those bugs that have been reproduced by someone else than the reporter. * ``Triaged`` bugs are those bugs that have been reproduced by a core developer. * ``Incomplete`` bugs are those bugs that don't have enough information to be reproduced. * ``In Progress`` bugs are those bugs that are being fixed by some developer. This status is set automatically by the Gerrit review system once a fix is proposed by a developer. You don't need to set it manually. * ``Invalid`` bugs are those bugs that don't qualify as a bug. Usually a support request or something unrelated to the project. You can learn more about this in Launchpad's `Of Bugs and Statuses`_. You only have to worry about ``New`` bugs. If you can reproduce them, you can mark them as ``Confirmed``. If you cannot reproduce them, you can ask the reported to provide more information and mark them as ``Incomplete``. If you consider that they aren't bugs, mark them as ``Invalid`` (Be careful with this. Asking someone else in Zaqar is always a good idea). Also, you can contribute instructions on how to fix a given bug. Check out the `Bug Triage`_ wiki for more information. Reviewing ^^^^^^^^^ Every patch submitted to OpenStack gets reviewed before it can be approved and merged. Zaqar gets a lot of contributions and everyone can (and is encouraged to) review Zaqar's existing patches. Pick an open review and go through it, test it if possible, and leave a comment with a ``+1`` or ``-1`` vote describing what you discovered. 
If you're planning on submitting patches of your own, it's a great way to learn about what the community cares about and to learn about the code base. Make sure you read :doc:`first_review` manual. Feature development ^^^^^^^^^^^^^^^^^^^ Once you get familiar with the code, you can start to contribute new features. New features get implemented every 6 months in `OpenStack development cycle`_. We use `Launchpad Blueprints`_ to track the design and implementation of significant features, and Zaqar team uses Design Summits every 6 months to get together and discuss things in person with the rest of the community. Code should be proposed for inclusion before Zaqar reach the final feature milestone of the development cycle. Testing ------- Testing efforts are highly related to coding. If you find that there are test cases missing or that some tests could be improved, you are encouraged to report it as a bug and then provide your fix. See :doc:`running_tests` and :doc:`test_suite` for information on how to run tests and how the tests are organized in Zaqar. See :doc:`first_patch` for information on how to provide your fix. Documenting ----------- You can contribute to `Zaqar's Contributor Documentation`_ which you are currently reading and to `Zaqar's Wiki`_. To fix a documentation bug check the bugs marked with the ``doc`` tag in Zaqar's bug list. In case that you want to report a documentation bug, then don't forget to add the ``doc`` tag to it. `Zaqar's Contributor Documentation`_ is compiled from source files in ``.rst`` (reStructuredText) format located in ``doc/source/`` directory in Zaqar repository. The `"openstack-manuals" project`_ houses the documentation that is published to ``docs.openstack.org``. Before contributing to `Zaqar's Contributor Documentation`_ you have to read :doc:`first_patch` manual and `OpenStack Documentation Contributor Guide`_. Also, you can monitor `Ask OpenStack`_ to curate the best answers that can be folded into the documentation. 
Designing --------- Zaqar doesn't have a user interface yet. Zaqar team is working to `integrate Zaqar to the OpenStack Dashboard (Horizon)`_. If you're a designer or usability professional your help will be really appreciated. Whether it's reviewing upcoming features as a user and giving feedback, designing features, testing designs or features with users, or helping to build use cases and requirements, everything is useful. Translating ----------- You can translate Zaqar to language you know. Read the `Translation wiki page`_ for more information on how OpenStack manages translations. Zaqar has adopted Zanata, and you can use the `OpenStack Zanata site`_ as a starting point to translate any of the OpenStack projects, including Zaqar. It's easier to start translating directly on the `OpenStack Zanata site`_, as there is no need to download any files or applications to get started. .. _`mailing lists` : https://wiki.openstack.org/wiki/MailingLists .. _`OpenStack IRC wiki` : https://wiki.openstack.org/wiki/IRC .. _`Ask OpenStack` : https://ask.openstack.org/ .. _`Zaqar's Confirmed and Triaged bugs` : https://bugs.launchpad.net/zaqar/+bugs?field.searchtext=&orderby=-importance&search=Search&field.status%3Alist=CONFIRMED&field.status%3Alist=TRIAGED&assignee_option=any&field.assignee=&field.bug_reporter=&field.bug_commenter=&field.subscriber=&field.structural_subscriber=&field.tag=&field.tags_combinator=ANY&field.has_cve.used=&field.omit_dupes.used=&field.omit_dupes=on&field.affects_me.used=&field.has_patch.used=&field.has_branches.used=&field.has_branches=on&field.has_no_branches.used=&field.has_no_branches=on&field.has_blueprints.used=&field.has_blueprints=on&field.has_no_blueprints.used=&field.has_no_blueprints=on .. _`Of Bugs and Statuses` : http://blog.launchpad.net/general/of-bugs-and-statuses .. _`Bug Triage` : https://wiki.openstack.org/wiki/BugTriage .. _`OpenStack development cycle` : https://wiki.openstack.org/wiki/ReleaseCycle .. 
_`Launchpad Blueprints` : https://wiki.openstack.org/wiki/Blueprints .. _`OpenStack Documentation Contributor Guide` : https://docs.openstack.org/contributor-guide/index.html .. _`Zaqar's Contributor Documentation` : https://docs.openstack.org/zaqar/latest/ .. _`Zaqar's Wiki` : https://wiki.openstack.org/wiki/Zaqar .. _`"openstack-manuals" project` : https://wiki.openstack.org/wiki/Documentation .. _`integrate Zaqar to the OpenStack Dashboard (Horizon)` : https://blueprints.launchpad.net/zaqar-ui/ .. _`Translation wiki page` : https://wiki.openstack.org/wiki/Translations#Translation_.26_Management .. _`OpenStack Zanata site` : https://translate.openstack.org/
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/contributor/welcome.rst
welcome.rst
.. _basic-configuration: Basic Configuration =================== The ``zaqar.conf`` configuration file is an `INI file format <https://en.wikipedia.org/wiki/INI_file>`_. This file is located in ``/etc/zaqar``. If there is a file ``zaqar.conf`` in ``~/.zaqar`` directory, it is used instead of the one in ``/etc/zaqar`` directory. When you manually install the Message service, you must generate the zaqar.conf file using the config samples generator located inside Zaqar installation directory and customize it according to your preferences. To generate the sample configuration file ``zaqar/etc/zaqar.conf.sample``: .. code-block:: console # pip install tox $ cd zaqar $ tox -e genconfig Where :samp:`{zaqar}` is your Message service installation directory. Then copy Message service configuration sample to the directory ``/etc/zaqar``: .. code-block:: console # cp etc/zaqar.conf.sample /etc/zaqar/zaqar.conf For a list of configuration options, see the tables in this guide. .. important:: Do not specify quotes around configuration options. Message API configuration ------------------------- The Message service has two APIs: the HTTP REST API for WSGI transport driver, and the Websocket API for Websocket transport driver. The Message service can use only one transport driver at the same time. The functionality and behavior of the APIs are defined by API versions. For example, the Websocket API v2 acts the same as the HTTP REST API v2. For now there are v1, v1.1 and v2 versions of HTTP REST API and only v2 version of Websocket API. Permission control options in each API version: * The v1 does not have any permission options. * The v1.1 has only ``admin_mode`` option which controls the global permission to access the pools and flavors functionality. * The v2 has only: * RBAC policy options: ``policy_default_rule``, ``policy_dirs``, ``policy_file`` which controls the permissions to access each type of functionality for different types of users. .. 
warning:: JSON formatted policy file is deprecated since Zaqar 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html * ``secret_key`` option which defines a secret key to use for signing special URLs. These are called pre-signed URLs and give temporary permissions to outsiders of the system. Authentication and authorization -------------------------------- All requests to the API may only be performed by an authenticated agent. The preferred authentication system is the OpenStack Identity service, code-named keystone. Identity service authentication ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To authenticate, an agent issues an authentication request to an Identity service endpoint. In response to valid credentials, Identity service responds with an authentication token and a service catalog that contains a list of all services and endpoints available for the given token. Multiple endpoints may be returned for Message service according to physical locations and performance/availability characteristics of different deployments. Normally, Identity service middleware provides the ``X-Project-Id`` header based on the authentication token submitted by the Message service client. For this to work, clients must specify a valid authentication token in the ``X-Auth-Token`` header for each request to the Message service API. The API validates authentication tokens against Identity service before servicing each request. No authentication ~~~~~~~~~~~~~~~~~ If authentication is not enabled, clients must provide the ``X-Project-Id`` header themselves. Notifications options --------------------- The notifications feature in the Message service can be enabled by adding ``zaqar.notification.notifier`` stage to the message storage layer pipeline. 
To do it, ensure that ``zaqar.notification.notifier`` is added to ``message_pipeline`` option in the ``[storage]`` section of ``zaqar.conf``: .. code-block:: ini [storage] message_pipeline = zaqar.notification.notifier Pooling options --------------- The Message service supports pooling. Pooling aims to make the Message service highly scalable without losing any of its flexibility by allowing users to use multiple back ends. Storage drivers options ----------------------- Storage back ends ~~~~~~~~~~~~~~~~~ The Message service supports several different storage back ends (storage drivers) for storing management information, messages and their metadata. The recommended storage back end is MongoDB. For information on how to specify the storage back ends. When the storage back end is chosen, the corresponding back-end options become active. For example, if Redis is chosen as the management storage back end, the options in ``[drivers:management_store:redis]`` section become active. Storage layer pipelines ~~~~~~~~~~~~~~~~~~~~~~~ A pipeline is a set of stages needed to process a request. When a new request comes to the Message service, first it goes through the transport layer pipeline and then through one of the storage layer pipelines depending on the type of operation of each particular request. For example, if the Message service receives a request to make a queue-related operation, the storage layer pipeline will be ``queue pipeline``. The Message service always has the actual storage controller as the final storage layer pipeline stage. By setting the options in the ``[storage]`` section of ``zaqar.conf``, you can add additional stages to these storage layer pipelines: * **Claim pipeline** * **Message pipeline** with built-in stage available to use: * ``zaqar.notification.notifier`` - sends notifications to the queue subscribers on each incoming message to the queue, in other words, enables notifications functionality. 
* **Queue pipeline** * **Subscription pipeline** The storage layer pipelines options are empty by default, because additional stages can affect the performance of the Message service. Depending on the stages, the sequence in which the option values are listed does matter or not. You can add external stages to the storage layer pipelines. For information how to write and add your own external stages, see `Writing stages for the storage pipelines <https://docs.openstack.org/zaqar/latest/admin/writing_pipeline_stages.html>`_ tutorial. Messaging log files ------------------- The corresponding log file of each Messaging service is stored in the ``/var/log/zaqar/`` directory of the host on which each service runs. .. list-table:: Log files used by Messaging services :widths: 35 35 :header-rows: 1 * - Log filename - Service that logs to the file * - ``server.log`` - Messaging service
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/configuration/configuring.rst
configuring.rst
===================== Getting Started Guide ===================== Overview -------- Messaging service is a RESTful API-based messaging service. It supports distributed web applications,and is based on the OpenStack Zaqar project. Messaging service is a vital component of large, distributed web applications. You can use Messaging service for public, private, and hybrid cloud environments. As you develop distributed web applications, you often have multiple agents set up to complete sets of tasks for those applications. These tasks can be anything from creating users to deleting blocks of storage. Messaging service provides a simple interface that creates these tasks as queues, messages, and claims. The interface then posts, claims, reads, and deletes them as the tasks are needed and performed. Messaging service handles the distribution of tasks, but it does not necessarily manage the order of the tasks. Applications handle the workflow at a higher level. This guide explains how to access and start using the API so that you can begin to use Messaging service for your applications. Instructions are given for how to properly enter the necessary URLs, using cURL, to set up and use a basic set of Messaging service operations. Prerequisites for Running Examples ---------------------------------- In order to run the examples in this guide, you must have the following prerequisites: - A Cloud account - A username and password, as specified during registration - Prior knowledge of HTTP/1.1 conventions - Basic familiarity with Cloud and RESTful APIs How Messaging service Works --------------------------- Following is an overview of how Messaging service works. For definitions of Messaging service terms, see the below glossary. 1. You create a queue to which producers or publishers post messages. 2. Workers (consumers or subscribers) claim or get a message from the queue, complete the work in that message, and delete the message. 
If a worker goes off-line before it completes the work in a message, the worker can release the claim, or let the claim's time to live (TTL) expire,
- The consumer can reply with the result of processing a message by sending another message to the same queue (queues are duplex by default). - The publisher gets replies from the queue. - The consumer sends a marker to skip messages already seen. - TTL eventually deletes messages. This pattern is ideal for communicating with a specific client, especially when a reply is desired from that client. Messaging service Operations ---------------------------- This section lists all of the operations that are available in the Messaging service API. This document uses some of the most common operations in `OpenStack API Reference <https://docs.openstack.org/api-quick-start/index.html>`__.. For details about all of the operations, see the Messaging service API v2 Reference. Home Document ~~~~~~~~~~~~~ The following operation is available for the home document: - Get Home Document Queues ~~~~~~ The following operations are available for queues: - Create Queue - List Queues - Get Queue - Update Queue - Get Queue Stats - Delete Queue Messages ~~~~~~~~ The following operations are available for messages: - Post Message - Get Messages - Get a Specific Message - Get a Set of Messages by ID - Delete Message - Delete a Set of Messages by ID Claims ~~~~~~ The following operations are available for claims: - Claim Messages - Get Claim - Update Claim - Release Claim Subscriptions ~~~~~~~~~~~~~ The following operations are available for subscriptions: - Create Subscriptions - List Subscriptions - Get Subscription - Update Subscription - Delete Subscription Pools ~~~~~ The following operations are available for Pools: - Create Pools - List Pools - Get Pool - Update Pool - Delete Pool Flavors ~~~~~~~ The following operations are available for Flavors: - Create Flavors - List Flavors - Get Flavor - Update Flavors - Delete Flavors Health ~~~~~~ The following operations are available for Health: - Ping for basic health status - Get detailed health status Use Cases --------- Queuing systems 
are used to coordinate tasks within an application. Here are some examples: - **Backup**: A backup application might use a queuing system to connect the actions that users do in the a control panel to the customer's backup agent on a server. When a customer wants to start a backup, they simply choose "start backup" on a panel. Doing so causes the producer to put a "startBackup" message into the queue. Every few minutes, the agent on the customers server (the worker) checks the queue to see if it has any new messages to act on. The agent claims the "startBackup" message and kicks off the backup on the customer's server. - **Storage**: Gathering statistics for a large, distributed storage system can be a long process. The storage system can use a queuing system to ensure that jobs complete, even if one initially fails. Since messages are not deleted until after the worker has completed the job, the storage system can make sure that no job goes undone. If the worker fails to complete the job, the message stays in the queue to be completed by another server. In this case, a worker claims a message to perform a statistics job, but the claim's TTL expired and the message is put back into the queue when the job took too long to complete (meaning that it most likely failed). By giving the claim a TTL, applications can protect themselves from workers going off-line while processing a message. After a claim's TTL expires, the message is put back into the queue for another worker to claim. - **Email**: The team for an email application is constantly migrating customer email from old versions to newer ones, so they develop a tool to let customers do it themselves. The migrations take a long time, so they cannot be done with single API calls, or by a single server. When a user starts a migration job from their portal, the migration tool sends messages to the queue with details of how to run the migration. 
A set of migration engines, the consumers in this case, periodically check the queues for new migration tasks, claim the messages, perform the migration, and update a database with the migration details. This process allows a set of servers to work together to accomplish large migrations in a timely manner. Following are some generic use cases for Messaging service: - Distribute tasks among multiple workers (transactional job queues) - Forward events to data collectors (transactional event queues) - Publish events to any number of subscribers (event broadcasting) - Send commands to one or more agents (point-to-point messaging or event broadcasting) - Request an action or get information from a Remote Procedure Call (RPC) agent (point-to-point messaging) Additional Resources -------------------- For more information about using the API, see the Messaging service API v2 Reference. All you need to get started with Messaging service is the getting started guide, the reference, and your Cloud account. For information about the OpenStack Zaqar API, see `OpenStack API Reference <https://docs.openstack.org/api-quick-start/index.html>`__. This API uses standard HTTP 1.1 response codes as documented at `www.w3.org/Protocols/rfc2616/rfc2616-sec10.html <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html>`__. Glossary -------- **Claim** The process of a worker checking out a message to perform a task. Claiming a message prevents other workers from attempting to process the same messages. **Claim TTL** Defines how long a message will be in claimed state. A message can be claimed by one worker at a time. **Consumer** A server that claims messages from the queue. **Message** A task, a notification, or any meaningful data that a producer or publisher sends to the queue. A message exists until it is deleted by a recipient or automatically by the system based on a TTL (time-to-live) value. **Message TTL** Defines how long a message will be accessible. 
**Producer** A server or application that sends messages to the queue. **Producer - Consumer** A pattern where each worker application that reads the queue has to claim the message in order to prevent duplicate processing. Later, when work is done, the worker is responsible for deleting the message. If message is not deleted in a predefined time, it can be claimed by other workers. **Publisher** A server or application that posts messages to the queue with the intent to distribute information or updates to multiple subscribers. **Publisher - Subscriber** A pattern where all worker applications have access to all messages in the queue. Workers cannot delete or update messages. **Queue** The entity that holds messages. Ideally, a queue is created per work type. For example, if you want to compress files, you would create a queue dedicated to this job. Any application that reads from this queue would only compress files. **Subscriber** An observer that watches messages like an RSS feed but does not claim any messages. **TTL** Time-to-live value. **Worker** A client that claims messages from the queue and performs actions based on those messages.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/user/getting_started.rst
getting_started.rst
Send Requests to the API ======================== You have several options for sending requests through an API: - Developers and testers may prefer to use cURL, the command-line tool from http://curl.haxx.se/. With cURL you can send HTTP requests and receive responses back from the command line. - If you like to use a more graphical interface, the REST client for Firefox also works well for testing and trying out commands, see https://addons.mozilla.org/en-US/firefox/addon/restclient/. - You can also download and install rest-client, a Java application to test RESTful web services, from https://github.com/wiztools/rest-client. Sending API Requests Using cURL ------------------------------- cURL is a command-line tool that is available in UNIX® system-based environments and Apple Mac OS X® systems, and can be downloaded for Microsoft Windows® to interact with the REST interfaces. For more information about cURL, visit http://curl.haxx.se/. cURL enables you to transmit and receive HTTP requests and responses from the command line or from within a shell script. As a result, you can work with the REST API directly without using one of the client APIs. The following cURL command-line options are used in this guide to run the examples. .. list-table:: :widths: 50 50 :header-rows: 1 * - Option - Description * - ``-d`` - Sends the specified data in a ``POST`` request to the HTTP server. * - ``-i`` - Includes the HTTP header in the output. * - ``-H HEADER`` - Specifies an HTTP header in the request. * - ``-X`` - Specifies the request method to use when communicating with the HTTP server. The specified request is used instead of the default method, which is GET. For example, ``-X PUT`` specifies to use the ``PUT`` request method. **Note** If you have the tools, you can run the cURL JSON request examples with the following options to format the output from cURL: ``<curl JSON request example> | python -mjson.tool``. 
Copying and Pasting cURL Request Examples into a Terminal Window ---------------------------------------------------------------- To run the cURL request examples shown in this guide on Linux or Mac systems, perform the following actions: 1. Copy and paste each example from the HTML version of this guide into an ASCII text editor (for example, vi or TextEdit). You can click on the small document icon to the right of each request example to select it. 2. Modify each example with your required account information and so forth, as detailed in this guide. 3. After you are finished modifying the text for the cURL request example with your information (for example, ``your_username`` and ``your_api_key``), paste it into your terminal window. 4. Press Enter to run the cURL command. **Note** The carriage returns in the cURL request examples that are part of the cURL syntax are escaped with a backslash (\\) in order to avoid prematurely terminating the command. However, you should not escape carriage returns inside the JSON message within the command. **Tip** If you have trouble copying and pasting the examples as described, try typing the entire example on one long line, removing all the backslash line continuation characters.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/user/send_request_api.rst
send_request_api.rst
Generate an Authentication Token ================================ You can use `cURL <http://curl.haxx.se/>`__ to try the authentication process in two steps: get a token, and send the token to a service. 1. Get an authentication token by providing your user name and either your API key or your password. Here are examples of both approaches: You can request a token by providing your user name and your password. :: $ curl -X POST https://localhost:5000/v2.0/tokens -d '{"auth":{"passwordCredentials":{"username": "joecool", "password":"coolword"}, "tenantId":"5"}}' -H 'Content-type: application/json' Successful authentication returns a token which you can use as evidence that your identity has already been authenticated. To use the token, pass it to other services as an ``X-Auth-Token`` header. Authentication also returns a service catalog, listing the endpoints you can use for Cloud services. 2. Use the authentication token to send a ``GET`` to a service you would like to use. Authentication tokens are typically valid for 24 hours. Applications should be designed to re-authenticate after receiving a 401 (Unauthorized) response from a service endpoint. **Note** If you programmatically parse an authentication response, be aware that service names are stable for the life of the particular service and can be used as keys. You should also be aware that a user's service catalog can include multiple uniquely-named services that perform similar functions.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/user/authentication_tokens.rst
authentication_tokens.rst
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================== The Notification Delivery Policy Guide ====================================== Support notification delivery policy in webhook type. It will work when the notification is sent from Zaqar to the subscriber failed. This guide shows how to use this feature: Webhook ------- .. note:: You should make sure that the message notification is enabled. By default, the ``message_pipeline`` config option in [storage] section should be set like: message_pipeline = zaqar.notification.notifier 1. Create the queue with _retry_policy metadata like this: .. code:: json { "_retry_policy": { "retries_with_no_delay": "<Integer value, optional>", "minimum_delay_retries": "<Integer value, optional>", "minimum_delay": "<Interger value, optional>", "maximum_delay": "<Interger value, optional>", "maximum_delay_retries": "<Integer value, optional>", "retry_backoff_function": "<String value, optional>", "ignore_subscription_override": "<Bool value, optional>"} } - 'minimum_delay' and 'maximum_delay' mean delay time in seconds. - 'retry_backoff_function' mean name of retry backoff function. There will be a enum in Zaqar that contain all valid values. Zaqar now supports retry backoff function including 'linear', 'arithmetic','geometric' and 'exponential'. - 'minimum_delay_retries' and 'maximum_delay_retries' mean the number of retries with 'minimum_delay' or 'maximum_delay' delay time. 
If the value of retry_policy is an empty dict, Zaqar will use the following
default values for those keys:

- retries_with_no_delay=3
- minimum_delay_retries=3
- minimum_delay=5
- maximum_delay=30
- maximum_delay_retries=3
- retry_backoff_function=linear
- ignore_subscription_override=False

2. Create a subscription with options like the queue's metadata above. If the
user does not set the options, Zaqar will use the retry policy in the queue's
metadata. If the user does set them, Zaqar will use the retry policy in the
options by default; if the user still wants to use the retry policy in the
queue's metadata, they can set ignore_subscription_override = True.
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/user/notification_delivery_policy.rst
notification_delivery_policy.rst
Common Headers ============== Each request to the Message Queuing API must include certain standard and extended HTTP headers (as shown in the following table). These headers provide host, agent, authentication, and other pertinent information to the server. The following table provides the common headers used by the API. .. list-table:: :widths: 50 50 :header-rows: 1 * - Header - Description * - Host - Host name of the API * - Date - Current date and time * - Accept - Media type to use. Initially, only ``application/json`` is supported. **Note: The "Accept" header is required.** * - Accept-Encoding - Specifies that the agent accepts gzip-encoded response bodies * - Content-Type - ``application/json`` * - Content-Length - For ``POST`` or ``PUT`` requests, the length in bytes of the message document being submitted * - X-Auth-Token - Authorization token * - X-Project-Id - An ID for a project to which the value of X-Auth-Token grants access. Queues are created under this project. The project ID is the same as the account ID (also sometimes called tenant ID). * - Client-ID - A UUID for each client instance. The UUID must be submitted in its canonical form (for example, 3381af92-2b9e-11e3-b191-71861300734c). The client generates the Client-ID once. Client-ID persists between restarts of the client so the client should reuse that same Client-ID. **Note: All message-related operations require the use of "Client-ID" in the headers to ensure that messages are not echoed back to the client that posted them, unless the client explicitly requests this.** Working with the Message Queuing API ==================================== This chapter contains a simple exercise with some basic Message Queuing requests that you will commonly use. Example requests are provided in cURL, followed by the response. 
For a complete list of operations available for Message Queuing, see :doc:`getting_started` Each operation is fully described in the `Message Queuing API v2 Reference <https://docs.openstack.org/api-ref/message/>`_. Create Queue ------------ The Create Queue operation creates a queue in the region of your choice. The body of the PUT request is empty. The template is as follows: .. code:: rest PUT {endpoint}/queues/{queue_name} The ``queue_name`` parameter specifies the name to give the queue. The name *must not* exceed 64 bytes in length and is limited to US-ASCII letters, digits, underscores, and hyphens. Following are examples of a Create Queue request and response: .. code-block:: bash curl -i -X PUT https://queues.api.openstack.org/v2/queues/samplequeue \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code:: rest HTTP/1.1 201 Created Content-Length: 0 Location: /v2/queues/samplequeue Post Message ------------ The Post Message operation inserts one or more messages in a queue. You can submit up to 10 messages in a single request, but you must encapsulate them in a collection container (an array in JSON, even for a single message - without the JSON array, you receive an "Invalid body request" error message). You can use the resulting value of the location header or response body to retrieve the created messages for further processing if needed. The template is as follows: .. code:: rest POST {endpoint}/queues/{queue_name}/messages The client specifies only the body and ttl attributes for the message. Metadata, such as id and age, is added. The response body contains a list of resource paths that correspond to each message submitted in the request, in the same order as they were submitted. If a server-side error occurs during the processing of the submitted messages, a partial list is returned. The ``partial`` attribute is set to ``true``, and the client tries to post the remaining messages again. 
**Important** The ``partial`` attribute has been deprecated in the v1.0 API and is not available in the v1.1 API. Drivers are now required to operate in a transactional manner. In other words, either all messages must be posted, or none of them. The ``body`` attribute specifies an arbitrary document that constitutes the body of the message being sent. The following rules apply for the maximum size: - The size is limited to 256 KB for the entire request body (as-is), including whitespace. - The maximum size of posted messages is the maximum size of the entire request document (rather than the sum of the individual message ``body`` field values as it was earlier releases). On error, the client is notified of by how much the request exceeded the limit. The document *must* be valid JSON. (The Message Queuing service validates it.) The ``ttl`` attribute specifies the lifetime of the message. When the lifetime expires, the server deletes the message and removes it from the queue. Valid values are 60 through 1209600 seconds (14 days). **Note** The server might not actually delete the message until its age reaches (ttl + 60) seconds. So there might be a delay of 60 seconds after the message expires before it is deleted. The following are examples of a Post Message request and response: .. code:: bash curl -i -X POST https://queues.api.openstack.org/v1/queues/samplequeue/messages -d \ '[{"ttl": 300,"body": {"event": "BackupStarted"}},{"ttl": 60,"body": {"play": "hockey"}}]' \ -H "Content-type: application/json" \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. 
code:: rest HTTP/1.1 201 Created Content-Length: 153 Content-Type: application/json; charset=utf-8 Location: /v1/queues/samplequeue/messages?ids=51ca00a0c508f154c912b85c,51ca00a0c508f154c912b85d {"partial": false, "resources": ["/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85c", "/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85d"]} Claim Messages -------------- The Claim Messages operation claims a set of messages (up to the value of the ``limit`` parameter) from oldest to newest and skips any messages that are already claimed. If there are no messages available to claim, the Message Queuing service returns an HTTP ``204 No Content`` response code. The template is as follows: .. code-block:: rest POST {endpoint}/queues/{queue_name}/claims{?limit} Content-Type: application/json { "ttl": {claim_ttl}, "grace": {message_grace} } The client (worker) needs to delete the message when it has finished processing it. The client deletes the message before the claim expires to ensure that the message is processed only once. If a client needs more time, the Cloud Service provides the Update Claim operation to make changes. See the Message Queuing API v1 Reference for a description of this operation. As part of the delete operation, workers specify the claim ID (which is best done by simply using the provided href). If workers perform these actions, then if a claim simply expires, the server can return an error and notify the worker of a possible race condition. This action gives the worker a chance to roll back its own processing of the given message because another worker can claim the message and process it. The age given for a claim is relative to the server's clock. The claim's age is useful for determining how quickly messages are getting processed and whether a given message's claim is about to expire. When a claim expires, it is released back to the queue for other workers to claim. 
(If the original worker failed to process the message, another client worker can then claim the message.) The ``limit`` parameter specifies the number of messages to claim. The ``limit`` parameter is configurable. The default is 20. Messages are claimed based on the number of messages available. The server might claim and return less than the requested number of messages. The ``ttl`` attribute specifies the lifetime of the claim. While messages are claimed, they are not available to other workers. The value must be between 60 and 43200 seconds (12 hours). The ``grace`` attribute specifies the message grace period in seconds. Valid values are between 60 and 43200 seconds (12 hours). To deal with workers that have stopped responding (for up to 1209600 seconds or 14 days, including claim lifetime), the server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus the specified grace period. If a claimed message normally lives longer than the grace period, its expiration is not adjusted. it Following are examples of a Claim Messages request and response: .. code:: bash curl -i -X POST https://queues.api.openstack.org/v1/queues/samplequeue/claims -d \ '{"ttl": 300,"grace":300}' \ -H "Content-type: application/json" \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code-block:: rest HTTP/1.1 201 OK Content-Length: 164 Content-Type: application/json; charset=utf-8 Location: /v1/queues/samplequeue/claims/51ca011c821e7250f344efd6 X-Project-Id: [ { "body": { "event": "BackupStarted" }, "age": 124, "href": "\/v1\/queues\/samplequeue\/messages\/51ca00a0c508f154c912b85c?claim_id=51ca011c821e7250f344efd6", "ttl": 300 } ] Delete Message with Claim ID ---------------------------- The Delete Message operations deletes messages. The template is as follows: .. 
code:: rest DELETE {endpoint}/queues/{queue_name}/messages/{message_id}{?claim_id} The ``message_id`` parameter specifies the message to delete. The ``claim_id`` parameter specifies that the message is deleted only if it has the specified claim ID and that claim has not expired. This specification is useful for ensuring that only one worker processes any given message. When a worker's claim expires before it deletes a message that it has processed, the worker must roll back any actions it took based on that message because another worker can now claim and process the same message. Following are examples of a Delete Message request and response: .. code:: bash curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85c?claim_id=51ca011c821e7250f344efd6 \ -H "Content-type: application/json" \ -H "X-Auth-Token: " \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code:: rest HTTP/1.1 204 No Content Release Claim ------------- The Release Claim operation immediately releases a claim, making any remaining, undeleted) messages associated with the claim available to other workers. The template is as follows: .. code:: rest DELETE {endpoint}/queues/{queue_name}/claims/{claim_id} This operation is useful when a worker is performing a graceful shutdown, fails to process one or more messages, or is taking longer than expected to process messages and wants to make the remainder of the messages available to other workers. Following are examples of a Release Claim request and response: .. code:: bash curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue/claims/51ca011c821e7250f344efd6 \ -H "Content-type: application/json" \ -H "X-Auth-Token: " \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "Accept: application/json" \ -H "X-Project-Id: " .. 
code:: rest HTTP/1.1 204 No Content Delete Queue ------------ The Delete Queue operation immediately deletes a queue and all of its existing messages. The template is as follows: .. code:: rest DELETE {endpoint}/queues/{queue_name} Following are examples of a Delete Queue request and response: .. code:: bash curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue \ -H "Content-type: application/json" \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code:: rest HTTP/1.1 204 No Content
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/doc/source/user/headers_queue_api_working.rst
headers_queue_api_working.rst
import logging
import sys

from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import requests

try:
    # Python 2 module names.
    import SimpleHTTPServer
    import SocketServer
except ImportError:
    # Python 3 renamed these modules; alias them to the Python 2 names.
    from http import server as SimpleHTTPServer
    import socketserver as SocketServer

# Listening port: second CLI argument if given, else the first, else 5678.
if len(sys.argv) > 2:
    PORT = int(sys.argv[2])
elif len(sys.argv) > 1:
    PORT = int(sys.argv[1])
else:
    PORT = 5678


class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """This is the sample service for email subscription confirmation."""

    def do_OPTIONS(self):
        """Answer the CORS preflight request sent before the actual PUT."""
        logging.warning('=================== OPTIONS =====================')
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', self.headers['origin'])
        self.send_header('Access-Control-Allow-Methods', 'PUT')
        self.send_header('Access-Control-Allow-Headers',
                         'client-id,confirmation-url,content-type,url-expires,'
                         'url-methods,url-paths,url-signature,x-project-id,'
                         'confirm')
        self.end_headers()
        logging.warning(self.headers)

    def do_PUT(self):
        """Forward the confirmation upstream, then acknowledge the caller."""
        logging.warning('=================== PUT =====================')
        self._send_confirm_request()
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', self.headers['origin'])
        self.end_headers()
        message = "{\"message\": \"ok\"}"
        # wfile expects bytes on Python 3; encode the JSON payload.
        self.wfile.write(message.encode('utf-8'))
        logging.warning(self.headers)

    def _send_confirm_request(self):
        """PUT the signed confirm/unconfirm request to the confirmation URL.

        The target URL and the signed-URL headers are all taken from the
        incoming request's headers.
        """
        url = self.headers['confirmation-url']
        confirmed_value = True
        try:
            # An explicit 'confirm: false' header means "unconfirm".
            if self.headers['confirm'] == "false":
                confirmed_value = False
        except KeyError:
            # Header absent: keep the default of confirming. (NOTE(review):
            # on Python 3 a missing header yields None instead of raising
            # KeyError, so this except path only fires on Python 2.)
            pass
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'X-Project-ID': self.headers['x-project-id'],
            'Client-ID': uuidutils.generate_uuid(),
            'URL-Methods': self.headers['url-methods'],
            'URL-Signature': self.headers['url-signature'],
            'URL-Paths': self.headers['url-paths'],
            'URL-Expires': self.headers['url-expires'],
        }
        data = {'confirmed': confirmed_value}
        requests.put(url=url, data=jsonutils.dumps(data), headers=headers)


Handler = ServerHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
httpd.serve_forever()
zaqar
/zaqar-16.0.0.0rc1.tar.gz/zaqar-16.0.0.0rc1/samples/html/confirmation_web_service_sample.py
confirmation_web_service_sample.py
========= zar 2.0.2 ========= |Maturity| |Build Status| |Coverage Status| |license gpl| Overview ======== ========= zar 2.0.2 ========= |Maturity| |Build Status| |Coverage Status| |license gpl| Overview ======== zar === Zeroincombenze® Archive Replica ------------------------------- ZAR stand for Zeroincombenze® Archive Replica. It is a tool kit to backup, restore, replicate files and/or database. ZAR manages easily backup for Odoo database, keeps last nth copies and purges oldest copies. | Features -------- * backup and restore odoo database * backup and restore based on rules by configuration file * restore database with automatic actions disabled * multiple copies of database by configuration file * automatic purging of oldest copies * configuration based on host name: it works on duplicate host image too * backup on same host or on remote host | | Features -------- * backup and restore odoo database * backup and restore based on rules by configuration file * restore database with automatic actions disabled * multiple copies of database by configuration file * automatic purging of oldest copies * configuration based on host name: it works on duplicate host image too * backup on same host or on remote host | | Getting started =============== Installation ------------ Zeroincombenze tools require: * Linux Centos 7/8 or Debian 9/10 or Ubuntu 18/20/22 * python 2.7+, some tools require python 3.6+ * bash 5.0+ Stable version via Python Package ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: pip install zar | Current version via Git ~~~~~~~~~~~~~~~~~~~~~~~ :: cd $HOME git clone https://github.com/zeroincombenze/tools.git cd ./tools ./install_tools.sh -p source $HOME/devel/activate_tools Upgrade ------- Stable version via Python Package ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: pip install zar -U | Current version via Git ~~~~~~~~~~~~~~~~~~~~~~~ :: cd $HOME ./install_tools.sh -U source $HOME/devel/activate_tools | | Credits ======= Copyright --------- SHS-AV s.r.l. 
<https://www.shs-av.com/> Contributors ------------ * Antonio M. Vigliotti <[email protected]> Contributors ------------ * Antonio Maria Vigliotti <[email protected]> | This module is part of tools project. Last Update / Ultimo aggiornamento: 2023-05-21 .. |Maturity| image:: https://img.shields.io/badge/maturity-Alfa-black.png :target: https://odoo-community.org/page/development-status :alt: .. |Build Status| image:: https://travis-ci.org/zeroincombenze/tools.svg?branch=master :target: https://travis-ci.com/zeroincombenze/tools :alt: github.com .. |license gpl| image:: https://img.shields.io/badge/licence-AGPL--3-blue.svg :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html :alt: License: AGPL-3 .. |license opl| image:: https://img.shields.io/badge/licence-OPL-7379c3.svg :target: https://www.odoo.com/documentation/user/9.0/legal/licenses/licenses.html :alt: License: OPL .. |Coverage Status| image:: https://coveralls.io/repos/github/zeroincombenze/tools/badge.svg?branch=master :target: https://coveralls.io/github/zeroincombenze/tools?branch=2.0 :alt: Coverage .. |Codecov Status| image:: https://codecov.io/gh/zeroincombenze/tools/branch/2.0/graph/badge.svg :target: https://codecov.io/gh/zeroincombenze/tools/branch/2.0 :alt: Codecov .. |Tech Doc| image:: https://www.zeroincombenze.it/wp-content/uploads/ci-ct/prd/button-docs-2.svg :target: https://wiki.zeroincombenze.org/en/Odoo/2.0/dev :alt: Technical Documentation .. |Help| image:: https://www.zeroincombenze.it/wp-content/uploads/ci-ct/prd/button-help-2.svg :target: https://wiki.zeroincombenze.org/it/Odoo/2.0/man :alt: Technical Documentation .. |Try Me| image:: https://www.zeroincombenze.it/wp-content/uploads/ci-ct/prd/button-try-it-2.svg :target: https://erp2.zeroincombenze.it :alt: Try Me .. |OCA Codecov| image:: https://codecov.io/gh/OCA/tools/branch/2.0/graph/badge.svg :target: https://codecov.io/gh/OCA/tools/branch/2.0 :alt: Codecov .. 
|Odoo Italia Associazione| image:: https://www.odoo-italia.org/images/Immagini/Odoo%20Italia%20-%20126x56.png :target: https://odoo-italia.org :alt: Odoo Italia Associazione .. |Zeroincombenze| image:: https://avatars0.githubusercontent.com/u/6972555?s=460&v=4 :target: https://www.zeroincombenze.it/ :alt: Zeroincombenze .. |en| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/flags/en_US.png :target: https://www.facebook.com/Zeroincombenze-Software-gestionale-online-249494305219415/ .. |it| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/flags/it_IT.png :target: https://www.facebook.com/Zeroincombenze-Software-gestionale-online-249494305219415/ .. |check| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/check.png .. |no_check| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/no_check.png .. |menu| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/menu.png .. |right_do| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/right_do.png .. |exclamation| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/exclamation.png .. |warning| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/warning.png .. |same| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/same.png .. |late| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/late.png .. |halt| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/halt.png .. |info| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/awesome/info.png .. |xml_schema| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/certificates/iso/icons/xml-schema.png :target: https://github.com/zeroincombenze/grymb/blob/master/certificates/iso/scope/xml-schema.md .. 
|DesktopTelematico| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/certificates/ade/icons/DesktopTelematico.png :target: https://github.com/zeroincombenze/grymb/blob/master/certificates/ade/scope/Desktoptelematico.md .. |FatturaPA| image:: https://raw.githubusercontent.com/zeroincombenze/grymb/master/certificates/ade/icons/fatturapa.png :target: https://github.com/zeroincombenze/grymb/blob/master/certificates/ade/scope/fatturapa.md .. |chat_with_us| image:: https://www.shs-av.com/wp-content/chat_with_us.gif :target: https://t.me/Assitenza_clienti_powERP
zar
/zar-2.0.3.tar.gz/zar-2.0.3/README.rst
README.rst
from __future__ import print_function, unicode_literals from whaaaaat import prompt, print_json import os from git import Repo all_done = True current_directory = os.getcwd() root_directory = os.path.expanduser('~/.deployer_config') try: config_file_content = open(root_directory, 'r').read() except: print("No configuration file in " + root_directory) exit(1) try: repo = Repo(current_directory) except: print("No git repository found in " + current_directory) exit(1) # branch iniziale initial_branch = repo.active_branch # lista di tutti i branch presenti nella repository branch_list = repo.remotes.origin.fetch() # lista dei commit più recenti commit_list = list(repo.iter_commits(initial_branch, max_count=7)) repo.config_writer().set_value("user", "name", "zarabaza").release() repo.config_writer().set_value("user", "email", "[email protected]").release() print("Current branch: " + str(initial_branch) + "\n") questions = [ { 'type': 'list', 'name': 'selected_commit', 'message': 'Select a commit to cherry-pick', 'choices': [str(c.message.replace("\n", "") + " [hash=>" +c.hexsha) for c in commit_list] }, { "type": "checkbox", "message": "Select branches to cherry-pick onto", "name": "selected_branches", "choices": [{'name': str(b)} for b in filter(lambda x: x!=initial_branch, branch_list)], 'validate': lambda answer: 'You must have to select at least one branch.' \ if len(answer) == 0 else True } ] # chiediamo quali commit e su quali branch fare cherry-pick answers = prompt(questions) print_json(answers) # se non è stato selezionato nessun commit o nessun branch esco if(answers['selected_commit'] == None or len(answers['selected_branches']) == 0): print("No commit or branch selected. 
Exiting.") exit(1) # faccio lo stash del branch iniziale per evitare di sovrascrivere modifiche pendenti repo.git.stash() for branch in answers['selected_branches']: branch = branch.replace("origin/", "") print("Cherry-pick " + answers['selected_commit'].split("hash=>")[1] + " commit onto " + branch + " branch") # mi sposto sul branch in cui cherry-pickare repo.git.checkout(branch) # faccio il pull del branch per avere le modifiche più recenti repo.git.pull("origin", branch) # faccio il cherry-pick try: repo.git.cherry_pick(answers['selected_commit'].split("hash=>")[1], strategy_option="theirs") except Exception as e: print("Cherry-pick failed. Skipping branch " + branch + "\n") print(e) all_done = False continue # faccio il push del branch con il cherry pick fatto try: repo.git.push("origin", branch) except Exception as e: print("Push failed. Skipping branch " + branch + "\n") print(e) all_done = False continue # se il branch è un dev chiedo se fare il merge in master if branch.find("/dev") != -1: # chiediamo se fare anche il merge in master questiones = [ { 'type': 'confirm', 'name': 'merge_master', 'message': 'Do you want to merge this branch into '+ branch.replace("/dev", "/master") + "?", 'default': False, } ] answers_to_merge = prompt(questiones) if(answers_to_merge['merge_master']): master_branch = branch.replace("/dev", "/master") print("Merging " + branch + " into " + master_branch) # mi sposto sul branch xxx/master try: repo.git.checkout(master_branch) except Exception as e: print("No branch found with name: " + master_branch + "\n") all_done = False continue # faccio il pull del branch per avere le modifiche più recenti repo.git.pull("origin",master_branch) # faccio il merge try: repo.git.merge(branch, strategy_option="theirs") except Exception as e: print("Merge failed. 
Skipping branch " + master_branch + "\n") print(e) all_done = False continue # faccio il push del branch con il merge fatto try: repo.git.push("origin", master_branch) except Exception as e: print("Push failed. Skipping branch " + master_branch + "\n") print(e) all_done = False continue # torno sul branch iniziale repo.git.checkout(initial_branch) # faccio il pop dello stash per ripristinare le modifiche pendenti try: repo.git.stash("pop") except Exception as e: print("No stash to pop. Skipping stash pop.\n") print("Back on branch " + str(initial_branch) + "\n") if(all_done): print("Operations successfully completed.")
zarabaza
/zarabaza-0.0.13.tar.gz/zarabaza-0.0.13/zarabaza.py
zarabaza.py
# zarame <img src="https://repository-images.githubusercontent.com/125183142/d7c96200-18e8-11ea-961a-a5b9d203a6d5" width=400> [![PyPI version](https://badge.fury.io/py/zarame.svg)](https://badge.fury.io/py/zarame) [![CircleCI](https://circleci.com/gh/pistatium/zarame/tree/master.svg?style=svg)](https://circleci.com/gh/pistatium/zarame/tree/master) Simple structural converter using NamedTuple ## Usage ### Load from dict ```python from typing import NamedTuple, List, Optional from enum import Enum from zarame import load class Status(Enum): ACTIVE = 'active' INACTIVE = 'inactive' DELETED = 'deleted' class User(NamedTuple): id: int name: str email: Optional[str] class Room(NamedTuple): status: Status users: List[User] room = { 'status': 'active', 'users': [ {'id': 1, 'name': 'Taro'}, {'id': 2, 'name': 'Hanako', 'email': '[email protected]'} ] } # Convert from dict to Room instance instance = load(room, Room) ``` __Result__ ``` Room( status=Status.Active, users=[ User(id=1, name='Taro', email=None), User(id=2, name='Hanako', email='[email protected]') ] ) ``` You can use `str`, `int`, `bool`, `float`, `Enum`, `List`, `Tuple(as List)`, `NamedTuple` to definition. ### Dump to dict ```python from zarame import dump room = Room( status=Status.ACTIVE, users=[ User(id=1, name='Taro', email=None), User(id=2, name='Hanako', email='[email protected]') ] ) d = dump(d) ``` __Result__ ```python OrderedDict([ ('status', 'active'), ('users', [ OrderedDict([ ('id', 1), ('name', 'Taro'), ('email', None) ]), OrderedDict([ ('id', 2), ('name', 'Hanako'), ('email', '[email protected]') ]) ]) ]) ```
zarame
/zarame-0.0.4.tar.gz/zarame-0.0.4/README.md
README.md
====== zardoz ====== .. image:: https://img.shields.io/pypi/v/zardoz.svg :target: https://pypi.python.org/pypi/zardoz .. image:: https://img.shields.io/travis/camillescott/zardoz.svg :target: https://travis-ci.com/camillescott/zardoz .. image:: https://readthedocs.org/projects/zardoz/badge/?version=latest :target: https://zardoz.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status Another dice bot for discord. * Free software: MIT license * Documentation: https://zardoz.readthedocs.io. Features -------- * Complex roll options provided via `python-dice <https://github.com/borntyping/python-dice#notation>`_ * Multiple game types to provide fast default dice rolls (ie, `1d100` represented by `r` in Rogue Trader mode) * Reports degrees of success or failure when in RT mode, or success or failure otherwise, when using comparison operators * Stores roll history for server Examples -------- For a basic roll, ``/z 1d100``:: Request: 1d100 Rolled out: {1d100 ⤳ 53} Result: [53] Multiple dice, ``/z 3d100``:: Request: 3d100 Rolled out: {3d100 ⤳ [27, 83, 73]} Result: [27, 83, 73] Distributed addition and subtraction, ``/z 3d100 + 10``:: Request: 3d100 + 10 Rolled out: {3d100 ⤳ [47, 30, 19]} + 10 Result: [57, 40, 29] Comparisons:: Request: 4d6 <= 4 Rolled out: {4d6 ⤳ [6, 2, 4, 2]} <= 4 Result: 6 ⤳ failed by 2 2 ⤳ succeeded by 2 4 ⤳ succeeded by 0 2 ⤳ succeeded by 2 DoF/Dos, ``/z 3d100 <= 50``:: Request: 3d100 <= 50 Rolled out: {3d100 ⤳ [57, 11, 88]} <= 50 Result: 57 ⤳ failure 11 ⤳ 3 DoS 88 ⤳ 3 DoF Order of operations, ``/z 3d100 <= 50 + 5``:: Request: 3d100 <= 50 + 5 Rolled out: {3d100 ⤳ [75, 87, 55]} <= 50 + 5 Result: 75 ⤳ 2 DoF 87 ⤳ 3 DoF 55 ⤳ success Credits ------- This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template. .. _Cookiecutter: https://github.com/audreyr/cookiecutter .. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
zardoz
/zardoz-1.0.0.tar.gz/zardoz-1.0.0/README.rst
README.rst
.. highlight:: shell ============ Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/camillescott/zardoz/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ zardoz could always use more documentation, whether as part of the official zardoz docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/camillescott/zardoz/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `zardoz` for local development. 1. Fork the `zardoz` repo on GitHub. 2. Clone your fork locally:: $ git clone [email protected]:your_name_here/zardoz.git 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: $ mkvirtualenv zardoz $ cd zardoz/ $ python setup.py develop 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: $ flake8 zardoz tests $ python setup.py test or pytest $ tox To get flake8 and tox, just pip install them into your virtualenv. 6. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. 3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8, and for PyPy. Check https://travis-ci.com/camillescott/zardoz/pull_requests and make sure that the tests pass for all supported Python versions. Tips ---- To run a subset of tests:: $ pytest tests.test_zardoz Deploying --------- A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). Then run:: $ bump2version patch # possible: major / minor / patch $ git push $ git push --tags Travis will then deploy to PyPI if tests pass.
zardoz
/zardoz-1.0.0.tar.gz/zardoz-1.0.0/CONTRIBUTING.rst
CONTRIBUTING.rst
.. highlight:: shell ============ Installation ============ Stable release -------------- To install zardoz, run this command in your terminal: .. code-block:: console $ pip install zardoz This is the preferred method to install zardoz, as it will always install the most recent stable release. If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ From sources ------------ The sources for zardoz can be downloaded from the `Github repo`_. You can either clone the public repository: .. code-block:: console $ git clone git://github.com/camillescott/zardoz Or download the `tarball`_: .. code-block:: console $ curl -OJL https://github.com/camillescott/zardoz/tarball/master Once you have a copy of the source, you can install it with: .. code-block:: console $ python setup.py install .. _Github repo: https://github.com/camillescott/zardoz .. _tarball: https://github.com/camillescott/zardoz/tarball/master
zardoz
/zardoz-1.0.0.tar.gz/zardoz-1.0.0/docs/installation.rst
installation.rst
# Zarena 🦀 Rust Game Collection with Reninforcement Learning gym environments. This library aims to serve the same purpose as OpenSpiel, except in Rust to make it easier to use & maintain. The current games are gato, blackjack, chess & poker texas hold'em. All of these additionally support Web Assembly. You can play gato & chess against our Artificial Intelligence at [Zeti Games](https://zeti.ai/playground) ## Configurations Depending on the cargo file you want. You must change your cargo.toml to match that build. `Cargo.py.toml` -> Python Build `Cargo.rs.toml` -> Development Build `Cargo.wa.toml` -> Web Assembly Build `Cargo.toml` -> The actual file that Rust will build on. Copy from py/rs/wa to this file. ## Commands If you don't have Rust, no worries. Download Rust for Linux or Windows Subsystem. [If you need more help.](https://www.rust-lang.org/tools/install) `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh` #### Download the C compiler `sudo apt-get update && sudo apt-get install build-essential` #### Install poetry `curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python -` #### Install Maturin via Poetry `poetry install` #### Build the Maturin Develop Build `poetry run maturin develop` #### Build the Maturin Test Build `poetry run maturin build` #### Build the Maturin Production Build. The Python Wheel & source distribution. 
`poetry run maturin build --release` #### Build the Web Assembly file `wasm-pack build --target web -- --features wasm` ### Usage You can import the Python classes directly, or create pre-defined environments with `gym` in this case it is also necessary to import the class: ```python # import gym to use training environments import gym # from zarena import the training environment of your choice, for: # option 1.- use the python class directly # option 2.- register the environment in gym and use it with gym.make(environment_name) from zarena import gym_chess env = gym_chess.ChessEnv() # Option 1 env = gym.make('ChessEnv-v3') # Option 2 # reset the environment and get the initial state observation observation = env.reset() # obtain legal actions actions = env.legal_actions() # select action according to a criterion, in this case random action = random.choice(actions) # pass it to the env and get the next state observation, reward, if the game is over and environment information observation, reward, done, info = env.step(action) # get the player to play env.to_play() # properly close the game env.close() # display the game ovservation env.render() ``` ## Environments id - Tictactoe: `GatoEnv-v2` - Chess: `ChessEnv-v3` - Blackjack: `BlackjackEnv-v1` - Poker: `PokerEnv-v1` - Checkers `CheckersEnv-v1` ## Testing Run all the tests with `pytest`. ## Code linting and fixing Python code is formatted with [black](https://github.com/psf/black). Rust code is formatted with `cargo fmt`. ## Building the Rust code The environment uses a chess engine implemented in Rust that uses [PyO3](https://github.com/PyO3/pyo3) Maturin to bind to the Python interpreter. 
Rust is an amazing compiled language and this project holds 2 configurations: - `Cargo.py.toml` is used to build the library into a Python module with maturin - `Cargo.rs.toml` is used to build directly with `cargo` in Rust to access the library in the `main.rs` script for development - `Cargo.wa.toml` is used to build to build for Javascript with Web Assembly. The games can be played via Web Assembly on Zeti's website https://zeti.ai Note: we haven't found a way to specify the Cargo toml file to either process, so copy the contents of the config you want to use into `Cargo.toml` to make it work. ## Game of Gato The game of Xs & Os ### API #### Initialize environment ```python >>> env = BlackjackEnv(n_players=1) ``` - `n_players`: specify the number of players `2<=n_players<=7` (default: `1`) #### Set actions ```python >>> env.step(action) ``` - `action`: mark a position, could be `0<=action<=8` ```shell > 0 | 1 | 2 > 3 | 4 | 5 > 6 | 7 | 8 ``` <img src="https://i.imgur.com/qqK1mBc.jpeg" alt="gata" height="400"/> ## Blackjack ### API #### Initialize environment ```python >>> env = BlackjackEnv(n_players=1) ``` - `n_players`: specify the number of players `2<=n_players<=7` (default: `1`) #### Set actions ```python >>> env.step(action) ``` - `action`: can be * `0` -> stand * `1` -> HIT * `2` -> double down * `3` -> pull apart (currently disabled) ![21](https://black-jack.com/es/wp-content/uploads/sites/5/2019/02/blackjack-3.jpg) ## Chess #### See the chess board and moves Visualise the current state of the chess game: ```python env.render() ``` ```shell ------------------------- 8 | ♖ ♘ ♗ ♕ ♔ ♗ ♘ ♖ | 7 | ♙ ♙ ♙ ♙ ♙ ♙ ♙ ♙ | 6 | . . . . . . . . | 5 | . . . . . . . . | 4 | . . . . . . . . | 3 | . . . . . . . . 
| 2 | ♟ ♟ ♟ ♟ ♟ ♟ ♟ ♟ | 1 | ♜ ♞ ♝ ♛ ♚ ♝ ♞ ♜ | ------------------------- a b c d e f g h ``` You can also visualise multiple moves: ```python >>> moves = env.possible_moves >>> env.render_moves(moves[10:12] + moves[16:18]) ``` ### API #### Initialize environment ```python >>> env = ChessEnv(player_color="WHITE", opponent="random", log=True, initial_state=DEFAULT_BOARD) ``` - `opponent`: can be `"random"`, `"none"` or a function. Tells the environment whether to use a bot that picks a random move, play against self or use a specific bot policy (default: `"random"`) - `log`: `True` or `False`, specifies whether to log every move and render every new state (default: `True`) - `initial_state`: initial board positions, the default value is the default chess starting board. You can specify a custom board. View scripts `gym_chess/test/` for some examples - `player_color`: `"WHITE"` or `"BLACK"`, only useful if playing against a bot (default: `"WHITE"`) ```python >>> env.get_possible_moves(state=state, player="WHITE", attack=False) ``` This method will calculate the possible moves. By default they are calculated at the current state for the current player (`state.current_player`). - `state`: (optional) state for which to calculate the moves - `player`: (optional) "WHITE" or "BLACK", specifies the player #### Move specification: Moves are encoded as either: - a tuple of coordinates `((from_x, from_y), (to_x, to_y))` - or a string e.g. `"CASTLE_KING_SIDE_WHITE"`, `"CASTLE_QUEEN_SIDE_BLACK"`, `"RESIGN"` Moves are pre-calculated for every new state and stored in `possible_moves`. #### Get State ```python >>> print(env.state['board']) ``` ```shell [[-3, -5, -4, -2, -1, -4, -5, -3], [-6, -6, -6, -6, -6, -6, -6, -6], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [6, 6, 6, 6, 6, 6, 6, 6], [3, 5, 4, 2, 1, 4, 5, 3]] ``` Every integer represents a piece. Positive pieces are white and negative ones are black. 
Piece IDs are stored in constants that can be imported. ```python from gym_chess.envs.chess import ( KING_ID, QUEEN_ID, ROOK_ID, BISHOP_ID, KNIGHT_ID, PAWN_ID, ) ``` The schema is: ```python EMPTY_SQUARE_ID = 0 KING_ID = 1 QUEEN_ID = 2 ROOK_ID = 3 BISHOP_ID = 4 KNIGHT_ID = 5 PAWN_ID = 6 ``` Additional information can be found in other attributes of the environment: ```python env.current_player env.white_king_castle_possible env.white_queen_castle_possible env.black_king_castle_possible env.black_queen_castle_possible env.white_king_on_the_board env.black_king_on_the_board ``` ![Fischer](https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Bobby_Fischer_1960_in_Leipzig_in_color.jpg/375px-Bobby_Fischer_1960_in_Leipzig_in_color.jpg) #### Notes: En-passant has not been implemented yet. ## Poker ### API #### Initialize environment ```python >>> env = PokerEnv(n_players=2, infinite_game=True) ``` - `n_players`: specify the number of players `2<=n_players<=9` (default: `2`) - `infinite_game`: `True` or `False`, specify if players get their starting credit back after each round (default: `True`) #### Set actions ```python >>> env.step(action) ``` - `action`: can be * `0` -> small blind * `1` -> big blind * `2` -> fold * `3` -> check * `4` -> bet * `5` -> call * `6` -> raise to 25 * `7` -> raise to 50 * `8` -> raise to 100 * `9` -> raise to 500 * `10` -> raise to 1000 * `11` -> all in ![alt text](https://media.wired.com/photos/5fbe703e534553a88817f988/master/w_640,c_limit/Sec_poker_914262206.jpg) ## Checkers ### API #### Initialize environment ```python >>> env = CheckersEnv() ``` #### Set actions ```python >>> env.step(action) ``` - `action`: mark a position, could be `0<=action<1024` To encode the coordinates use something like this: ```rust // positions -> [[from_row, from_col], [to_row, to_col]] fn positions_to_action(&self, positions: &Vec<BoardPosition>) -> usize { let from = positions[0].row * 4 + ( positions[0].column - if positions[0].row % 2 == 0 {0} else {1} 
) / 2; let to = positions[1].row * 4 + ( positions[1].column - if positions[1].row % 2 == 0 {0} else {1} ) / 2; from * 32 + to } ``` #### Get State ```python >>> print(env.get_state()) // Return (current_player, board, is_game_over) ``` Board: ```shell [[0, 3, 0, 3, 0, 3, 0, 3], [3, 0, 3, 0, 3, 0, 3, 0], [0, 3, 0, 3, 0, 3, 0, 3], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 1, 0, 1, 0, 1, 0]] ``` Every integer represents a piece. Piece IDs: - `0`: empty - `1`: man_1 - `2`: king_1 - `3`: man_2 - `4`: king_2 #### Set State ```python >>> // state -> (current_player, board) >>> env.set_state(state) // Return observation ``` <img src="https://store-images.s-microsoft.com/image/apps.1041.14134228309561141.5f4fe955-4bb7-4a0f-b49f-b2ea0b47fc47.482176d1-bf5b-4691-97e2-3fc400c416c5" alt="gata" height="200"/> ## References - https://github.com/PyO3/maturin - https://github.com/genyrosk/gym-chess (Thanks to genyrosk for gym-chess) - https://github.com/deepmind/open_spiel ## Contrbutions Pull Request Are Welcomed! ## License [GPL-3.0](https://www.gnu.org/licenses/gpl-3.0.html) ## Social [Discord](https://zetiai.slack.com/archives/C01G0HRJWPK/p1637616128002100) [Twitter](https://twitter.com/ZetiAi) [Youtube](https://www.youtube.com/channel/UC4f1XKeAqBsTuKHXLywXpEQ) [Facebook](https://www.facebook.com/ZetiAI/)
zarena
/zarena-0.2.2.tar.gz/zarena-0.2.2/README.md
README.md
zarenacord =========== A modern, easy to use, feature-rich, and async ready API wrapper for Discord written in Python. | .. image:: https://discord.com/api/guilds/456574328990072838/embed.png :target: `Support Server`_ :alt: Discord server invite .. image:: https://img.shields.io/pypi/v/zarenacord.svg :target: https://pypi.python.org/pypi/zarenacord :alt: PyPI version info .. image:: https://img.shields.io/pypi/pyversions/zarenacord.svg :target: https://pypi.python.org/pypi/zarenacord :alt: PyPI supported Python versions .. image:: https://readthedocs.org/projects/zarenacord/badge/?version=latest :target: http://zarenacord.readthedocs.io/?badge=latest :alt: Documentation Status | **PLEASE NOTE: This is a fork of OG** |discord.py|_ **by** |Rapptz!|_ **Since Danny no longer maintains dpy so I created this lib in order to add any upcoming feature from Discord** **and I'm using** |Maya|_ **slash command wrapper for application commands.** Key Features ------------- - Modern Pythonic API using ``async`` and ``await``. - Proper rate limit handling. - Command extension to aid with bot creation - Easy to use with an object oriented design - 100% coverage of the supported Discord API. - Optimised in both speed and memory. Installing ---------- **Python 3.8 or higher is required** To install the library without full voice support, you can just run the following command: .. code:: sh # Linux/macOS python3 -m pip install -U zarenacord # Windows py -3 -m pip install -U zarenacord Otherwise to get voice support you should run the following command: .. code:: sh # Linux/macOS python3 -m pip install -U "zarenacord[voice]" # Windows py -3 -m pip install -U zarenacord[voice] To install the development version, do the following: .. 
code:: sh $ git clone https://github.com/Zarenalabs/zarenacord.git $ cd zarenacord $ python3 -m pip install -U .[voice] Optional Packages ~~~~~~~~~~~~~~~~~~ * `PyNaCl <https://pypi.org/project/PyNaCl/>`__ (for voice support) Please note that on Linux installing voice you must install the following packages via your favourite package manager (e.g. ``apt``, ``dnf``, etc) before running the above commands: * libffi-dev (or ``libffi-devel`` on some systems) * python-dev (e.g. ``python3.6-dev`` for Python 3.6) Quick Example -------------- .. code:: py import discord class MyClient(discord.Client): async def on_ready(self): print('Logged in as', self.user) async def on_message(self, message): # don't respond to ourselves if message.author == self.user: return if message.content == 'ping': await message.channel.send('pong') client = MyClient() client.run('token') Bot Example ~~~~~~~~~~~~~ .. code:: py import discord from discord.ext import commands bot = commands.Bot(command_prefix='!') @bot.command() async def ping(ctx): await ctx.send('pong') bot.run('token') Application Commands Example ----------------------------- ``zarena`` defines a bot subclass to automatically handle posting updated commands to discords api. This isn't required but highly recommended to use. .. code:: py class MyBot(zarena.Bot): def __init__(self): super().__init__(command_prefix="!") # command prefix only applies to message based commands self.load_extension("cogs.my_cog") # important! if __name__ == '__main__': MyBot().run("token") **Sample cog:** .. 
code:: py class MyCog(zarena.ApplicationCog): # slash command @zarena.slash_command() async def slash(self, ctx: zarena.Context, number: int): await ctx.send(f"You selected #{number}!", ephemeral=True) # message context menus @zarena.message_command(name="Quote") async def quote(self, ctx: zarena.Context, message: discord.Message): await ctx.send(f'> {message.clean_content}\n- {message.author}') # user context menus @zarena.user_command(name='Cookie') async def cookie(self, ctx: zarena.Context, user: discord.Member): await ctx.send(f'{ctx.author} gave cookie to {user} 🍪') Links ------ Documentation_ | `Support Server`_ | `Discord API`_ .. _Documentation: https://zarenacord.readthedocs.io/en/latest/index.html .. _`Support Server`: https://discord.gg/SwfNRrmr3p .. _`Discord API`: https://discord.gg/discord-api .. _discord.py: https://github.com/Rapptz/discord.py .. |discord.py| replace:: **discord.py** .. _Rapptz!: https://github.com/Rapptz .. |Rapptz!| replace:: **Rapptz!** .. _Maya: https://github.com/XuaTheGrate .. |Maya| replace:: **Maya's**
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/README.rst
README.rst
from __future__ import annotations

import io
import os
from typing import Any, Literal, Optional, TYPE_CHECKING, Tuple, Union

from .errors import DiscordException
from .errors import InvalidArgument
from . import utils

import yarl

__all__ = (
    'Asset',
)

if TYPE_CHECKING:
    ValidStaticFormatTypes = Literal['webp', 'jpeg', 'jpg', 'png']
    ValidAssetFormatTypes = Literal['webp', 'jpeg', 'jpg', 'png', 'gif']

# Formats the CDN accepts; only animated assets may additionally use 'gif'.
VALID_STATIC_FORMATS = frozenset({"jpeg", "jpg", "webp", "png"})
VALID_ASSET_FORMATS = VALID_STATIC_FORMATS | {"gif"}

MISSING = utils.MISSING


class AssetMixin:
    """Mixin adding download helpers to anything exposing a CDN ``url``.

    Subclasses must provide ``url`` and a connection ``_state`` (may be
    ``None``, in which case downloading raises).
    """

    url: str
    _state: Optional[Any]

    async def read(self) -> bytes:
        """|coro|

        Retrieves the content of this asset as a :class:`bytes` object.

        Raises
        ------
        DiscordException
            There was no internal connection state.
        HTTPException
            Downloading the asset failed.
        NotFound
            The asset was deleted.

        Returns
        -------
        :class:`bytes`
            The content of the asset.
        """
        if self._state is None:
            raise DiscordException('Invalid state (no ConnectionState provided)')

        return await self._state.http.get_from_cdn(self.url)

    async def save(self, fp: Union[str, bytes, os.PathLike, io.BufferedIOBase], *, seek_begin: bool = True) -> int:
        """|coro|

        Saves this asset into a file-like object.

        Parameters
        ----------
        fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
            The file-like object to save this attachment to or the filename
            to use. If a filename is passed then a file is created with that
            filename and used instead.
        seek_begin: :class:`bool`
            Whether to seek to the beginning of the file after saving is
            successfully done.

        Raises
        ------
        DiscordException
            There was no internal connection state.
        HTTPException
            Downloading the asset failed.
        NotFound
            The asset was deleted.

        Returns
        -------
        :class:`int`
            The number of bytes written.
        """
        data = await self.read()
        if isinstance(fp, io.BufferedIOBase):
            written = fp.write(data)
            if seek_begin:
                fp.seek(0)
            return written
        else:
            with open(fp, 'wb') as f:
                return f.write(data)


class Asset(AssetMixin):
    """Represents a CDN asset on Discord.

    .. container:: operations

        .. describe:: str(x)

            Returns the URL of the CDN asset.

        .. describe:: len(x)

            Returns the length of the CDN asset's URL.

        .. describe:: x == y

            Checks if the asset is equal to another asset.

        .. describe:: x != y

            Checks if the asset is not equal to another asset.

        .. describe:: hash(x)

            Returns the hash of the asset.
    """

    __slots__: Tuple[str, ...] = (
        '_state',
        '_url',
        '_animated',
        '_key',
    )

    BASE = 'https://cdn.discordapp.com'

    def __init__(self, state, *, url: str, key: str, animated: bool = False):
        self._state = state
        self._url = url
        self._animated = animated
        self._key = key

    @classmethod
    def _from_default_avatar(cls, state, index: int) -> Asset:
        # Default avatars are keyed by discriminator index and are never animated.
        return cls(
            state,
            url=f'{cls.BASE}/embed/avatars/{index}.png',
            key=str(index),
            animated=False,
        )

    @classmethod
    def _from_avatar(cls, state, user_id: int, avatar: str) -> Asset:
        # Hashes prefixed with 'a_' denote animated (GIF-capable) assets.
        animated = avatar.startswith('a_')
        format = 'gif' if animated else 'png'
        return cls(
            state,
            url=f'{cls.BASE}/avatars/{user_id}/{avatar}.{format}?size=1024',
            key=avatar,
            animated=animated,
        )

    @classmethod
    def _from_guild_avatar(cls, state, guild_id: int, member_id: int, avatar: str) -> Asset:
        animated = avatar.startswith('a_')
        format = 'gif' if animated else 'png'
        return cls(
            state,
            url=f"{cls.BASE}/guilds/{guild_id}/users/{member_id}/avatars/{avatar}.{format}?size=1024",
            key=avatar,
            animated=animated,
        )

    @classmethod
    def _from_icon(cls, state, object_id: int, icon_hash: str, path: str) -> Asset:
        return cls(
            state,
            url=f'{cls.BASE}/{path}-icons/{object_id}/{icon_hash}.png?size=1024',
            key=icon_hash,
            animated=False,
        )

    @classmethod
    def _from_cover_image(cls, state, object_id: int, cover_image_hash: str) -> Asset:
        return cls(
            state,
            url=f'{cls.BASE}/app-assets/{object_id}/store/{cover_image_hash}.png?size=1024',
            key=cover_image_hash,
            animated=False,
        )

    @classmethod
    def _from_guild_image(cls, state, guild_id: int, image: str, path: str) -> Asset:
        return cls(
            state,
            url=f'{cls.BASE}/{path}/{guild_id}/{image}.png?size=1024',
            key=image,
            animated=False,
        )

    @classmethod
    def _from_guild_icon(cls, state, guild_id: int, icon_hash: str) -> Asset:
        animated = icon_hash.startswith('a_')
        format = 'gif' if animated else 'png'
        return cls(
            state,
            url=f'{cls.BASE}/icons/{guild_id}/{icon_hash}.{format}?size=1024',
            key=icon_hash,
            animated=animated,
        )

    @classmethod
    def _from_sticker_banner(cls, state, banner: int) -> Asset:
        # 710982414301790216 is the fixed application id hosting sticker banners.
        return cls(
            state,
            url=f'{cls.BASE}/app-assets/710982414301790216/store/{banner}.png',
            key=str(banner),
            animated=False,
        )

    @classmethod
    def _from_user_banner(cls, state, user_id: int, banner_hash: str) -> Asset:
        animated = banner_hash.startswith('a_')
        format = 'gif' if animated else 'png'
        return cls(
            state,
            url=f'{cls.BASE}/banners/{user_id}/{banner_hash}.{format}?size=512',
            key=banner_hash,
            animated=animated
        )

    def __str__(self) -> str:
        return self._url

    def __len__(self) -> int:
        return len(self._url)

    def __repr__(self):
        shorten = self._url.replace(self.BASE, '')
        return f'<Asset url={shorten!r}>'

    def __eq__(self, other):
        return isinstance(other, Asset) and self._url == other._url

    def __hash__(self):
        return hash(self._url)

    @property
    def url(self) -> str:
        """:class:`str`: Returns the underlying URL of the asset."""
        return self._url

    @property
    def key(self) -> str:
        """:class:`str`: Returns the identifying key of the asset."""
        return self._key

    def is_animated(self) -> bool:
        """:class:`bool`: Returns whether the asset is animated."""
        return self._animated

    def replace(
        self,
        *,
        size: int = MISSING,
        format: ValidAssetFormatTypes = MISSING,
        static_format: ValidStaticFormatTypes = MISSING,
    ) -> Asset:
        """Returns a new asset with the passed components replaced.

        Parameters
        -----------
        size: :class:`int`
            The new size of the asset.
        format: :class:`str`
            The new format to change it to. Must be either
            'webp', 'jpeg', 'jpg', 'png', or 'gif' if it's animated.
        static_format: :class:`str`
            The new format to change it to if the asset isn't animated.
            Must be either 'webp', 'jpeg', 'jpg', or 'png'.

        Raises
        -------
        InvalidArgument
            An invalid size or format was passed.

        Returns
        --------
        :class:`Asset`
            The newly updated asset.
        """
        url = yarl.URL(self._url)
        path, _ = os.path.splitext(url.path)
        # BUGFIX: yarl's with_path() drops the query string, so remember the
        # original one up front; otherwise changing only the format would
        # silently lose the existing ``?size=...`` component.
        query = url.raw_query_string

        if format is not MISSING:
            if self._animated:
                if format not in VALID_ASSET_FORMATS:
                    raise InvalidArgument(f'format must be one of {VALID_ASSET_FORMATS}')
            else:
                if format not in VALID_STATIC_FORMATS:
                    raise InvalidArgument(f'format must be one of {VALID_STATIC_FORMATS}')
            url = url.with_path(f'{path}.{format}')

        if static_format is not MISSING and not self._animated:
            if static_format not in VALID_STATIC_FORMATS:
                raise InvalidArgument(f'static_format must be one of {VALID_STATIC_FORMATS}')
            url = url.with_path(f'{path}.{static_format}')

        if size is not MISSING:
            if not utils.valid_icon_size(size):
                raise InvalidArgument('size must be a power of 2 between 16 and 4096')
            url = url.with_query(size=size)
        else:
            # keep whatever query the asset originally carried
            url = url.with_query(query)

        url = str(url)
        return Asset(state=self._state, url=url, key=self._key, animated=self._animated)

    def with_size(self, size: int, /) -> Asset:
        """Returns a new asset with the specified size.

        Parameters
        ------------
        size: :class:`int`
            The new size of the asset.

        Raises
        -------
        InvalidArgument
            The asset had an invalid size.

        Returns
        --------
        :class:`Asset`
            The new updated asset.
        """
        if not utils.valid_icon_size(size):
            raise InvalidArgument('size must be a power of 2 between 16 and 4096')

        url = str(yarl.URL(self._url).with_query(size=size))
        return Asset(state=self._state, url=url, key=self._key, animated=self._animated)

    def with_format(self, format: ValidAssetFormatTypes, /) -> Asset:
        """Returns a new asset with the specified format.

        Parameters
        ------------
        format: :class:`str`
            The new format of the asset.

        Raises
        -------
        InvalidArgument
            The asset had an invalid format.

        Returns
        --------
        :class:`Asset`
            The new updated asset.
        """
        if self._animated:
            if format not in VALID_ASSET_FORMATS:
                raise InvalidArgument(f'format must be one of {VALID_ASSET_FORMATS}')
        else:
            if format not in VALID_STATIC_FORMATS:
                raise InvalidArgument(f'format must be one of {VALID_STATIC_FORMATS}')

        url = yarl.URL(self._url)
        path, _ = os.path.splitext(url.path)
        # with_query restores the query that with_path() discards
        url = str(url.with_path(f'{path}.{format}').with_query(url.raw_query_string))
        return Asset(state=self._state, url=url, key=self._key, animated=self._animated)

    def with_static_format(self, format: ValidStaticFormatTypes, /) -> Asset:
        """Returns a new asset with the specified static format.

        This only changes the format if the underlying asset is not animated.
        Otherwise, the asset is not changed.

        Parameters
        ------------
        format: :class:`str`
            The new static format of the asset.

        Raises
        -------
        InvalidArgument
            The asset had an invalid format.

        Returns
        --------
        :class:`Asset`
            The new updated asset.
        """
        if self._animated:
            return self
        return self.with_format(format)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/asset.py
asset.py
from __future__ import annotations

from typing import List, Tuple, TypedDict, Any, TYPE_CHECKING, Callable, TypeVar, Literal, Optional, overload

import array
import ctypes
import ctypes.util
import logging
import math
import os.path
import struct
import sys

from .errors import DiscordException, InvalidArgument

if TYPE_CHECKING:
    T = TypeVar('T')
    BAND_CTL = Literal['narrow', 'medium', 'wide', 'superwide', 'full']
    SIGNAL_CTL = Literal['auto', 'voice', 'music']


class BandCtl(TypedDict):
    narrow: int
    medium: int
    wide: int
    superwide: int
    full: int


class SignalCtl(TypedDict):
    auto: int
    voice: int
    music: int


__all__ = (
    'Encoder',
    'OpusError',
    'OpusNotLoaded',
)

_log = logging.getLogger(__name__)

c_int_ptr = ctypes.POINTER(ctypes.c_int)
c_int16_ptr = ctypes.POINTER(ctypes.c_int16)
c_float_ptr = ctypes.POINTER(ctypes.c_float)

# The loaded libopus handle; populated lazily by load_opus()/_load_default().
_lib = None


class EncoderStruct(ctypes.Structure):
    pass


class DecoderStruct(ctypes.Structure):
    pass


EncoderStructPtr = ctypes.POINTER(EncoderStruct)
DecoderStructPtr = ctypes.POINTER(DecoderStruct)

## Some constants from opus_defines.h
# Error codes
OK = 0
BAD_ARG = -1

# Encoder CTLs
APPLICATION_AUDIO = 2049
APPLICATION_VOIP = 2048
APPLICATION_LOWDELAY = 2051

CTL_SET_BITRATE = 4002
CTL_SET_BANDWIDTH = 4008
CTL_SET_FEC = 4012
CTL_SET_PLP = 4014
CTL_SET_SIGNAL = 4024

# Decoder CTLs
CTL_SET_GAIN = 4034
CTL_LAST_PACKET_DURATION = 4039

band_ctl: BandCtl = {
    'narrow': 1101,
    'medium': 1102,
    'wide': 1103,
    'superwide': 1104,
    'full': 1105,
}

signal_ctl: SignalCtl = {
    'auto': -1000,
    'voice': 3001,
    'music': 3002,
}


def _err_lt(result: int, func: Callable, args: List) -> int:
    # errcheck handler for functions returning a negative opus error code.
    if result < OK:
        _log.info('error has happened in %s', func.__name__)
        raise OpusError(result)
    return result


def _err_ne(result: T, func: Callable, args: List) -> T:
    # errcheck handler for functions reporting errors through an out-pointer
    # passed as the final argument (e.g. opus_encoder_create).
    ret = args[-1]._obj
    if ret.value != OK:
        _log.info('error has happened in %s', func.__name__)
        raise OpusError(ret.value)
    return result


# A list of exported functions.
# The first argument is obviously the name.
# The second one are the types of arguments it takes.
# The third is the result type.
# The fourth is the error handler.
exported_functions: List[Tuple[Any, ...]] = [
    # Generic
    ('opus_get_version_string', None, ctypes.c_char_p, None),
    ('opus_strerror', [ctypes.c_int], ctypes.c_char_p, None),
    # Encoder functions
    ('opus_encoder_get_size', [ctypes.c_int], ctypes.c_int, None),
    ('opus_encoder_create', [ctypes.c_int, ctypes.c_int, ctypes.c_int, c_int_ptr], EncoderStructPtr, _err_ne),
    ('opus_encode', [EncoderStructPtr, c_int16_ptr, ctypes.c_int, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int32, _err_lt),
    ('opus_encode_float', [EncoderStructPtr, c_float_ptr, ctypes.c_int, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int32, _err_lt),
    ('opus_encoder_ctl', None, ctypes.c_int32, _err_lt),
    ('opus_encoder_destroy', [EncoderStructPtr], None, None),
    # Decoder functions
    ('opus_decoder_get_size', [ctypes.c_int], ctypes.c_int, None),
    ('opus_decoder_create', [ctypes.c_int, ctypes.c_int, c_int_ptr], DecoderStructPtr, _err_ne),
    ('opus_decode', [DecoderStructPtr, ctypes.c_char_p, ctypes.c_int32, c_int16_ptr, ctypes.c_int, ctypes.c_int], ctypes.c_int, _err_lt),
    ('opus_decode_float', [DecoderStructPtr, ctypes.c_char_p, ctypes.c_int32, c_float_ptr, ctypes.c_int, ctypes.c_int], ctypes.c_int, _err_lt),
    ('opus_decoder_ctl', None, ctypes.c_int32, _err_lt),
    ('opus_decoder_destroy', [DecoderStructPtr], None, None),
    ('opus_decoder_get_nb_samples', [DecoderStructPtr, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int, _err_lt),
    # Packet functions
    ('opus_packet_get_bandwidth', [ctypes.c_char_p], ctypes.c_int, _err_lt),
    ('opus_packet_get_nb_channels', [ctypes.c_char_p], ctypes.c_int, _err_lt),
    ('opus_packet_get_nb_frames', [ctypes.c_char_p, ctypes.c_int], ctypes.c_int, _err_lt),
    ('opus_packet_get_samples_per_frame', [ctypes.c_char_p, ctypes.c_int], ctypes.c_int, _err_lt),
]


def libopus_loader(name: str) -> Any:
    """Load the shared library *name* and register ctypes metadata for every
    function in :data:`exported_functions`.

    Raises whatever :func:`ctypes.cdll.LoadLibrary` raises if the library
    cannot be loaded.
    """
    # create the library...
    lib = ctypes.cdll.LoadLibrary(name)

    # register the functions...
    # NOTE: each entry is always a 4-tuple, so plain indexing cannot fail;
    # the previous ``except KeyError`` guards here were dead code (tuple
    # indexing raises IndexError, never KeyError) and have been removed.
    for item in exported_functions:
        func = getattr(lib, item[0])

        if item[1]:
            func.argtypes = item[1]

        func.restype = item[2]

        if item[3]:
            func.errcheck = item[3]

    return lib


def _load_default() -> bool:
    """Attempt to load a default libopus; returns ``True`` on success.

    On Windows the bundled DLL matching the interpreter's bitness is used;
    elsewhere :func:`ctypes.util.find_library` locates a system libopus.
    """
    global _lib
    try:
        if sys.platform == 'win32':
            _basedir = os.path.dirname(os.path.abspath(__file__))
            _bitness = struct.calcsize('P') * 8
            _target = 'x64' if _bitness > 32 else 'x86'
            _filename = os.path.join(_basedir, 'bin', f'libopus-0.{_target}.dll')
            _lib = libopus_loader(_filename)
        else:
            _lib = libopus_loader(ctypes.util.find_library('opus'))
    except Exception:
        _lib = None

    return _lib is not None


def load_opus(name: str) -> None:
    """Loads the libopus shared library for use with voice.

    If this function is not called then the library uses the function
    :func:`ctypes.util.find_library` and then loads that one if available.

    Not loading a library and attempting to use PCM based AudioSources will
    lead to voice not working.

    This function propagates the exceptions thrown.

    .. warning::

        The bitness of the library must match the bitness of your python
        interpreter. If the library is 64-bit then your python interpreter
        must be 64-bit as well. Usually if there's a mismatch in bitness then
        the load will throw an exception.

    .. note::

        On Windows, this function should not need to be called as the binaries
        are automatically loaded.

    .. note::

        On Windows, the .dll extension is not necessary. However, on Linux
        the full extension is required to load the library, e.g. ``libopus.so.1``.
        On Linux however, :func:`ctypes.util.find_library` will usually find the
        library automatically without you having to call this.

    Parameters
    ----------
    name: :class:`str`
        The filename of the shared library.
    """
    global _lib
    _lib = libopus_loader(name)


def is_loaded() -> bool:
    """Function to check if opus lib is successfully loaded either
    via the :func:`ctypes.util.find_library` call of :func:`load_opus`.

    This must return ``True`` for voice to work.

    Returns
    -------
    :class:`bool`
        Indicates if the opus library has been loaded.
    """
    global _lib
    return _lib is not None


class OpusError(DiscordException):
    """An exception that is thrown for libopus related errors.

    Attributes
    ----------
    code: :class:`int`
        The error code returned.
    """

    def __init__(self, code: int):
        self.code: int = code
        msg = _lib.opus_strerror(self.code).decode('utf-8')
        _log.info('"%s" has happened', msg)
        super().__init__(msg)


class OpusNotLoaded(DiscordException):
    """An exception that is thrown for when libopus is not loaded."""
    pass


class _OpusStruct:
    # Shared audio parameters for both Encoder and Decoder.
    SAMPLING_RATE = 48000
    CHANNELS = 2
    FRAME_LENGTH = 20  # in milliseconds
    SAMPLE_SIZE = struct.calcsize('h') * CHANNELS
    SAMPLES_PER_FRAME = int(SAMPLING_RATE / 1000 * FRAME_LENGTH)

    FRAME_SIZE = SAMPLES_PER_FRAME * SAMPLE_SIZE

    @staticmethod
    def get_opus_version() -> str:
        """Return the libopus version string, loading a default library first
        if necessary. Raises :exc:`OpusNotLoaded` when no library is available.
        """
        if not is_loaded() and not _load_default():
            raise OpusNotLoaded()

        return _lib.opus_get_version_string().decode('utf-8')


class Encoder(_OpusStruct):
    """A 48 kHz stereo Opus encoder configured for Discord voice."""

    def __init__(self, application: int = APPLICATION_AUDIO):
        _OpusStruct.get_opus_version()

        self.application: int = application
        self._state: EncoderStruct = self._create_state()
        self.set_bitrate(128)
        self.set_fec(True)
        self.set_expected_packet_loss_percent(0.15)
        self.set_bandwidth('full')
        self.set_signal_type('auto')

    def __del__(self) -> None:
        if hasattr(self, '_state'):
            _lib.opus_encoder_destroy(self._state)
            # This is a destructor, so it's okay to assign None
            self._state = None  # type: ignore

    def _create_state(self) -> EncoderStruct:
        ret = ctypes.c_int()
        return _lib.opus_encoder_create(self.SAMPLING_RATE, self.CHANNELS, self.application, ctypes.byref(ret))

    def set_bitrate(self, kbps: int) -> int:
        """Set the encoder bitrate, clamped to 16-512 kbps; returns the clamped value."""
        kbps = min(512, max(16, int(kbps)))

        _lib.opus_encoder_ctl(self._state, CTL_SET_BITRATE, kbps * 1024)
        return kbps

    def set_bandwidth(self, req: BAND_CTL) -> None:
        """Set the encoder bandwidth; *req* must be a key of :data:`band_ctl`."""
        if req not in band_ctl:
            raise KeyError(f'{req!r} is not a valid bandwidth setting. Try one of: {",".join(band_ctl)}')

        k = band_ctl[req]
        _lib.opus_encoder_ctl(self._state, CTL_SET_BANDWIDTH, k)

    def set_signal_type(self, req: SIGNAL_CTL) -> None:
        """Set the encoder signal hint; *req* must be a key of :data:`signal_ctl`."""
        if req not in signal_ctl:
            raise KeyError(f'{req!r} is not a valid bandwidth setting. Try one of: {",".join(signal_ctl)}')

        k = signal_ctl[req]
        _lib.opus_encoder_ctl(self._state, CTL_SET_SIGNAL, k)

    def set_fec(self, enabled: bool = True) -> None:
        """Enable or disable inband forward error correction."""
        _lib.opus_encoder_ctl(self._state, CTL_SET_FEC, 1 if enabled else 0)

    def set_expected_packet_loss_percent(self, percentage: float) -> None:
        """Set the expected packet loss (0.0-1.0 fraction) used for FEC tuning."""
        _lib.opus_encoder_ctl(self._state, CTL_SET_PLP, min(100, max(0, int(percentage * 100))))  # type: ignore

    def encode(self, pcm: bytes, frame_size: int) -> bytes:
        """Encode one frame of 16-bit PCM and return the Opus packet bytes."""
        max_data_bytes = len(pcm)
        # bytes can be used to reference pointer
        pcm_ptr = ctypes.cast(pcm, c_int16_ptr)  # type: ignore
        data = (ctypes.c_char * max_data_bytes)()

        ret = _lib.opus_encode(self._state, pcm_ptr, frame_size, data, max_data_bytes)

        # array can be initialized with bytes but mypy doesn't know
        return array.array('b', data[:ret]).tobytes()  # type: ignore


class Decoder(_OpusStruct):
    """A 48 kHz stereo Opus decoder matching :class:`Encoder`'s parameters."""

    def __init__(self):
        _OpusStruct.get_opus_version()

        self._state: DecoderStruct = self._create_state()

    def __del__(self) -> None:
        if hasattr(self, '_state'):
            _lib.opus_decoder_destroy(self._state)
            # This is a destructor, so it's okay to assign None
            self._state = None  # type: ignore

    def _create_state(self) -> DecoderStruct:
        ret = ctypes.c_int()
        return _lib.opus_decoder_create(self.SAMPLING_RATE, self.CHANNELS, ctypes.byref(ret))

    @staticmethod
    def packet_get_nb_frames(data: bytes) -> int:
        """Gets the number of frames in an Opus packet"""
        return _lib.opus_packet_get_nb_frames(data, len(data))

    @staticmethod
    def packet_get_nb_channels(data: bytes) -> int:
        """Gets the number of channels in an Opus packet"""
        return _lib.opus_packet_get_nb_channels(data)

    @classmethod
    def packet_get_samples_per_frame(cls, data: bytes) -> int:
        """Gets the number of samples per frame from an Opus packet"""
        return _lib.opus_packet_get_samples_per_frame(data, cls.SAMPLING_RATE)

    def _set_gain(self, adjustment: int) -> int:
        """Configures decoder gain adjustment.

        Scales the decoded output by a factor specified in Q8 dB units.
        This has a maximum range of -32768 to 32767 inclusive, and returns
        OPUS_BAD_ARG (-1) otherwise. The default is zero indicating no adjustment.
        This setting survives decoder reset (irrelevant for now).

        gain = 10**x/(20.0*256)

        (from opus_defines.h)
        """
        return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)

    def set_gain(self, dB: float) -> int:
        """Sets the decoder gain in dB, from -128 to 128."""
        dB_Q8 = max(-32768, min(32767, round(dB * 256)))  # dB * 2^n where n is 8 (Q8)
        return self._set_gain(dB_Q8)

    def set_volume(self, mult: float) -> int:
        """Sets the output volume as a float percent, i.e. 0.5 for 50%, 1.75 for 175%, etc."""
        return self.set_gain(20 * math.log10(mult))  # amplitude ratio

    def _get_last_packet_duration(self) -> int:
        """Gets the duration (in samples) of the last packet successfully decoded or concealed."""
        ret = ctypes.c_int32()
        _lib.opus_decoder_ctl(self._state, CTL_LAST_PACKET_DURATION, ctypes.byref(ret))
        return ret.value

    @overload
    def decode(self, data: bytes, *, fec: bool) -> bytes:
        ...

    @overload
    def decode(self, data: Literal[None], *, fec: Literal[False]) -> bytes:
        ...

    def decode(self, data: Optional[bytes], *, fec: bool = False) -> bytes:
        """Decode an Opus packet to PCM bytes.

        Passing ``data=None`` performs packet-loss concealment for the
        duration of the last decoded packet; ``fec`` requests inband FEC
        decoding and therefore requires actual packet data.
        """
        if data is None and fec:
            raise InvalidArgument("Invalid arguments: FEC cannot be used with null data")

        if data is None:
            frame_size = self._get_last_packet_duration() or self.SAMPLES_PER_FRAME
            channel_count = self.CHANNELS
        else:
            frames = self.packet_get_nb_frames(data)
            channel_count = self.packet_get_nb_channels(data)
            samples_per_frame = self.packet_get_samples_per_frame(data)
            frame_size = frames * samples_per_frame

        pcm = (ctypes.c_int16 * (frame_size * channel_count))()
        pcm_ptr = ctypes.cast(pcm, c_int16_ptr)

        ret = _lib.opus_decode(self._state, data, len(data) if data else 0, pcm_ptr, frame_size, fec)

        return array.array('h', pcm[:ret * channel_count]).tobytes()
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/opus.py
opus.py
import argparse
import sys
from pathlib import Path

import discord
import pkg_resources
import aiohttp
import platform


def show_version():
    """Print Python, library, and system version information."""
    entries = []

    entries.append('- Python v{0.major}.{0.minor}.{0.micro}-{0.releaselevel}'.format(sys.version_info))
    version_info = discord.version_info
    entries.append('- discord.py v{0.major}.{0.minor}.{0.micro}-{0.releaselevel}'.format(version_info))
    if version_info.releaselevel != 'final':
        # BUGFIX: get_distribution() raises DistributionNotFound rather than
        # returning a falsy value, so the old ``if pkg:`` guard could never
        # protect against a missing distribution and the command crashed.
        # NOTE(review): the distribution name may need updating for this fork
        # (e.g. 'zarenacord') — confirm against setup metadata.
        try:
            pkg = pkg_resources.get_distribution('discord.py')
        except pkg_resources.DistributionNotFound:
            pass
        else:
            entries.append(f'    - discord.py pkg_resources: v{pkg.version}')

    entries.append(f'- aiohttp v{aiohttp.__version__}')
    uname = platform.uname()
    entries.append('- system info: {0.system} {0.release} {0.version}'.format(uname))
    print('\n'.join(entries))


def core(parser, args):
    """Default handler for the bare ``discord`` command."""
    if args.version:
        show_version()


_bot_template = """#!/usr/bin/env python3

from discord.ext import commands
import discord
import config

class Bot(commands.{base}):
    def __init__(self, **kwargs):
        super().__init__(command_prefix=commands.when_mentioned_or('{prefix}'), **kwargs)
        for cog in config.cogs:
            try:
                self.load_extension(cog)
            except Exception as exc:
                print(f'Could not load extension {{cog}} due to {{exc.__class__.__name__}}: {{exc}}')

    async def on_ready(self):
        print(f'Logged on as {{self.user}} (ID: {{self.user.id}})')


bot = Bot()

# write general commands here

bot.run(config.token)
"""

_gitignore_template = """# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# Our configuration files
config.py
"""

_cog_template = '''from discord.ext import commands
import discord

class {name}(commands.Cog{attrs}):
    """The description for {name} goes here."""

    def __init__(self, bot):
        self.bot = bot
{extra}
def setup(bot):
    bot.add_cog({name}(bot))
'''

_cog_extras = '''
    def cog_unload(self):
        # clean up logic goes here
        pass

    async def cog_check(self, ctx):
        # checks that apply to every command in here
        return True

    async def bot_check(self, ctx):
        # checks that apply to every command to the bot
        return True

    async def bot_check_once(self, ctx):
        # check that apply to every command but is guaranteed to be called only once
        return True

    async def cog_command_error(self, ctx, error):
        # error handling to every command in here
        pass

    async def cog_before_invoke(self, ctx):
        # called before a command is called here
        pass

    async def cog_after_invoke(self, ctx):
        # called after a command is called here
        pass

'''

# certain file names and directory names are forbidden
# see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247%28v=vs.85%29.aspx
# although some of this doesn't apply to Linux, we might as well be consistent
_base_table = {
    '<': '-',
    '>': '-',
    ':': '-',
    '"': '-',
    # '/': '-', these are fine
    # '\\': '-',
    '|': '-',
    '?': '-',
    '*': '-',
}

# NUL (0) and 1-31 are disallowed
_base_table.update((chr(i), None) for i in range(32))

_translation_table = str.maketrans(_base_table)


def to_path(parser, name, *, replace_spaces=False):
    """Sanitise *name* into a filesystem-safe :class:`~pathlib.Path`.

    On Windows, reserved device names (CON, PRN, COM1, ...) abort via
    ``parser.error``. Characters forbidden in filenames are replaced.
    """
    if isinstance(name, Path):
        return name

    if sys.platform == 'win32':
        forbidden = ('CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', \
                     'COM8', 'COM9', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9')
        if len(name) <= 4 and name.upper() in forbidden:
            parser.error('invalid directory name given, use a different one')

    name = name.translate(_translation_table)
    if replace_spaces:
        name = name.replace(' ', '-')
    return Path(name)


def newbot(parser, args):
    """Handler for ``discord newbot``: scaffold a bot project directory."""
    new_directory = to_path(parser, args.directory) / to_path(parser, args.name)

    # as a note exist_ok for Path is a 3.5+ only feature
    # since we already checked above that we're >3.5
    try:
        new_directory.mkdir(exist_ok=True, parents=True)
    except OSError as exc:
        parser.error(f'could not create our bot directory ({exc})')

    cogs = new_directory / 'cogs'

    try:
        cogs.mkdir(exist_ok=True)
        init = cogs / '__init__.py'
        init.touch()
    except OSError as exc:
        print(f'warning: could not create cogs directory ({exc})')

    try:
        with open(str(new_directory / 'config.py'), 'w', encoding='utf-8') as fp:
            fp.write('token = "place your token here"\ncogs = []\n')
    except OSError as exc:
        parser.error(f'could not create config file ({exc})')

    try:
        with open(str(new_directory / 'bot.py'), 'w', encoding='utf-8') as fp:
            base = 'Bot' if not args.sharded else 'AutoShardedBot'
            fp.write(_bot_template.format(base=base, prefix=args.prefix))
    except OSError as exc:
        parser.error(f'could not create bot file ({exc})')

    if not args.no_git:
        try:
            with open(str(new_directory / '.gitignore'), 'w', encoding='utf-8') as fp:
                fp.write(_gitignore_template)
        except OSError as exc:
            print(f'warning: could not create .gitignore file ({exc})')

    print('successfully made bot at', new_directory)


def newcog(parser, args):
    """Handler for ``discord newcog``: write a cog template file."""
    cog_dir = to_path(parser, args.directory)
    try:
        cog_dir.mkdir(exist_ok=True)
    except OSError as exc:
        print(f'warning: could not create cogs directory ({exc})')

    directory = cog_dir / to_path(parser, args.name)
    directory = directory.with_suffix('.py')
    try:
        with open(str(directory), 'w', encoding='utf-8') as fp:
            attrs = ''
            extra = _cog_extras if args.full else ''
            if args.class_name:
                name = args.class_name
            else:
                name = str(directory.stem)
                # turn snake/kebab case into a CamelCase class name
                if '-' in name or '_' in name:
                    translation = str.maketrans('-_', '  ')
                    name = name.translate(translation).title().replace(' ', '')
                else:
                    name = name.title()

            if args.display_name:
                attrs += f', name="{args.display_name}"'
            if args.hide_commands:
                attrs += ', command_attrs=dict(hidden=True)'
            fp.write(_cog_template.format(name=name, extra=extra, attrs=attrs))
    except OSError as exc:
        parser.error(f'could not create cog file ({exc})')
    else:
        print('successfully made cog at', directory)


def add_newbot_args(subparser):
    parser = subparser.add_parser('newbot', help='creates a command bot project quickly')
    parser.set_defaults(func=newbot)

    parser.add_argument('name', help='the bot project name')
    parser.add_argument('directory', help='the directory to place it in (default: .)', nargs='?', default=Path.cwd())
    parser.add_argument('--prefix', help='the bot prefix (default: $)', default='$', metavar='<prefix>')
    parser.add_argument('--sharded', help='whether to use AutoShardedBot', action='store_true')
    parser.add_argument('--no-git', help='do not create a .gitignore file', action='store_true', dest='no_git')


def add_newcog_args(subparser):
    parser = subparser.add_parser('newcog', help='creates a new cog template quickly')
    parser.set_defaults(func=newcog)

    parser.add_argument('name', help='the cog name')
    parser.add_argument('directory', help='the directory to place it in (default: cogs)', nargs='?', default=Path('cogs'))
    parser.add_argument('--class-name', help='the class name of the cog (default: <name>)', dest='class_name')
    parser.add_argument('--display-name', help='the cog name (default: <name>)')
    parser.add_argument('--hide-commands', help='whether to hide all commands in the cog', action='store_true')
    parser.add_argument('--full', help='add all special methods as well', action='store_true')


def parse_args():
    """Build the top-level argument parser and parse ``sys.argv``."""
    parser = argparse.ArgumentParser(prog='discord', description='Tools for helping with discord.py')
    parser.add_argument('-v', '--version', action='store_true', help='shows the library version')
    parser.set_defaults(func=core)

    subparser = parser.add_subparsers(dest='subcommand', title='subcommands')
    add_newbot_args(subparser)
    add_newcog_args(subparser)
    return parser, parser.parse_args()


def main():
    parser, args = parse_args()
    args.func(parser, args)


if __name__ == '__main__':
    main()
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/__main__.py
__main__.py
from __future__ import annotations

import datetime
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union, overload

from .asset import Asset
from .enums import ActivityType, try_enum
from .colour import Colour
from .partial_emoji import PartialEmoji
from .utils import _get_as_snowflake

__all__ = (
    'BaseActivity',
    'Activity',
    'Streaming',
    'Game',
    'Spotify',
    'CustomActivity',
)

"""If curious, this is the current schema for an activity.

It's fairly long so I will document it here:

All keys are optional.

state: str (max: 128),
details: str (max: 128)
timestamps: dict
    start: int (min: 1)
    end: int (min: 1)
assets: dict
    large_image: str (max: 32)
    large_text: str (max: 128)
    small_image: str (max: 32)
    small_text: str (max: 128)
party: dict
    id: str (max: 128),
    size: List[int] (max-length: 2)
        elem: int (min: 1)
secrets: dict
    match: str (max: 128)
    join: str (max: 128)
    spectate: str (max: 128)
instance: bool
application_id: str
name: str (max: 128)
url: str
type: int
sync_id: str
session_id: str
flags: int
buttons: list[dict]
    label: str (max: 32)
    url: str (max: 512)

There are also activity flags which are mostly uninteresting for the library atm.

t.ActivityFlags = {
    INSTANCE: 1,
    JOIN: 2,
    SPECTATE: 4,
    JOIN_REQUEST: 8,
    SYNC: 16,
    PLAY: 32
}
"""

if TYPE_CHECKING:
    # Payload typings are only needed for annotations; guarded to avoid an
    # import cycle at runtime.
    from .types.activity import (
        Activity as ActivityPayload,
        ActivityTimestamps,
        ActivityParty,
        ActivityAssets,
        ActivityButton,
    )


class BaseActivity:
    """The base activity that all user-settable activities inherit from.
    A user-settable activity is one that can be used in :meth:`Client.change_presence`.

    The following types currently count as user-settable:

    - :class:`Activity`
    - :class:`Game`
    - :class:`Streaming`
    - :class:`CustomActivity`

    Note that although these types are considered user-settable by the library,
    Discord typically ignores certain combinations of activity depending on
    what is currently set. This behaviour may change in the future so there are
    no guarantees on whether Discord will actually let you set these types.

    .. versionadded:: 1.3
    """

    __slots__ = ('_created_at',)

    def __init__(self, **kwargs):
        # Milliseconds since the Unix epoch, as supplied by the gateway
        # payload; None when Discord did not send a creation time.
        self._created_at: Optional[float] = kwargs.pop('created_at', None)

    @property
    def created_at(self) -> Optional[datetime.datetime]:
        """Optional[:class:`datetime.datetime`]: When the user started doing this activity in UTC.

        .. versionadded:: 1.3
        """
        # Implicitly returns None when no creation timestamp was supplied.
        if self._created_at is not None:
            return datetime.datetime.fromtimestamp(self._created_at / 1000, tz=datetime.timezone.utc)

    def to_dict(self) -> ActivityPayload:
        # Subclasses are responsible for serialising themselves back into a
        # gateway-compatible payload.
        raise NotImplementedError
class Activity(BaseActivity):
    """Represents an activity in Discord.

    This could be an activity such as streaming, playing, listening
    or watching.

    For memory optimisation purposes, some activities are offered in slimmed
    down versions:

    - :class:`Game`
    - :class:`Streaming`

    Attributes
    ------------
    application_id: Optional[:class:`int`]
        The application ID of the game.
    name: Optional[:class:`str`]
        The name of the activity.
    url: Optional[:class:`str`]
        A stream URL that the activity could be doing.
    type: :class:`ActivityType`
        The type of activity currently being done.
    state: Optional[:class:`str`]
        The user's current state. For example, "In Game".
    details: Optional[:class:`str`]
        The detail of the user's current activity.
    timestamps: :class:`dict`
        A dictionary with optional ``start`` and ``end`` keys, each expressed
        in milliseconds since the Unix epoch.
    assets: :class:`dict`
        A dictionary describing the activity's art, with optional
        ``large_image``, ``large_text``, ``small_image`` and ``small_text``
        keys (asset IDs and their hover text respectively).
    party: :class:`dict`
        A dictionary describing the activity party, with optional ``id`` and
        ``size`` (a ``[current_size, maximum_size]`` pair) keys.
    buttons: List[:class:`dict`]
        A list of dictionaries representing custom buttons shown in a rich
        presence, each with a ``label`` and a ``url`` key.

        .. versionadded:: 2.0

    emoji: Optional[:class:`PartialEmoji`]
        The emoji that belongs to this activity.
    """

    __slots__ = (
        'state',
        'details',
        '_created_at',
        'timestamps',
        'assets',
        'party',
        'flags',
        'sync_id',
        'session_id',
        'type',
        'name',
        'url',
        'application_id',
        'emoji',
        'buttons',
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.state: Optional[str] = kwargs.pop('state', None)
        self.details: Optional[str] = kwargs.pop('details', None)
        self.timestamps: ActivityTimestamps = kwargs.pop('timestamps', {})
        self.assets: ActivityAssets = kwargs.pop('assets', {})
        self.party: ActivityParty = kwargs.pop('party', {})
        self.application_id: Optional[int] = _get_as_snowflake(kwargs, 'application_id')
        self.name: Optional[str] = kwargs.pop('name', None)
        self.url: Optional[str] = kwargs.pop('url', None)
        self.flags: int = kwargs.pop('flags', 0)
        self.sync_id: Optional[str] = kwargs.pop('sync_id', None)
        self.session_id: Optional[str] = kwargs.pop('session_id', None)
        self.buttons: List[ActivityButton] = kwargs.pop('buttons', [])

        # Accept either a pre-built ActivityType or the raw integer the API sends.
        raw_type = kwargs.pop('type', -1)
        self.type: ActivityType = raw_type if isinstance(raw_type, ActivityType) else try_enum(ActivityType, raw_type)

        raw_emoji = kwargs.pop('emoji', None)
        self.emoji: Optional[PartialEmoji] = None if raw_emoji is None else PartialEmoji.from_dict(raw_emoji)

    def __repr__(self) -> str:
        shown = ('type', 'name', 'url', 'details', 'application_id', 'session_id', 'emoji')
        inner = ' '.join(f'{attr}={getattr(self, attr)!r}' for attr in shown)
        return f'<Activity {inner}>'

    def to_dict(self) -> Dict[str, Any]:
        # Serialise every slot that is actually set; unset values and empty
        # dictionaries are omitted from the payload.
        payload: Dict[str, Any] = {}
        for attr in self.__slots__:
            value = getattr(self, attr, None)
            if value is None:
                continue
            if isinstance(value, dict) and not value:
                continue
            payload[attr] = value

        payload['type'] = int(self.type)
        if self.emoji:
            payload['emoji'] = self.emoji.to_dict()
        return payload

    @property
    def start(self) -> Optional[datetime.datetime]:
        """Optional[:class:`datetime.datetime`]: When the user started doing this activity in UTC, if applicable."""
        try:
            epoch_ms = self.timestamps['start']
        except KeyError:
            return None
        else:
            return datetime.datetime.fromtimestamp(epoch_ms / 1000, tz=datetime.timezone.utc)

    @property
    def end(self) -> Optional[datetime.datetime]:
        """Optional[:class:`datetime.datetime`]: When the user will stop doing this activity in UTC, if applicable."""
        try:
            epoch_ms = self.timestamps['end']
        except KeyError:
            return None
        else:
            return datetime.datetime.fromtimestamp(epoch_ms / 1000, tz=datetime.timezone.utc)

    @property
    def large_image_url(self) -> Optional[str]:
        """Optional[:class:`str`]: Returns a URL pointing to the large image asset of this activity if applicable."""
        # The asset URL can only be built for application-bound activities.
        if self.application_id is None:
            return None

        try:
            image_id = self.assets['large_image']
        except KeyError:
            return None
        else:
            return Asset.BASE + f'/app-assets/{self.application_id}/{image_id}.png'

    @property
    def small_image_url(self) -> Optional[str]:
        """Optional[:class:`str`]: Returns a URL pointing to the small image asset of this activity if applicable."""
        if self.application_id is None:
            return None

        try:
            image_id = self.assets['small_image']
        except KeyError:
            return None
        else:
            return Asset.BASE + f'/app-assets/{self.application_id}/{image_id}.png'

    @property
    def large_image_text(self) -> Optional[str]:
        """Optional[:class:`str`]: Returns the large image asset hover text of this activity if applicable."""
        return self.assets.get('large_text')

    @property
    def small_image_text(self) -> Optional[str]:
        """Optional[:class:`str`]: Returns the small image asset hover text of this activity if applicable."""
        return self.assets.get('small_text')
class Game(BaseActivity):
    """A slimmed down version of :class:`Activity` that represents a Discord game.

    This is typically displayed via **Playing** on the official Discord client.

    .. container:: operations

        .. describe:: x == y

            Checks if two games are equal.

        .. describe:: x != y

            Checks if two games are not equal.

        .. describe:: hash(x)

            Returns the game's hash.

        .. describe:: str(x)

            Returns the game's name.

    Parameters
    -----------
    name: :class:`str`
        The game's name.

    Attributes
    -----------
    name: :class:`str`
        The game's name.
    """

    __slots__ = ('name', '_end', '_start')

    def __init__(self, name: str, **extra):
        super().__init__(**extra)
        self.name: str = name

        # Timestamps are stored as milliseconds since the Unix epoch; 0 means
        # "not provided".
        timestamps: ActivityTimestamps = extra.get('timestamps', {})
        self._start = timestamps.get('start', 0)
        self._end = timestamps.get('end', 0)

    @property
    def type(self) -> ActivityType:
        """:class:`ActivityType`: Returns the game's type. This is for compatibility with :class:`Activity`.

        It always returns :attr:`ActivityType.playing`.
        """
        return ActivityType.playing

    @property
    def start(self) -> Optional[datetime.datetime]:
        """Optional[:class:`datetime.datetime`]: When the user started playing this game in UTC, if applicable."""
        if not self._start:
            return None
        return datetime.datetime.fromtimestamp(self._start / 1000, tz=datetime.timezone.utc)

    @property
    def end(self) -> Optional[datetime.datetime]:
        """Optional[:class:`datetime.datetime`]: When the user will stop playing this game in UTC, if applicable."""
        if not self._end:
            return None
        return datetime.datetime.fromtimestamp(self._end / 1000, tz=datetime.timezone.utc)

    def __str__(self) -> str:
        return str(self.name)

    def __repr__(self) -> str:
        return f'<Game name={self.name!r}>'

    def to_dict(self) -> Dict[str, Any]:
        # Only include the timestamps that were actually provided.
        timestamps: Dict[str, Any] = {}
        if self._start:
            timestamps['start'] = self._start
        if self._end:
            timestamps['end'] = self._end

        return {
            'type': ActivityType.playing.value,
            'name': str(self.name),
            'timestamps': timestamps,
        }

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Game):
            return False
        return self.name == other.name

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)

    def __hash__(self) -> int:
        return hash(self.name)
class Streaming(BaseActivity):
    """A slimmed down version of :class:`Activity` that represents a Discord streaming status.

    This is typically displayed via **Streaming** on the official Discord client.

    .. container:: operations

        .. describe:: x == y

            Checks if two streams are equal.

        .. describe:: x != y

            Checks if two streams are not equal.

        .. describe:: hash(x)

            Returns the stream's hash.

        .. describe:: str(x)

            Returns the stream's name.

    Attributes
    -----------
    platform: Optional[:class:`str`]
        Where the user is streaming from (ie. YouTube, Twitch).

        .. versionadded:: 1.3

    name: Optional[:class:`str`]
        The stream's name.
    details: Optional[:class:`str`]
        An alias for :attr:`name`
    game: Optional[:class:`str`]
        The game being streamed.

        .. versionadded:: 1.3

    url: :class:`str`
        The stream's URL.
    assets: :class:`dict`
        A dictionary comprising of similar keys than those in :attr:`Activity.assets`.
    """

    __slots__ = ('platform', 'name', 'game', 'url', 'details', 'assets')

    def __init__(self, *, name: Optional[str], url: str, **extra: Any):
        super().__init__(**extra)
        self.platform: Optional[str] = name
        # NOTE: this pop consumes 'details', so the second pop below always
        # falls back to self.name -- the ordering here is deliberate.
        self.name: Optional[str] = extra.pop('details', name)
        self.game: Optional[str] = extra.pop('state', None)
        self.url: str = url
        self.details: Optional[str] = extra.pop('details', self.name)  # compatibility
        self.assets: ActivityAssets = extra.pop('assets', {})

    @property
    def type(self) -> ActivityType:
        """:class:`ActivityType`: Returns the game's type. This is for compatibility with :class:`Activity`.

        It always returns :attr:`ActivityType.streaming`.
        """
        return ActivityType.streaming

    def __str__(self) -> str:
        return str(self.name)

    def __repr__(self) -> str:
        return f'<Streaming name={self.name!r}>'

    @property
    def twitch_name(self):
        """Optional[:class:`str`]: If provided, the twitch name of the user streaming.

        This corresponds to the ``large_image`` key of the :attr:`Streaming.assets`
        dictionary if it starts with ``twitch:``. Typically set by the Discord client.
        """
        try:
            asset_name = self.assets['large_image']
        except KeyError:
            return None
        return asset_name[7:] if asset_name.startswith('twitch:') else None

    def to_dict(self) -> Dict[str, Any]:
        payload: Dict[str, Any] = {
            'type': ActivityType.streaming.value,
            'name': str(self.name),
            'url': str(self.url),
            'assets': self.assets,
        }
        if self.details:
            payload['details'] = self.details
        return payload

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Streaming):
            return False
        return self.name == other.name and self.url == other.url

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)

    def __hash__(self) -> int:
        return hash(self.name)
class Spotify:
    """Represents a Spotify listening activity from Discord.

    This is a special case of :class:`Activity` that makes it easier to work
    with the Spotify integration.

    .. container:: operations

        .. describe:: x == y

            Checks if two activities are equal.

        .. describe:: x != y

            Checks if two activities are not equal.

        .. describe:: hash(x)

            Returns the activity's hash.

        .. describe:: str(x)

            Returns the string 'Spotify'.
    """

    __slots__ = ('_state', '_details', '_timestamps', '_assets', '_party', '_sync_id', '_session_id', '_created_at')

    def __init__(self, **data):
        # All of this state comes straight from the gateway payload; Spotify
        # reuses generic activity fields for track metadata (state = artists,
        # details = title, sync_id = track ID, and so on).
        self._state: str = data.pop('state', '')
        self._details: str = data.pop('details', '')
        self._timestamps: Dict[str, int] = data.pop('timestamps', {})
        self._assets: ActivityAssets = data.pop('assets', {})
        self._party: ActivityParty = data.pop('party', {})
        self._sync_id: str = data.pop('sync_id')
        self._session_id: str = data.pop('session_id')
        self._created_at: Optional[float] = data.pop('created_at', None)

    @property
    def type(self) -> ActivityType:
        """:class:`ActivityType`: Returns the activity's type. This is for compatibility with :class:`Activity`.

        It always returns :attr:`ActivityType.listening`.
        """
        return ActivityType.listening

    @property
    def created_at(self) -> Optional[datetime.datetime]:
        """Optional[:class:`datetime.datetime`]: When the user started listening in UTC.

        .. versionadded:: 1.3
        """
        if self._created_at is not None:
            return datetime.datetime.fromtimestamp(self._created_at / 1000, tz=datetime.timezone.utc)

    @property
    def colour(self) -> Colour:
        """:class:`Colour`: Returns the Spotify integration colour, as a :class:`Colour`.

        There is an alias for this named :attr:`color`"""
        return Colour(0x1DB954)

    @property
    def color(self) -> Colour:
        """:class:`Colour`: Returns the Spotify integration colour, as a :class:`Colour`.

        There is an alias for this named :attr:`colour`"""
        return self.colour

    def to_dict(self) -> Dict[str, Any]:
        return {
            'flags': 48,  # SYNC | PLAY
            'name': 'Spotify',
            'assets': self._assets,
            'party': self._party,
            'sync_id': self._sync_id,
            'session_id': self._session_id,
            'timestamps': self._timestamps,
            'details': self._details,
            'state': self._state,
        }

    @property
    def name(self) -> str:
        """:class:`str`: The activity's name. This will always return "Spotify"."""
        return 'Spotify'

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Spotify):
            return False
        # Two Spotify activities are the same when they belong to the same
        # listening session and play the same track from the same start time.
        return (
            self._session_id == other._session_id
            and self._sync_id == other._sync_id
            and self.start == other.start
        )

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)

    def __hash__(self) -> int:
        return hash(self._session_id)

    def __str__(self) -> str:
        return 'Spotify'

    def __repr__(self) -> str:
        return f'<Spotify title={self.title!r} artist={self.artist!r} track_id={self.track_id!r}>'

    @property
    def title(self) -> str:
        """:class:`str`: The title of the song being played."""
        return self._details

    @property
    def artists(self) -> List[str]:
        """List[:class:`str`]: The artists of the song being played."""
        return self._state.split('; ')

    @property
    def artist(self) -> str:
        """:class:`str`: The artist of the song being played.

        This does not attempt to split the artist information into
        multiple artists. Useful if there's only a single artist.
        """
        return self._state

    @property
    def album(self) -> str:
        """:class:`str`: The album that the song being played belongs to."""
        return self._assets.get('large_text', '')

    @property
    def album_cover_url(self) -> str:
        """:class:`str`: The album cover image URL from Spotify's CDN."""
        large_image = self._assets.get('large_image', '')
        if not large_image.startswith('spotify:'):
            return ''
        return 'https://i.scdn.co/image/' + large_image[8:]

    @property
    def track_id(self) -> str:
        """:class:`str`: The track ID used by Spotify to identify this song."""
        return self._sync_id

    @property
    def track_url(self) -> str:
        """:class:`str`: The track URL to listen on Spotify.

        .. versionadded:: 2.0
        """
        return f'https://open.spotify.com/track/{self.track_id}'

    @property
    def start(self) -> datetime.datetime:
        """:class:`datetime.datetime`: When the user started playing this song in UTC."""
        return datetime.datetime.fromtimestamp(self._timestamps['start'] / 1000, tz=datetime.timezone.utc)

    @property
    def end(self) -> datetime.datetime:
        """:class:`datetime.datetime`: When the user will stop playing this song in UTC."""
        return datetime.datetime.fromtimestamp(self._timestamps['end'] / 1000, tz=datetime.timezone.utc)

    @property
    def duration(self) -> datetime.timedelta:
        """:class:`datetime.timedelta`: The duration of the song being played."""
        return self.end - self.start

    @property
    def party_id(self) -> str:
        """:class:`str`: The party ID of the listening party."""
        return self._party.get('id', '')
""" return self._state @property def album(self) -> str: """:class:`str`: The album that the song being played belongs to.""" return self._assets.get('large_text', '') @property def album_cover_url(self) -> str: """:class:`str`: The album cover image URL from Spotify's CDN.""" large_image = self._assets.get('large_image', '') if large_image[:8] != 'spotify:': return '' album_image_id = large_image[8:] return 'https://i.scdn.co/image/' + album_image_id @property def track_id(self) -> str: """:class:`str`: The track ID used by Spotify to identify this song.""" return self._sync_id @property def track_url(self) -> str: """:class:`str`: The track URL to listen on Spotify. .. versionadded:: 2.0 """ return f'https://open.spotify.com/track/{self.track_id}' @property def start(self) -> datetime.datetime: """:class:`datetime.datetime`: When the user started playing this song in UTC.""" return datetime.datetime.fromtimestamp(self._timestamps['start'] / 1000, tz=datetime.timezone.utc) @property def end(self) -> datetime.datetime: """:class:`datetime.datetime`: When the user will stop playing this song in UTC.""" return datetime.datetime.fromtimestamp(self._timestamps['end'] / 1000, tz=datetime.timezone.utc) @property def duration(self) -> datetime.timedelta: """:class:`datetime.timedelta`: The duration of the song being played.""" return self.end - self.start @property def party_id(self) -> str: """:class:`str`: The party ID of the listening party.""" return self._party.get('id', '') class CustomActivity(BaseActivity): """Represents a Custom activity from Discord. .. container:: operations .. describe:: x == y Checks if two activities are equal. .. describe:: x != y Checks if two activities are not equal. .. describe:: hash(x) Returns the activity's hash. .. describe:: str(x) Returns the custom status text. .. versionadded:: 1.3 Attributes ----------- name: Optional[:class:`str`] The custom activity's name. 
emoji: Optional[:class:`PartialEmoji`] The emoji to pass to the activity, if any. """ __slots__ = ('name', 'emoji', 'state') def __init__(self, name: Optional[str], *, emoji: Optional[PartialEmoji] = None, **extra: Any): super().__init__(**extra) self.name: Optional[str] = name self.state: Optional[str] = extra.pop('state', None) if self.name == 'Custom Status': self.name = self.state self.emoji: Optional[PartialEmoji] if emoji is None: self.emoji = emoji elif isinstance(emoji, dict): self.emoji = PartialEmoji.from_dict(emoji) elif isinstance(emoji, str): self.emoji = PartialEmoji(name=emoji) elif isinstance(emoji, PartialEmoji): self.emoji = emoji else: raise TypeError(f'Expected str, PartialEmoji, or None, received {type(emoji)!r} instead.') @property def type(self) -> ActivityType: """:class:`ActivityType`: Returns the activity's type. This is for compatibility with :class:`Activity`. It always returns :attr:`ActivityType.custom`. """ return ActivityType.custom def to_dict(self) -> Dict[str, Any]: if self.name == self.state: o = { 'type': ActivityType.custom.value, 'state': self.name, 'name': 'Custom Status', } else: o = { 'type': ActivityType.custom.value, 'name': self.name, } if self.emoji: o['emoji'] = self.emoji.to_dict() return o def __eq__(self, other: Any) -> bool: return isinstance(other, CustomActivity) and other.name == self.name and other.emoji == self.emoji def __ne__(self, other: Any) -> bool: return not self.__eq__(other) def __hash__(self) -> int: return hash((self.name, str(self.emoji))) def __str__(self) -> str: if self.emoji: if self.name: return f'{self.emoji} {self.name}' return str(self.emoji) else: return str(self.name) def __repr__(self) -> str: return f'<CustomActivity name={self.name!r} emoji={self.emoji!r}>' ActivityTypes = Union[Activity, Game, CustomActivity, Streaming, Spotify] @overload def create_activity(data: ActivityPayload) -> ActivityTypes: ... @overload def create_activity(data: None) -> None: ... 
def create_activity(data: Optional[ActivityPayload]) -> Optional[ActivityTypes]:
    """Convert a raw gateway activity payload into the richest matching model.

    Dispatch rules, in order:

    - falsy payload -> ``None``
    - ``playing`` with an ``application_id`` or ``session_id`` -> :class:`Activity`
    - ``playing`` otherwise -> :class:`Game`
    - ``custom`` with a ``name`` -> :class:`CustomActivity`
    - ``streaming`` with a ``url`` -> :class:`Streaming`
    - ``listening`` with both ``sync_id`` and ``session_id`` -> :class:`Spotify`
    - anything else -> :class:`Activity`

    The input dictionary is never mutated.
    """
    if not data:
        return None

    game_type = try_enum(ActivityType, data.get('type', -1))
    if game_type is ActivityType.playing:
        if 'application_id' in data or 'session_id' in data:
            return Activity(**data)
        return Game(**data)
    elif game_type is ActivityType.custom:
        try:
            name = data['name']
        except KeyError:
            return Activity(**data)
        else:
            # Strip the name key without popping it from the caller's dict;
            # the previous implementation mutated the gateway payload in place.
            remaining = {key: value for key, value in data.items() if key != 'name'}
            return CustomActivity(name=name, **remaining)  # type: ignore
    elif game_type is ActivityType.streaming:
        if 'url' in data:
            # the url won't be None here
            return Streaming(**data)  # type: ignore
        return Activity(**data)
    elif game_type is ActivityType.listening and 'sync_id' in data and 'session_id' in data:
        return Spotify(**data)
    return Activity(**data)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/activity.py
activity.py
from __future__ import annotations

import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io

from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union

from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING

if TYPE_CHECKING:
    from .voice_client import VoiceClient


AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')

_log = logging.getLogger(__name__)

__all__ = (
    'AudioSource',
    'PCMAudio',
    'FFmpegAudio',
    'FFmpegPCMAudio',
    'FFmpegOpusAudio',
    'PCMVolumeTransformer',
)

# Popen creation flag that suppresses the console window ffmpeg would
# otherwise open on Windows; a no-op (0) on every other platform.
CREATE_NO_WINDOW: int

if sys.platform != 'win32':
    CREATE_NO_WINDOW = 0
else:
    CREATE_NO_WINDOW = 0x08000000


class AudioSource:
    """Represents an audio stream.

    The audio stream can be Opus encoded or not, however if the audio stream
    is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.

    .. warning::

        The audio source reads are done in a separate thread.
    """

    def read(self) -> bytes:
        """Reads 20ms worth of audio.

        Subclasses must implement this.

        If the audio is complete, then returning an empty
        :term:`py:bytes-like object` to signal this is the way to do so.

        If :meth:`~AudioSource.is_opus` method returns ``True``, then it must
        return 20ms worth of Opus encoded audio. Otherwise, it must be 20ms
        worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes per
        frame (20ms worth of audio).

        Returns
        --------
        :class:`bytes`
            A bytes like object that represents the PCM or Opus data.
        """
        raise NotImplementedError

    def is_opus(self) -> bool:
        """Checks if the audio source is already encoded in Opus."""
        return False

    def cleanup(self) -> None:
        """Called when clean-up is needed to be done.

        Useful for clearing buffer data or processes after
        it is done playing audio.
        """
        pass

    def __del__(self) -> None:
        # Best-effort cleanup at garbage collection time; subclasses that hold
        # OS resources (processes, pipes) rely on this as a safety net.
        self.cleanup()


class PCMAudio(AudioSource):
    """Represents raw 16-bit 48KHz stereo PCM audio source.

    Attributes
    -----------
    stream: :term:`py:file object`
        A file-like object that reads byte data representing raw PCM.
    """

    def __init__(self, stream: io.BufferedIOBase) -> None:
        self.stream: io.BufferedIOBase = stream

    def read(self) -> bytes:
        ret = self.stream.read(OpusEncoder.FRAME_SIZE)
        # A short read is treated as end-of-stream: the encoder requires
        # exactly one full frame, so partial frames are discarded.
        if len(ret) != OpusEncoder.FRAME_SIZE:
            return b''
        return ret


class FFmpegAudio(AudioSource):
    """Represents an FFmpeg (or AVConv) based AudioSource.

    User created AudioSources using FFmpeg differently from how
    :class:`FFmpegPCMAudio` and :class:`FFmpegOpusAudio` work should
    subclass this.

    .. versionadded:: 1.3
    """

    def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
        # Spawns the ffmpeg subprocess; when the caller requested stdin piping
        # a writer thread feeds ``source`` into the process in the background.
        piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
        if piping and isinstance(source, str):
            raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")

        args = [executable, *args]
        kwargs = {'stdout': subprocess.PIPE}
        kwargs.update(subprocess_kwargs)

        self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
        self._stdout: IO[bytes] = self._process.stdout  # type: ignore
        self._stdin: Optional[IO[bytes]] = None
        self._pipe_thread: Optional[threading.Thread] = None

        if piping:
            n = f'popen-stdin-writer:{id(self):#x}'
            self._stdin = self._process.stdin
            self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
            self._pipe_thread.start()

    def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
        # Wraps Popen failures in ClientException so callers get a uniform
        # error type whether the executable is missing or Popen itself fails.
        process = None
        try:
            process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
        except FileNotFoundError:
            executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
            raise ClientException(executable + ' was not found.') from None
        except subprocess.SubprocessError as exc:
            raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
        else:
            return process

    def _kill_process(self) -> None:
        proc = self._process
        if proc is MISSING:
            # cleanup() already ran; nothing to terminate.
            return

        _log.info('Preparing to terminate ffmpeg process %s.', proc.pid)

        try:
            proc.kill()
        except Exception:
            _log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)

        if proc.poll() is None:
            _log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
            # communicate() also drains the pipes, avoiding a zombie process
            # blocked on a full stdout buffer.
            proc.communicate()
            _log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
        else:
            _log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)

    def _pipe_writer(self, source: io.BufferedIOBase) -> None:
        # Runs on a daemon thread; copies ``source`` into the process's stdin
        # until the source is exhausted or the write pipe breaks.
        while self._process:
            # arbitrarily large read size
            data = source.read(8192)
            if not data:
                self._process.terminate()
                return
            try:
                self._stdin.write(data)
            except Exception:
                _log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
                # at this point the source data is either exhausted or the process is fubar
                self._process.terminate()
                return

    def cleanup(self) -> None:
        self._kill_process()
        # MISSING marks the instance as dead; _kill_process checks for it.
        self._process = self._stdout = self._stdin = MISSING


class FFmpegPCMAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).

    This launches a sub-process to a specific input file given.

    .. warning::

        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to PCM bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None
    ) -> None:
        args = []
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}

        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))

        args.append('-i')
        args.append('-' if pipe else source)
        # Output format: signed 16-bit little-endian PCM, 48KHz, stereo.
        args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))

        if isinstance(options, str):
            args.extend(shlex.split(options))

        args.append('pipe:1')

        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)

    def read(self) -> bytes:
        ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
        if len(ret) != OpusEncoder.FRAME_SIZE:
            return b''
        return ret

    def is_opus(self) -> bool:
        return False


class FFmpegOpusAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).

    This launches a sub-process to a specific input file given.  However,
    rather than producing PCM packets like :class:`FFmpegPCMAudio` does
    that need to be encoded to Opus, this class produces Opus packets,
    skipping the encoding step done by the library.

    Alternatively, instead of instantiating this class directly, you can use
    :meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec
    information.  This can be used to opportunistically skip pointless
    re-encoding of existing Opus audio data for a boost in performance at the
    cost of a short initial delay to gather the information. The same can be
    achieved by passing ``copy`` to the ``codec`` parameter, but only if you
    know that the input source is Opus encoded beforehand.

    .. versionadded:: 1.3

    .. warning::

        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to Opus bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    bitrate: :class:`int`
        The bitrate in kbps to encode the output to.  Defaults to ``128``.
    codec: Optional[:class:`str`]
        The codec to use to encode the audio data.  Normally this would be
        just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
        opportunistically skip pointlessly re-encoding Opus audio data by
        passing ``copy`` as the codec value.  Any values other than ``copy``,
        ``opus``, or ``libopus`` will be considered ``libopus``.  Defaults to
        ``libopus``.

        .. warning::

            Do not provide this parameter unless you are certain that the
            audio input is already Opus encoded.  For typical use
            :meth:`FFmpegOpusAudio.from_probe` should be used to determine the
            proper value for this parameter.

    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        bitrate: int = 128,
        codec: Optional[str] = None,
        executable: str = 'ffmpeg',
        pipe=False,
        stderr=None,
        before_options=None,
        options=None,
    ) -> None:
        args = []
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}

        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))

        args.append('-i')
        args.append('-' if pipe else source)

        # NOTE(review): 'opus'/'libopus' map to 'copy' (stream passthrough)
        # and everything else to 'libopus', which disagrees with the docstring
        # above ("Any values other than copy, opus, or libopus will be
        # considered libopus") for the literal value 'libopus' — confirm the
        # intended mapping before changing either.
        codec = 'copy' if codec in ('opus', 'libopus') else 'libopus'

        args.extend(('-map_metadata', '-1',
                     '-f', 'opus',
                     '-c:a', codec,
                     '-ar', '48000',
                     '-ac', '2',
                     '-b:a', f'{bitrate}k',
                     '-loglevel', 'warning'))

        if isinstance(options, str):
            args.extend(shlex.split(options))

        args.append('pipe:1')

        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
        # ffmpeg emits an Ogg container; iterate it to pull out raw Opus packets.
        self._packet_iter = OggStream(self._stdout).iter_packets()

    @classmethod
    async def from_probe(
        cls: Type[FT],
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        **kwargs: Any,
    ) -> FT:
        """|coro|

        A factory method that creates a :class:`FFmpegOpusAudio` after probing
        the input source for audio codec and bitrate information.

        Examples
        ----------

        Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm")
            voice_client.play(source)

        If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
        to probe using ffmpeg instead: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
            voice_client.play(source)

        Using a custom method of determining codec and bitrate: ::

            def custom_probe(source, executable):
                # some analysis code here
                return codec, bitrate

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
            voice_client.play(source)

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for the constructor.
        method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
            The probing method used to determine bitrate and codec information. As a string, valid
            values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
            (or avconv).  As a callable, it must take two string arguments, ``source`` and
            ``executable``.  Both parameters are the same values passed to this factory function.
            ``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
        kwargs
            The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
            excluding ``bitrate`` and ``codec``.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        --------
        :class:`FFmpegOpusAudio`
            An instance of this class.
        """
        executable = kwargs.get('executable')
        codec, bitrate = await cls.probe(source, method=method, executable=executable)
        return cls(source, bitrate=bitrate, codec=codec, **kwargs)  # type: ignore

    @classmethod
    async def probe(
        cls,
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        executable: Optional[str] = None,
    ) -> Tuple[Optional[str], Optional[int]]:
        """|coro|

        Probes the input source for bitrate and codec information.

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
        method
            Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
        executable: :class:`str`
            Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        ---------
        Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
            A 2-tuple with the codec and bitrate of the input source.
        """
        method = method or 'native'
        executable = executable or 'ffmpeg'
        probefunc = fallback = None

        if isinstance(method, str):
            probefunc = getattr(cls, '_probe_codec_' + method, None)
            if probefunc is None:
                raise AttributeError(f"Invalid probe method {method!r}")

            if probefunc is cls._probe_codec_native:
                fallback = cls._probe_codec_fallback

        elif callable(method):
            probefunc = method
            fallback = cls._probe_codec_fallback
        else:
            raise TypeError("Expected str or callable for parameter 'probe', " \
                            f"not '{method.__class__.__name__}'")

        codec = bitrate = None
        loop = asyncio.get_event_loop()
        try:
            codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable))  # type: ignore
        except Exception:
            if not fallback:
                _log.exception("Probe '%s' using '%s' failed", method, executable)
                return  # type: ignore

            _log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
            try:
                codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable))  # type: ignore
            except Exception:
                _log.exception("Fallback probe using '%s' failed", executable)
            else:
                _log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
        else:
            _log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
        finally:
            # Returning from ``finally`` makes the probe best-effort: any
            # in-flight exception is discarded and (codec, bitrate) — possibly
            # (None, None) — is returned instead.
            return codec, bitrate

    @staticmethod
    def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        # Probes via ffprobe/avprobe JSON output for the first audio stream.
        exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
        args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
        output = subprocess.check_output(args, timeout=20)
        codec = bitrate = None

        if output:
            data = json.loads(output)
            streamdata = data['streams'][0]

            codec = streamdata.get('codec_name')
            bitrate = int(streamdata.get('bit_rate', 0))
            # NOTE(review): max() imposes a *floor* of 512 kbps on the probed
            # bitrate; a clamp like min(..., 512) looks more plausible —
            # confirm intent before changing.
            bitrate = max(round(bitrate/1000), 512)

        return codec, bitrate

    @staticmethod
    def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        # Fallback probe: scrape ffmpeg's stderr banner output with regexes.
        args = [executable, '-hide_banner', '-i', source]
        proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = proc.communicate(timeout=20)
        output = out.decode('utf8')
        codec = bitrate = None

        codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
        if codec_match:
            codec = codec_match.group(1)

        br_match = re.search(r"(\d+) [kK]b/s", output)
        if br_match:
            # NOTE(review): same 512 floor as _probe_codec_native — confirm.
            bitrate = max(int(br_match.group(1)), 512)

        return codec, bitrate

    def read(self) -> bytes:
        # Empty bytes at stream end, matching the AudioSource contract.
        return next(self._packet_iter, b'')

    def is_opus(self) -> bool:
        return True


class PCMVolumeTransformer(AudioSource, Generic[AT]):
    """Transforms a previous :class:`AudioSource` to have volume controls.

    This does not work on audio sources that have
    :meth:`AudioSource.is_opus` set to ``True``.

    Parameters
    ------------
    original: :class:`AudioSource`
        The original AudioSource to transform.
    volume: :class:`float`
        The initial volume to set it to.
        See :attr:`volume` for more info.

    Raises
    -------
    TypeError
        Not an audio source.
    ClientException
        The audio source is opus encoded.
    """

    def __init__(self, original: AT, volume: float = 1.0):
        if not isinstance(original, AudioSource):
            raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')

        if original.is_opus():
            raise ClientException('AudioSource must not be Opus encoded.')

        self.original: AT = original
        self.volume = volume

    @property
    def volume(self) -> float:
        """Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
        return self._volume

    @volume.setter
    def volume(self, value: float) -> None:
        # Clamp negatives to silence; the upper bound is applied at read time.
        self._volume = max(value, 0.0)

    def cleanup(self) -> None:
        self.original.cleanup()

    def read(self) -> bytes:
        ret = self.original.read()
        # audioop.mul scales 16-bit samples; volume is capped at 2.0 (200%).
        return audioop.mul(ret, 2, min(self._volume, 2.0))


class AudioPlayer(threading.Thread):
    # Daemon thread that pulls 20ms frames from an AudioSource and sends them
    # to the voice client on a drift-compensated schedule.

    # Seconds per Opus frame (20ms).
    DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0

    def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
        threading.Thread.__init__(self)
        self.daemon: bool = True
        self.source: AudioSource = source
        self.client: VoiceClient = client
        # Finalizer invoked with the error (or None) once playback ends.
        self.after: Optional[Callable[[Optional[Exception]], Any]] = after

        self._end: threading.Event = threading.Event()
        self._resumed: threading.Event = threading.Event()
        self._resumed.set()  # we are not paused
        self._current_error: Optional[Exception] = None
        self._connected: threading.Event = client._connected
        self._lock: threading.Lock = threading.Lock()

        if after is not None and not callable(after):
            raise TypeError('Expected a callable for the "after" parameter.')

    def _do_run(self) -> None:
        self.loops = 0
        self._start = time.perf_counter()

        # getattr lookup speed ups
        play_audio = self.client.send_audio_packet
        self._speak(True)

        while not self._end.is_set():
            # are we paused?
            if not self._resumed.is_set():
                # wait until we aren't
                self._resumed.wait()
                continue

            # are we disconnected from voice?
            if not self._connected.is_set():
                # wait until we are connected
                self._connected.wait()
                # reset our internal data
                self.loops = 0
                self._start = time.perf_counter()

            self.loops += 1
            data = self.source.read()

            if not data:
                self.stop()
                break

            play_audio(data, encode=not self.source.is_opus())
            # Sleep against an absolute timeline (start + N * DELAY) so frame
            # pacing compensates for read/encode time instead of drifting.
            next_time = self._start + self.DELAY * self.loops
            delay = max(0, self.DELAY + (next_time - time.perf_counter()))
            time.sleep(delay)

    def run(self) -> None:
        try:
            self._do_run()
        except Exception as exc:
            self._current_error = exc
            self.stop()
        finally:
            self.source.cleanup()
            self._call_after()

    def _call_after(self) -> None:
        # Invokes the user's finalizer; if none was given, surfaces any
        # playback error on the log and stderr instead of swallowing it.
        error = self._current_error

        if self.after is not None:
            try:
                self.after(error)
            except Exception as exc:
                _log.exception('Calling the after function failed.')
                exc.__context__ = error
                traceback.print_exception(type(exc), exc, exc.__traceback__)
        elif error:
            msg = f'Exception in voice thread {self.name}'
            _log.exception(msg, exc_info=error)
            print(msg, file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__)

    def stop(self) -> None:
        self._end.set()
        # Unblock a paused _do_run loop so it can observe _end and exit.
        self._resumed.set()
        self._speak(False)

    def pause(self, *, update_speaking: bool = True) -> None:
        self._resumed.clear()
        if update_speaking:
            self._speak(False)

    def resume(self, *, update_speaking: bool = True) -> None:
        # Reset the timeline so pacing restarts from now rather than racing
        # to catch up on frames "missed" while paused.
        self.loops = 0
        self._start = time.perf_counter()
        self._resumed.set()
        if update_speaking:
            self._speak(True)

    def is_playing(self) -> bool:
        return self._resumed.is_set() and not self._end.is_set()

    def is_paused(self) -> bool:
        return not self._end.is_set() and not self._resumed.is_set()

    def _set_source(self, source: AudioSource) -> None:
        # Atomically swap sources mid-playback without toggling the speaking
        # state on the gateway.
        with self._lock:
            self.pause(update_speaking=False)
            self.source = source
            self.resume(update_speaking=False)

    def _speak(self, speaking: bool) -> None:
        try:
            asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
        except Exception as e:
            # Best-effort: a failed speaking update must not kill the player thread.
            _log.info("Speaking call in player failed: %s", e)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/player.py
player.py
from __future__ import annotations import asyncio import socket import logging import struct import threading from typing import Any, Callable, List, Optional, TYPE_CHECKING, Tuple from . import opus, utils from .backoff import ExponentialBackoff from .gateway import * from .errors import ClientException, ConnectionClosed from .player import AudioPlayer, AudioSource from .utils import MISSING if TYPE_CHECKING: from .client import Client from .guild import Guild from .state import ConnectionState from .user import ClientUser from .opus import Encoder from . import abc from .types.voice import ( GuildVoiceState as GuildVoiceStatePayload, VoiceServerUpdate as VoiceServerUpdatePayload, SupportedModes, ) has_nacl: bool try: import nacl.secret # type: ignore has_nacl = True except ImportError: has_nacl = False __all__ = ( 'VoiceProtocol', 'VoiceClient', ) _log = logging.getLogger(__name__) class VoiceProtocol: """A class that represents the Discord voice protocol. This is an abstract class. The library provides a concrete implementation under :class:`VoiceClient`. This class allows you to implement a protocol to allow for an external method of sending voice, such as Lavalink_ or a native library implementation. These classes are passed to :meth:`abc.Connectable.connect <VoiceChannel.connect>`. .. _Lavalink: https://github.com/freyacodes/Lavalink Parameters ------------ client: :class:`Client` The client (or its subclasses) that started the connection request. channel: :class:`abc.Connectable` The voice channel that is being connected to. """ def __init__(self, client: Client, channel: abc.Connectable) -> None: self.client: Client = client self.channel: abc.Connectable = channel async def on_voice_state_update(self, data: GuildVoiceStatePayload) -> None: """|coro| An abstract method that is called when the client's voice state has changed. This corresponds to ``VOICE_STATE_UPDATE``. Parameters ------------ data: :class:`dict` The raw `voice state payload`__. .. 
_voice_state_update_payload: https://discord.com/developers/docs/resources/voice#voice-state-object __ voice_state_update_payload_ """ raise NotImplementedError async def on_voice_server_update(self, data: VoiceServerUpdatePayload) -> None: """|coro| An abstract method that is called when initially connecting to voice. This corresponds to ``VOICE_SERVER_UPDATE``. Parameters ------------ data: :class:`dict` The raw `voice server update payload`__. .. _voice_server_update_payload: https://discord.com/developers/docs/topics/gateway#voice-server-update-voice-server-update-event-fields __ voice_server_update_payload_ """ raise NotImplementedError async def connect(self, *, timeout: float, reconnect: bool) -> None: """|coro| An abstract method called when the client initiates the connection request. When a connection is requested initially, the library calls the constructor under ``__init__`` and then calls :meth:`connect`. If :meth:`connect` fails at some point then :meth:`disconnect` is called. Within this method, to start the voice connection flow it is recommended to use :meth:`Guild.change_voice_state` to start the flow. After which, :meth:`on_voice_server_update` and :meth:`on_voice_state_update` will be called. The order that these two are called is unspecified. Parameters ------------ timeout: :class:`float` The timeout for the connection. reconnect: :class:`bool` Whether reconnection is expected. """ raise NotImplementedError async def disconnect(self, *, force: bool) -> None: """|coro| An abstract method called when the client terminates the connection. See :meth:`cleanup`. Parameters ------------ force: :class:`bool` Whether the disconnection was forced. """ raise NotImplementedError def cleanup(self) -> None: """This method *must* be called to ensure proper clean-up during a disconnect. It is advisable to call this from within :meth:`disconnect` when you are completely done with the voice protocol instance. 
This method removes it from the internal state cache that keeps track of currently alive voice clients. Failure to clean-up will cause subsequent connections to report that it's still connected. """ key_id, _ = self.channel._get_voice_client_key() self.client._connection._remove_voice_client(key_id) class VoiceClient(VoiceProtocol): """Represents a Discord voice connection. You do not create these, you typically get them from e.g. :meth:`VoiceChannel.connect`. Warning -------- In order to use PCM based AudioSources, you must have the opus library installed on your system and loaded through :func:`opus.load_opus`. Otherwise, your AudioSources must be opus encoded (e.g. using :class:`FFmpegOpusAudio`) or the library will not be able to transmit audio. Attributes ----------- session_id: :class:`str` The voice connection session ID. token: :class:`str` The voice connection token. endpoint: :class:`str` The endpoint we are connecting to. channel: :class:`abc.Connectable` The voice channel connected to. loop: :class:`asyncio.AbstractEventLoop` The event loop that the voice client is running on. 
""" endpoint_ip: str voice_port: int secret_key: List[int] ssrc: int def __init__(self, client: Client, channel: abc.Connectable): if not has_nacl: raise RuntimeError("PyNaCl library needed in order to use voice") super().__init__(client, channel) state = client._connection self.token: str = MISSING self.socket = MISSING self.loop: asyncio.AbstractEventLoop = state.loop self._state: ConnectionState = state # this will be used in the AudioPlayer thread self._connected: threading.Event = threading.Event() self._handshaking: bool = False self._potentially_reconnecting: bool = False self._voice_state_complete: asyncio.Event = asyncio.Event() self._voice_server_complete: asyncio.Event = asyncio.Event() self.mode: str = MISSING self._connections: int = 0 self.sequence: int = 0 self.timestamp: int = 0 self.timeout: float = 0 self._runner: asyncio.Task = MISSING self._player: Optional[AudioPlayer] = None self.encoder: Encoder = MISSING self._lite_nonce: int = 0 self.ws: DiscordVoiceWebSocket = MISSING warn_nacl = not has_nacl supported_modes: Tuple[SupportedModes, ...] = ( 'xsalsa20_poly1305_lite', 'xsalsa20_poly1305_suffix', 'xsalsa20_poly1305', ) @property def guild(self) -> Optional[Guild]: """Optional[:class:`Guild`]: The guild we're connected to, if applicable.""" return getattr(self.channel, 'guild', None) @property def user(self) -> ClientUser: """:class:`ClientUser`: The user connected to voice (i.e. 
ourselves).""" return self._state.user def checked_add(self, attr, value, limit): val = getattr(self, attr) if val + value > limit: setattr(self, attr, 0) else: setattr(self, attr, val + value) # connection related async def on_voice_state_update(self, data: GuildVoiceStatePayload) -> None: self.session_id = data['session_id'] channel_id = data['channel_id'] if not self._handshaking or self._potentially_reconnecting: # If we're done handshaking then we just need to update ourselves # If we're potentially reconnecting due to a 4014, then we need to differentiate # a channel move and an actual force disconnect if channel_id is None: # We're being disconnected so cleanup await self.disconnect() else: guild = self.guild self.channel = channel_id and guild and guild.get_channel(int(channel_id)) # type: ignore else: self._voice_state_complete.set() async def on_voice_server_update(self, data: VoiceServerUpdatePayload) -> None: if self._voice_server_complete.is_set(): _log.info('Ignoring extraneous voice server update.') return self.token = data.get('token') self.server_id = int(data['guild_id']) endpoint = data.get('endpoint') if endpoint is None or self.token is None: _log.warning('Awaiting endpoint... This requires waiting. 
' \ 'If timeout occurred considering raising the timeout and reconnecting.') return self.endpoint, _, _ = endpoint.rpartition(':') if self.endpoint.startswith('wss://'): # Just in case, strip it off since we're going to add it later self.endpoint = self.endpoint[6:] # This gets set later self.endpoint_ip = MISSING self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.socket.setblocking(False) if not self._handshaking: # If we're not handshaking then we need to terminate our previous connection in the websocket await self.ws.close(4000) return self._voice_server_complete.set() async def voice_connect(self) -> None: await self.channel.guild.change_voice_state(channel=self.channel) async def voice_disconnect(self) -> None: _log.info('The voice handshake is being terminated for Channel ID %s (Guild ID %s)', self.channel.id, self.guild.id) await self.channel.guild.change_voice_state(channel=None) def prepare_handshake(self) -> None: self._voice_state_complete.clear() self._voice_server_complete.clear() self._handshaking = True _log.info('Starting voice handshake... (connection attempt %d)', self._connections + 1) self._connections += 1 def finish_handshake(self) -> None: _log.info('Voice handshake complete. Endpoint found %s', self.endpoint) self._handshaking = False self._voice_server_complete.clear() self._voice_state_complete.clear() async def connect_websocket(self) -> DiscordVoiceWebSocket: ws = await DiscordVoiceWebSocket.from_client(self) self._connected.clear() while ws.secret_key is None: await ws.poll_event() self._connected.set() return ws async def connect(self, *, reconnect: bool, timeout: float) ->None: _log.info('Connecting to voice...') self.timeout = timeout for i in range(5): self.prepare_handshake() # This has to be created before we start the flow. 
            # (tail of connect) Both halves of the voice handshake must arrive
            # before the websocket can be opened.
            futures = [
                self._voice_state_complete.wait(),
                self._voice_server_complete.wait(),
            ]

            # Start the connection flow
            await self.voice_connect()

            try:
                await utils.sane_wait_for(futures, timeout=timeout)
            except asyncio.TimeoutError:
                await self.disconnect(force=True)
                raise

            self.finish_handshake()

            try:
                self.ws = await self.connect_websocket()
                break
            except (ConnectionClosed, asyncio.TimeoutError):
                if reconnect:
                    _log.exception('Failed to connect to voice... Retrying...')
                    # Linear backoff between retry attempts.
                    await asyncio.sleep(1 + i * 2.0)
                    await self.voice_disconnect()
                    continue
                else:
                    raise

        if self._runner is MISSING:
            # Spawn the websocket poll loop exactly once per client.
            self._runner = self.loop.create_task(self.poll_voice_ws(reconnect))

    async def potential_reconnect(self) -> bool:
        """Attempt to rebuild the voice websocket after a forced disconnect.

        Returns ``True`` if the handshake and websocket were re-established,
        ``False`` otherwise (caller then disconnects normally).
        """
        # Attempt to stop the player thread from playing early
        self._connected.clear()
        self.prepare_handshake()
        self._potentially_reconnecting = True
        try:
            # We only care about VOICE_SERVER_UPDATE since VOICE_STATE_UPDATE can come before we get disconnected
            await asyncio.wait_for(self._voice_server_complete.wait(), timeout=self.timeout)
        except asyncio.TimeoutError:
            self._potentially_reconnecting = False
            await self.disconnect(force=True)
            return False

        self.finish_handshake()
        self._potentially_reconnecting = False
        try:
            self.ws = await self.connect_websocket()
        except (ConnectionClosed, asyncio.TimeoutError):
            return False
        else:
            return True

    @property
    def latency(self) -> float:
        """:class:`float`: Latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.

        This could be referred to as the Discord Voice WebSocket latency and is
        an analogue of user's voice latencies as seen in the Discord client.

        .. versionadded:: 1.4
        """
        ws = self.ws
        return float("inf") if not ws else ws.latency

    @property
    def average_latency(self) -> float:
        """:class:`float`: Average of most recent 20 HEARTBEAT latencies in seconds.

        .. versionadded:: 1.4
        """
        ws = self.ws
        return float("inf") if not ws else ws.average_latency

    async def poll_voice_ws(self, reconnect: bool) -> None:
        """Background loop that polls the voice websocket and drives reconnects."""
        backoff = ExponentialBackoff()
        while True:
            try:
                await self.ws.poll_event()
            except (ConnectionClosed, asyncio.TimeoutError) as exc:
                if isinstance(exc, ConnectionClosed):
                    # The following close codes are undocumented so I will document them here.
                    # 1000 - normal closure (obviously)
                    # 4014 - voice channel has been deleted.
                    # 4015 - voice server has crashed
                    if exc.code in (1000, 4015):
                        _log.info('Disconnecting from voice normally, close code %d.', exc.code)
                        await self.disconnect()
                        break
                    if exc.code == 4014:
                        _log.info('Disconnected from voice by force... potentially reconnecting.')
                        successful = await self.potential_reconnect()
                        if not successful:
                            _log.info('Reconnect was unsuccessful, disconnecting from voice normally...')
                            await self.disconnect()
                            break
                        else:
                            continue

                if not reconnect:
                    await self.disconnect()
                    raise

                retry = backoff.delay()
                _log.exception('Disconnected from voice... Reconnecting in %.2fs.', retry)
                self._connected.clear()
                await asyncio.sleep(retry)
                await self.voice_disconnect()
                try:
                    await self.connect(reconnect=True, timeout=self.timeout)
                except asyncio.TimeoutError:
                    # at this point we've retried 5 times... let's continue the loop.
                    _log.warning('Could not connect to voice... Retrying...')
                    continue

    async def disconnect(self, *, force: bool = False) -> None:
        """|coro|

        Disconnects this voice client from voice.
        """
        if not force and not self.is_connected():
            return

        self.stop()
        self._connected.clear()

        try:
            if self.ws:
                await self.ws.close()

            await self.voice_disconnect()
        finally:
            # Cleanup always runs, even if closing the websocket raised.
            self.cleanup()
            if self.socket:
                self.socket.close()

    async def move_to(self, channel: abc.Snowflake) -> None:
        """|coro|

        Moves you to a different voice channel.

        Parameters
        -----------
        channel: :class:`abc.Snowflake`
            The channel to move to. Must be a voice channel.
        """
        await self.channel.guild.change_voice_state(channel=channel)

    def is_connected(self) -> bool:
        """Indicates if the voice client is connected to voice."""
        return self._connected.is_set()

    # audio related

    def _get_voice_packet(self, data: bytes) -> bytes:
        """Build an RTP packet (12-byte header + encrypted payload) for ``data``."""
        header = bytearray(12)

        # Formulate rtp header
        header[0] = 0x80  # RTP version 2
        header[1] = 0x78  # payload type used by Discord voice (opus)
        struct.pack_into('>H', header, 2, self.sequence)
        struct.pack_into('>I', header, 4, self.timestamp)
        struct.pack_into('>I', header, 8, self.ssrc)

        # Dispatch on the negotiated encryption mode, e.g. _encrypt_xsalsa20_poly1305_lite.
        encrypt_packet = getattr(self, '_encrypt_' + self.mode)
        return encrypt_packet(header, data)

    def _encrypt_xsalsa20_poly1305(self, header: bytes, data: bytes) -> bytes:
        """Encrypt with a 24-byte nonce whose first 12 bytes are the RTP header (rest zero)."""
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = bytearray(24)
        nonce[:12] = header

        return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext

    def _encrypt_xsalsa20_poly1305_suffix(self, header: bytes, data: bytes) -> bytes:
        """Encrypt with a random nonce; the nonce is appended after the ciphertext."""
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)

        return header + box.encrypt(bytes(data), nonce).ciphertext + nonce

    def _encrypt_xsalsa20_poly1305_lite(self, header: bytes, data: bytes) -> bytes:
        """Encrypt with an incrementing 32-bit counter nonce; first 4 nonce bytes are appended."""
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = bytearray(24)

        nonce[:4] = struct.pack('>I', self._lite_nonce)
        # Wrap the counter at the 32-bit boundary.
        self.checked_add('_lite_nonce', 1, 4294967295)

        return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext + nonce[:4]

    def play(self, source: AudioSource, *, after: Optional[Callable[[Optional[Exception]], Any]] = None) -> None:
        """Plays an :class:`AudioSource`.

        The finalizer, ``after`` is called after the source has been exhausted
        or an error occurred.

        If an error happens while the audio player is running, the exception is
        caught and the audio player is then stopped.  If no after callback is
        passed, any caught exception will be displayed as if it were raised.

        Parameters
        -----------
        source: :class:`AudioSource`
            The audio source we're reading from.
        after: Callable[[Optional[:class:`Exception`]], Any]
            The finalizer that is called after the stream is exhausted.
            This function must have a single parameter, ``error``, that
            denotes an optional exception that was raised during playing.

        Raises
        -------
        ClientException
            Already playing audio or not connected.
        TypeError
            Source is not a :class:`AudioSource` or after is not a callable.
        OpusNotLoaded
            Source is not opus encoded and opus is not loaded.
        """
        if not self.is_connected():
            raise ClientException('Not connected to voice.')

        if self.is_playing():
            raise ClientException('Already playing audio.')

        if not isinstance(source, AudioSource):
            raise TypeError(f'source must be an AudioSource not {source.__class__.__name__}')

        # Lazily create the opus encoder only when the source needs encoding.
        if not self.encoder and not source.is_opus():
            self.encoder = opus.Encoder()

        self._player = AudioPlayer(source, self, after=after)
        self._player.start()

    def is_playing(self) -> bool:
        """Indicates if we're currently playing audio."""
        return self._player is not None and self._player.is_playing()

    def is_paused(self) -> bool:
        """Indicates if we're playing audio, but if we're paused."""
        return self._player is not None and self._player.is_paused()

    def stop(self) -> None:
        """Stops playing audio."""
        if self._player:
            self._player.stop()
            self._player = None

    def pause(self) -> None:
        """Pauses the audio playing."""
        if self._player:
            self._player.pause()

    def resume(self) -> None:
        """Resumes the audio playing."""
        if self._player:
            self._player.resume()

    @property
    def source(self) -> Optional[AudioSource]:
        """Optional[:class:`AudioSource`]: The audio source being played, if playing.

        This property can also be used to change the audio source currently being played.
        """
        return self._player.source if self._player else None

    @source.setter
    def source(self, value: AudioSource) -> None:
        if not isinstance(value, AudioSource):
            raise TypeError(f'expected AudioSource not {value.__class__.__name__}.')

        if self._player is None:
            raise ValueError('Not playing anything.')

        self._player._set_source(value)

    def send_audio_packet(self, data: bytes, *, encode: bool = True) -> None:
        """Sends an audio packet composed of the data.

        You must be connected to play audio.

        Parameters
        ----------
        data: :class:`bytes`
            The :term:`py:bytes-like object` denoting PCM or Opus voice data.
        encode: :class:`bool`
            Indicates if ``data`` should be encoded into Opus.

        Raises
        -------
        ClientException
            You are not connected.
        opus.OpusError
            Encoding the data failed.
        """
        # RTP sequence number wraps at 16 bits.
        self.checked_add('sequence', 1, 65535)
        if encode:
            encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
        else:
            encoded_data = data

        packet = self._get_voice_packet(encoded_data)
        try:
            self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
        except BlockingIOError:
            _log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)

        self.checked_add('timestamp', opus.Encoder.SAMPLES_PER_FRAME, 4294967295)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/voice_client.py
voice_client.py
from __future__ import annotations

from typing import Any, Iterator, List, Optional, TYPE_CHECKING, Tuple

from .asset import Asset, AssetMixin
from .utils import SnowflakeList, snowflake_time, MISSING
from .partial_emoji import _EmojiTag, PartialEmoji
from .user import User

__all__ = (
    'Emoji',
)

if TYPE_CHECKING:
    from .types.emoji import Emoji as EmojiPayload
    from .guild import Guild
    from .state import ConnectionState
    from .abc import Snowflake
    from .role import Role
    from datetime import datetime


class Emoji(_EmojiTag, AssetMixin):
    """Represents a custom emoji.

    Depending on the way this object was created, some of the attributes can
    have a value of ``None``.

    .. container:: operations

        .. describe:: x == y

            Checks if two emoji are the same.

        .. describe:: x != y

            Checks if two emoji are not the same.

        .. describe:: hash(x)

            Return the emoji's hash.

        .. describe:: iter(x)

            Returns an iterator of ``(field, value)`` pairs. This allows this class
            to be used as an iterable in list/dict/etc constructions.

        .. describe:: str(x)

            Returns the emoji rendered for discord.

    Attributes
    -----------
    name: :class:`str`
        The name of the emoji.
    id: :class:`int`
        The emoji's ID.
    require_colons: :class:`bool`
        If colons are required to use this emoji in the client (:PJSalt: vs PJSalt).
    animated: :class:`bool`
        Whether an emoji is animated or not.
    managed: :class:`bool`
        If this emoji is managed by a Twitch integration.
    guild_id: :class:`int`
        The guild ID the emoji belongs to.
    available: :class:`bool`
        Whether the emoji is available for use.
    user: Optional[:class:`User`]
        The user that created the emoji. This can only be retrieved using
        :meth:`Guild.fetch_emoji` and having the :attr:`~Permissions.manage_emojis`
        permission.
    """

    # NOTE: slot order matters — __iter__ yields fields in this order.
    __slots__: Tuple[str, ...] = (
        'require_colons',
        'animated',
        'managed',
        'id',
        'name',
        '_roles',
        'guild_id',
        '_state',
        'user',
        'available',
    )

    def __init__(self, *, guild: Guild, state: ConnectionState, data: EmojiPayload):
        # Only the guild id is retained; the guild object itself is looked up
        # lazily through the connection state (see the ``guild`` property).
        self.guild_id: int = guild.id
        self._state: ConnectionState = state
        self._from_data(data)

    def _from_data(self, emoji: EmojiPayload):
        # Populate all slotted attributes from the raw API payload.
        self.id: int = int(emoji['id'])  # type: ignore
        self.name: str = emoji['name']  # type: ignore
        self.require_colons: bool = emoji.get('require_colons', False)
        self.managed: bool = emoji.get('managed', False)
        self.animated: bool = emoji.get('animated', False)
        self.available: bool = emoji.get('available', True)
        self._roles: SnowflakeList = SnowflakeList(map(int, emoji.get('roles', [])))
        user_data = emoji.get('user')
        self.user: Optional[User] = User(state=self._state, data=user_data) if user_data else None

    def _to_partial(self) -> PartialEmoji:
        # Down-convert to a PartialEmoji carrying only name/animated/id.
        return PartialEmoji(name=self.name, animated=self.animated, id=self.id)

    def __iter__(self) -> Iterator[Tuple[str, Any]]:
        # Yield public, non-None fields in slot order.
        for attr in self.__slots__:
            if attr.startswith('_'):
                continue
            value = getattr(self, attr, None)
            if value is not None:
                yield attr, value

    def __str__(self) -> str:
        if not self.animated:
            return f'<:{self.name}:{self.id}>'
        return f'<a:{self.name}:{self.id}>'

    def __repr__(self) -> str:
        return f'<Emoji id={self.id} name={self.name!r} animated={self.animated} managed={self.managed}>'

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, _EmojiTag) and self.id == other.id

    def __ne__(self, other: Any) -> bool:
        return not (isinstance(other, _EmojiTag) and self.id == other.id)

    def __hash__(self) -> int:
        # Snowflake timestamp portion — stable across processes.
        return self.id >> 22

    @property
    def created_at(self) -> datetime:
        """:class:`datetime.datetime`: Returns the emoji's creation time in UTC."""
        return snowflake_time(self.id)

    @property
    def url(self) -> str:
        """:class:`str`: Returns the URL of the emoji."""
        fmt = 'gif' if self.animated else 'png'
        return f'{Asset.BASE}/emojis/{self.id}.{fmt}'

    @property
    def roles(self) -> List[Role]:
        """List[:class:`Role`]: A :class:`list` of roles that is allowed to use this emoji.

        If roles is empty, the emoji is unrestricted.
        """
        guild = self.guild
        if guild is None:
            return []

        has_role = self._roles.has
        return [role for role in guild.roles if has_role(role.id)]

    @property
    def guild(self) -> Guild:
        """:class:`Guild`: The guild this emoji belongs to."""
        return self._state._get_guild(self.guild_id)

    def is_usable(self) -> bool:
        """:class:`bool`: Whether the bot can use this emoji.

        .. versionadded:: 1.3
        """
        if not self.available:
            return False
        if not self._roles:
            # No role restriction means anyone (including the bot) may use it.
            return True
        allowed_roles = self._roles
        my_roles = self.guild.me._roles
        return any(my_roles.has(role_id) for role_id in allowed_roles)

    async def delete(self, *, reason: Optional[str] = None) -> None:
        """|coro|

        Deletes the custom emoji.

        You must have :attr:`~Permissions.manage_emojis` permission to
        do this.

        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for deleting this emoji. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You are not allowed to delete emojis.
        HTTPException
            An error occurred deleting the emoji.
        """
        await self._state.http.delete_custom_emoji(self.guild.id, self.id, reason=reason)

    async def edit(self, *, name: str = MISSING, roles: List[Snowflake] = MISSING, reason: Optional[str] = None) -> Emoji:
        r"""|coro|

        Edits the custom emoji.

        You must have :attr:`~Permissions.manage_emojis` permission to
        do this.

        .. versionchanged:: 2.0
            The newly updated emoji is returned.

        Parameters
        -----------
        name: :class:`str`
            The new emoji name.
        roles: Optional[List[:class:`~discord.abc.Snowflake`]]
            A list of roles that can use this emoji. An empty list can be
            passed to make it available to everyone.
        reason: Optional[:class:`str`]
            The reason for editing this emoji. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You are not allowed to edit emojis.
        HTTPException
            An error occurred editing the emoji.

        Returns
        --------
        :class:`Emoji`
            The newly updated emoji.
        """
        # Only send the fields the caller actually supplied.
        fields = {}
        if name is not MISSING:
            fields['name'] = name
        if roles is not MISSING:
            fields['roles'] = [role.id for role in roles]
        data = await self._state.http.edit_custom_emoji(self.guild.id, self.id, payload=fields, reason=reason)
        return Emoji(guild=self.guild, data=data, state=self._state)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/emoji.py
emoji.py
from __future__ import annotations import copy import asyncio from typing import ( Any, Callable, Dict, List, Optional, TYPE_CHECKING, Protocol, Sequence, Tuple, TypeVar, Union, overload, runtime_checkable, ) from .iterators import HistoryIterator from .context_managers import Typing from .enums import ChannelType from .errors import InvalidArgument, ClientException from .mentions import AllowedMentions from .permissions import PermissionOverwrite, Permissions from .role import Role from .invite import Invite from .file import File from .voice_client import VoiceClient, VoiceProtocol from .sticker import GuildSticker, StickerItem from . import utils __all__ = ( 'Snowflake', 'User', 'PrivateChannel', 'GuildChannel', 'Messageable', 'Connectable', ) T = TypeVar('T', bound=VoiceProtocol) if TYPE_CHECKING: from datetime import datetime from .client import Client from .user import ClientUser from .asset import Asset from .state import ConnectionState from .guild import Guild from .member import Member from .channel import CategoryChannel from .embeds import Embed from .message import Message, MessageReference, PartialMessage from .channel import TextChannel, DMChannel, GroupChannel, PartialMessageable from .threads import Thread from .enums import InviteTarget from .ui.view import View from .types.channel import ( PermissionOverwrite as PermissionOverwritePayload, Channel as ChannelPayload, GuildChannel as GuildChannelPayload, OverwriteType, ) PartialMessageableChannel = Union[TextChannel, Thread, DMChannel, PartialMessageable] MessageableChannel = Union[PartialMessageableChannel, GroupChannel] SnowflakeTime = Union["Snowflake", datetime] MISSING = utils.MISSING class _Undefined: def __repr__(self) -> str: return 'see-below' _undefined: Any = _Undefined() @runtime_checkable class Snowflake(Protocol): """An ABC that details the common operations on a Discord model. Almost all :ref:`Discord models <discord_api_models>` meet this abstract base class. 
If you want to create a snowflake on your own, consider using :class:`.Object`. Attributes ----------- id: :class:`int` The model's unique ID. """ __slots__ = () id: int @runtime_checkable class User(Snowflake, Protocol): """An ABC that details the common operations on a Discord user. The following implement this ABC: - :class:`~discord.User` - :class:`~discord.ClientUser` - :class:`~discord.Member` This ABC must also implement :class:`~discord.abc.Snowflake`. Attributes ----------- name: :class:`str` The user's username. discriminator: :class:`str` The user's discriminator. avatar: :class:`~discord.Asset` The avatar asset the user has. bot: :class:`bool` If the user is a bot account. """ __slots__ = () name: str discriminator: str avatar: Asset bot: bool @property def display_name(self) -> str: """:class:`str`: Returns the user's display name.""" raise NotImplementedError @property def mention(self) -> str: """:class:`str`: Returns a string that allows you to mention the given user.""" raise NotImplementedError @runtime_checkable class PrivateChannel(Snowflake, Protocol): """An ABC that details the common operations on a private Discord channel. The following implement this ABC: - :class:`~discord.DMChannel` - :class:`~discord.GroupChannel` This ABC must also implement :class:`~discord.abc.Snowflake`. Attributes ----------- me: :class:`~discord.ClientUser` The user presenting yourself. 
""" __slots__ = () me: ClientUser class _Overwrites: __slots__ = ('id', 'allow', 'deny', 'type') ROLE = 0 MEMBER = 1 def __init__(self, data: PermissionOverwritePayload): self.id: int = int(data['id']) self.allow: int = int(data.get('allow', 0)) self.deny: int = int(data.get('deny', 0)) self.type: OverwriteType = data['type'] def _asdict(self) -> PermissionOverwritePayload: return { 'id': self.id, 'allow': str(self.allow), 'deny': str(self.deny), 'type': self.type, } def is_role(self) -> bool: return self.type == 0 def is_member(self) -> bool: return self.type == 1 GCH = TypeVar('GCH', bound='GuildChannel') class GuildChannel: """An ABC that details the common operations on a Discord guild channel. The following implement this ABC: - :class:`~discord.TextChannel` - :class:`~discord.VoiceChannel` - :class:`~discord.CategoryChannel` - :class:`~discord.StageChannel` This ABC must also implement :class:`~discord.abc.Snowflake`. Attributes ----------- name: :class:`str` The channel name. guild: :class:`~discord.Guild` The guild the channel belongs to. position: :class:`int` The position in the channel list. This is a number that starts at 0. e.g. the top channel is position 0. """ __slots__ = () id: int name: str guild: Guild type: ChannelType position: int category_id: Optional[int] _state: ConnectionState _overwrites: List[_Overwrites] if TYPE_CHECKING: def __init__(self, *, state: ConnectionState, guild: Guild, data: Dict[str, Any]): ... 
def __str__(self) -> str: return self.name @property def _sorting_bucket(self) -> int: raise NotImplementedError def _update(self, guild: Guild, data: Dict[str, Any]) -> None: raise NotImplementedError async def _move( self, position: int, parent_id: Optional[Any] = None, lock_permissions: bool = False, *, reason: Optional[str], ) -> None: if position < 0: raise InvalidArgument('Channel position cannot be less than 0.') http = self._state.http bucket = self._sorting_bucket channels: List[GuildChannel] = [c for c in self.guild.channels if c._sorting_bucket == bucket] channels.sort(key=lambda c: c.position) try: # remove ourselves from the channel list channels.remove(self) except ValueError: # not there somehow lol return else: index = next((i for i, c in enumerate(channels) if c.position >= position), len(channels)) # add ourselves at our designated position channels.insert(index, self) payload = [] for index, c in enumerate(channels): d: Dict[str, Any] = {'id': c.id, 'position': index} if parent_id is not _undefined and c.id == self.id: d.update(parent_id=parent_id, lock_permissions=lock_permissions) payload.append(d) await http.bulk_channel_update(self.guild.id, payload, reason=reason) async def _edit(self, options: Dict[str, Any], reason: Optional[str]) -> Optional[ChannelPayload]: try: parent = options.pop('category') except KeyError: parent_id = _undefined else: parent_id = parent and parent.id try: options['rate_limit_per_user'] = options.pop('slowmode_delay') except KeyError: pass try: rtc_region = options.pop('rtc_region') except KeyError: pass else: options['rtc_region'] = None if rtc_region is None else str(rtc_region) try: video_quality_mode = options.pop('video_quality_mode') except KeyError: pass else: options['video_quality_mode'] = int(video_quality_mode) lock_permissions = options.pop('sync_permissions', False) try: position = options.pop('position') except KeyError: if parent_id is not _undefined: if lock_permissions: category = 
self.guild.get_channel(parent_id) if category: options['permission_overwrites'] = [c._asdict() for c in category._overwrites] options['parent_id'] = parent_id elif lock_permissions and self.category_id is not None: # if we're syncing permissions on a pre-existing channel category without changing it # we need to update the permissions to point to the pre-existing category category = self.guild.get_channel(self.category_id) if category: options['permission_overwrites'] = [c._asdict() for c in category._overwrites] else: await self._move(position, parent_id=parent_id, lock_permissions=lock_permissions, reason=reason) overwrites = options.get('overwrites', None) if overwrites is not None: perms = [] for target, perm in overwrites.items(): if not isinstance(perm, PermissionOverwrite): raise InvalidArgument(f'Expected PermissionOverwrite received {perm.__class__.__name__}') allow, deny = perm.pair() payload = { 'allow': allow.value, 'deny': deny.value, 'id': target.id, } if isinstance(target, Role): payload['type'] = _Overwrites.ROLE else: payload['type'] = _Overwrites.MEMBER perms.append(payload) options['permission_overwrites'] = perms try: ch_type = options['type'] except KeyError: pass else: if not isinstance(ch_type, ChannelType): raise InvalidArgument('type field must be of type ChannelType') options['type'] = ch_type.value if options: return await self._state.http.edit_channel(self.id, reason=reason, **options) def _fill_overwrites(self, data: GuildChannelPayload) -> None: self._overwrites = [] everyone_index = 0 everyone_id = self.guild.id for index, overridden in enumerate(data.get('permission_overwrites', [])): overwrite = _Overwrites(overridden) self._overwrites.append(overwrite) if overwrite.type == _Overwrites.MEMBER: continue if overwrite.id == everyone_id: # the @everyone role is not guaranteed to be the first one # in the list of permission overwrites, however the permission # resolution code kind of requires that it is the first one in # the list since 
it is special. So we need the index so we can # swap it to be the first one. everyone_index = index # do the swap tmp = self._overwrites if tmp: tmp[everyone_index], tmp[0] = tmp[0], tmp[everyone_index] @property def changed_roles(self) -> List[Role]: """List[:class:`~discord.Role`]: Returns a list of roles that have been overridden from their default values in the :attr:`~discord.Guild.roles` attribute.""" ret = [] g = self.guild for overwrite in filter(lambda o: o.is_role(), self._overwrites): role = g.get_role(overwrite.id) if role is None: continue role = copy.copy(role) role.permissions.handle_overwrite(overwrite.allow, overwrite.deny) ret.append(role) return ret @property def mention(self) -> str: """:class:`str`: The string that allows you to mention the channel.""" return f'<#{self.id}>' @property def created_at(self) -> datetime: """:class:`datetime.datetime`: Returns the channel's creation time in UTC.""" return utils.snowflake_time(self.id) def overwrites_for(self, obj: Union[Role, User]) -> PermissionOverwrite: """Returns the channel-specific overwrites for a member or a role. Parameters ----------- obj: Union[:class:`~discord.Role`, :class:`~discord.abc.User`] The role or user denoting whose overwrite to get. Returns --------- :class:`~discord.PermissionOverwrite` The permission overwrites for this object. """ if isinstance(obj, User): predicate = lambda p: p.is_member() elif isinstance(obj, Role): predicate = lambda p: p.is_role() else: predicate = lambda p: True for overwrite in filter(predicate, self._overwrites): if overwrite.id == obj.id: allow = Permissions(overwrite.allow) deny = Permissions(overwrite.deny) return PermissionOverwrite.from_pair(allow, deny) return PermissionOverwrite() @property def overwrites(self) -> Dict[Union[Role, Member], PermissionOverwrite]: """Returns all of the channel's overwrites. 
This is returned as a dictionary where the key contains the target which can be either a :class:`~discord.Role` or a :class:`~discord.Member` and the value is the overwrite as a :class:`~discord.PermissionOverwrite`. Returns -------- Dict[Union[:class:`~discord.Role`, :class:`~discord.Member`], :class:`~discord.PermissionOverwrite`] The channel's permission overwrites. """ ret = {} for ow in self._overwrites: allow = Permissions(ow.allow) deny = Permissions(ow.deny) overwrite = PermissionOverwrite.from_pair(allow, deny) target = None if ow.is_role(): target = self.guild.get_role(ow.id) elif ow.is_member(): target = self.guild.get_member(ow.id) # TODO: There is potential data loss here in the non-chunked # case, i.e. target is None because get_member returned nothing. # This can be fixed with a slight breaking change to the return type, # i.e. adding discord.Object to the list of it # However, for now this is an acceptable compromise. if target is not None: ret[target] = overwrite return ret @property def category(self) -> Optional[CategoryChannel]: """Optional[:class:`~discord.CategoryChannel`]: The category this channel belongs to. If there is no category then this is ``None``. """ return self.guild.get_channel(self.category_id) # type: ignore @property def permissions_synced(self) -> bool: """:class:`bool`: Whether or not the permissions for this channel are synced with the category it belongs to. If there is no category then this is ``False``. .. versionadded:: 1.3 """ if self.category_id is None: return False category = self.guild.get_channel(self.category_id) return bool(category and category.overwrites == self.overwrites) def permissions_for(self, obj: Union[Member, Role], /) -> Permissions: """Handles permission resolution for the :class:`~discord.Member` or :class:`~discord.Role`. 
This function takes into consideration the following cases: - Guild owner - Guild roles - Channel overrides - Member overrides If a :class:`~discord.Role` is passed, then it checks the permissions someone with that role would have, which is essentially: - The default role permissions - The permissions of the role used as a parameter - The default role permission overwrites - The permission overwrites of the role used as a parameter .. versionchanged:: 2.0 The object passed in can now be a role object. Parameters ---------- obj: Union[:class:`~discord.Member`, :class:`~discord.Role`] The object to resolve permissions for. This could be either a member or a role. If it's a role then member overwrites are not computed. Returns ------- :class:`~discord.Permissions` The resolved permissions for the member or role. """ # The current cases can be explained as: # Guild owner get all permissions -- no questions asked. Otherwise... # The @everyone role gets the first application. # After that, the applied roles that the user has in the channel # (or otherwise) are then OR'd together. # After the role permissions are resolved, the member permissions # have to take into effect. # After all that is done.. you have to do the following: # If manage permissions is True, then all permissions are set to True. # The operation first takes into consideration the denied # and then the allowed. 
if self.guild.owner_id == obj.id: return Permissions.all() default = self.guild.default_role base = Permissions(default.permissions.value) # Handle the role case first if isinstance(obj, Role): base.value |= obj._permissions if base.administrator: return Permissions.all() # Apply @everyone allow/deny first since it's special try: maybe_everyone = self._overwrites[0] if maybe_everyone.id == self.guild.id: base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny) except IndexError: pass if obj.is_default(): return base overwrite = utils.get(self._overwrites, type=_Overwrites.ROLE, id=obj.id) if overwrite is not None: base.handle_overwrite(overwrite.allow, overwrite.deny) return base roles = obj._roles get_role = self.guild.get_role # Apply guild roles that the member has. for role_id in roles: role = get_role(role_id) if role is not None: base.value |= role._permissions # Guild-wide Administrator -> True for everything # Bypass all channel-specific overrides if base.administrator: return Permissions.all() # Apply @everyone allow/deny first since it's special try: maybe_everyone = self._overwrites[0] if maybe_everyone.id == self.guild.id: base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny) remaining_overwrites = self._overwrites[1:] else: remaining_overwrites = self._overwrites except IndexError: remaining_overwrites = self._overwrites denies = 0 allows = 0 # Apply channel specific role permission overwrites for overwrite in remaining_overwrites: if overwrite.is_role() and roles.has(overwrite.id): denies |= overwrite.deny allows |= overwrite.allow base.handle_overwrite(allow=allows, deny=denies) # Apply member specific permission overwrites for overwrite in remaining_overwrites: if overwrite.is_member() and overwrite.id == obj.id: base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny) break # if you can't send a message in a channel then you can't have certain # permissions as well if not base.send_messages: 
base.send_tts_messages = False base.mention_everyone = False base.embed_links = False base.attach_files = False # if you can't read a channel then you have no permissions there if not base.read_messages: denied = Permissions.all_channel() base.value &= ~denied.value return base async def delete(self, *, reason: Optional[str] = None) -> None: """|coro| Deletes the channel. You must have :attr:`~discord.Permissions.manage_channels` permission to use this. Parameters ----------- reason: Optional[:class:`str`] The reason for deleting this channel. Shows up on the audit log. Raises ------- ~discord.Forbidden You do not have proper permissions to delete the channel. ~discord.NotFound The channel was not found or was already deleted. ~discord.HTTPException Deleting the channel failed. """ await self._state.http.delete_channel(self.id, reason=reason) @overload async def set_permissions( self, target: Union[Member, Role], *, overwrite: Optional[Union[PermissionOverwrite, _Undefined]] = ..., reason: Optional[str] = ..., ) -> None: ... @overload async def set_permissions( self, target: Union[Member, Role], *, reason: Optional[str] = ..., **permissions: bool, ) -> None: ... async def set_permissions(self, target, *, overwrite=_undefined, reason=None, **permissions): r"""|coro| Sets the channel specific permission overwrites for a target in the channel. The ``target`` parameter should either be a :class:`~discord.Member` or a :class:`~discord.Role` that belongs to guild. The ``overwrite`` parameter, if given, must either be ``None`` or :class:`~discord.PermissionOverwrite`. For convenience, you can pass in keyword arguments denoting :class:`~discord.Permissions` attributes. If this is done, then you cannot mix the keyword arguments with the ``overwrite`` parameter. If the ``overwrite`` parameter is ``None``, then the permission overwrites are deleted. You must have the :attr:`~discord.Permissions.manage_roles` permission to use this. .. 
note:: This method *replaces* the old overwrites with the ones given. Examples ---------- Setting allow and deny: :: await message.channel.set_permissions(message.author, read_messages=True, send_messages=False) Deleting overwrites :: await channel.set_permissions(member, overwrite=None) Using :class:`~discord.PermissionOverwrite` :: overwrite = discord.PermissionOverwrite() overwrite.send_messages = False overwrite.read_messages = True await channel.set_permissions(member, overwrite=overwrite) Parameters ----------- target: Union[:class:`~discord.Member`, :class:`~discord.Role`] The member or role to overwrite permissions for. overwrite: Optional[:class:`~discord.PermissionOverwrite`] The permissions to allow and deny to the target, or ``None`` to delete the overwrite. \*\*permissions A keyword argument list of permissions to set for ease of use. Cannot be mixed with ``overwrite``. reason: Optional[:class:`str`] The reason for doing this action. Shows up on the audit log. Raises ------- ~discord.Forbidden You do not have permissions to edit channel specific permissions. ~discord.HTTPException Editing channel specific permissions failed. ~discord.NotFound The role or member being edited is not part of the guild. ~discord.InvalidArgument The overwrite parameter invalid or the target type was not :class:`~discord.Role` or :class:`~discord.Member`. 
""" http = self._state.http if isinstance(target, User): perm_type = _Overwrites.MEMBER elif isinstance(target, Role): perm_type = _Overwrites.ROLE else: raise InvalidArgument('target parameter must be either Member or Role') if overwrite is _undefined: if len(permissions) == 0: raise InvalidArgument('No overwrite provided.') try: overwrite = PermissionOverwrite(**permissions) except (ValueError, TypeError): raise InvalidArgument('Invalid permissions given to keyword arguments.') else: if len(permissions) > 0: raise InvalidArgument('Cannot mix overwrite and keyword arguments.') # TODO: wait for event if overwrite is None: await http.delete_channel_permissions(self.id, target.id, reason=reason) elif isinstance(overwrite, PermissionOverwrite): (allow, deny) = overwrite.pair() await http.edit_channel_permissions(self.id, target.id, allow.value, deny.value, perm_type, reason=reason) else: raise InvalidArgument('Invalid overwrite type provided.') async def _clone_impl( self: GCH, base_attrs: Dict[str, Any], *, name: Optional[str] = None, reason: Optional[str] = None, ) -> GCH: base_attrs['permission_overwrites'] = [x._asdict() for x in self._overwrites] base_attrs['parent_id'] = self.category_id base_attrs['name'] = name or self.name guild_id = self.guild.id cls = self.__class__ data = await self._state.http.create_channel(guild_id, self.type.value, reason=reason, **base_attrs) obj = cls(state=self._state, guild=self.guild, data=data) # temporarily add it to the cache self.guild._channels[obj.id] = obj # type: ignore return obj async def clone(self: GCH, *, name: Optional[str] = None, reason: Optional[str] = None) -> GCH: """|coro| Clones this channel. This creates a channel with the same properties as this channel. You must have the :attr:`~discord.Permissions.manage_channels` permission to do this. .. versionadded:: 1.1 Parameters ------------ name: Optional[:class:`str`] The name of the new channel. If not provided, defaults to this channel name. 
        reason: Optional[:class:`str`]
            The reason for cloning this channel. Shows up on the audit log.

        Raises
        -------
        ~discord.Forbidden
            You do not have the proper permissions to create this channel.
        ~discord.HTTPException
            Creating the channel failed.

        Returns
        --------
        :class:`.abc.GuildChannel`
            The channel that was created.
        """
        raise NotImplementedError

    # The four overloads below express that exactly one of ``beginning``,
    # ``end``, ``before`` or ``after`` must be supplied to move(); the other
    # keyword arguments are common to all of them.
    @overload
    async def move(
        self,
        *,
        beginning: bool,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        reason: Optional[str] = MISSING,
    ) -> None:
        ...

    @overload
    async def move(
        self,
        *,
        end: bool,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        # NOTE(review): typed ``str`` here but ``Optional[str]`` in the first
        # overload — likely an upstream inconsistency; annotations only.
        reason: str = MISSING,
    ) -> None:
        ...

    @overload
    async def move(
        self,
        *,
        before: Snowflake,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        reason: str = MISSING,
    ) -> None:
        ...

    @overload
    async def move(
        self,
        *,
        after: Snowflake,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        reason: str = MISSING,
    ) -> None:
        ...

    async def move(self, **kwargs) -> None:
        """|coro|

        A rich interface to help move a channel relative to other channels.

        If exact position movement is required, ``edit`` should be used instead.

        You must have the :attr:`~discord.Permissions.manage_channels` permission to
        do this.

        .. note::

            Voice channels will always be sorted below text channels.
            This is a Discord limitation.

        .. versionadded:: 1.7

        Parameters
        ------------
        beginning: :class:`bool`
            Whether to move the channel to the beginning of the
            channel list (or category if given).
            This is mutually exclusive with ``end``, ``before``, and ``after``.
        end: :class:`bool`
            Whether to move the channel to the end of the
            channel list (or category if given).
            This is mutually exclusive with ``beginning``, ``before``, and ``after``.
        before: :class:`~discord.abc.Snowflake`
            The channel that should be before our current channel.
            This is mutually exclusive with ``beginning``, ``end``, and ``after``.
        after: :class:`~discord.abc.Snowflake`
            The channel that should be after our current channel.
            This is mutually exclusive with ``beginning``, ``end``, and ``before``.
        offset: :class:`int`
            The number of channels to offset the move by. For example,
            an offset of ``2`` with ``beginning=True`` would move
            it 2 after the beginning. A positive number moves it below
            while a negative number moves it above. Note that this
            number is relative and computed after the ``beginning``,
            ``end``, ``before``, and ``after`` parameters.
        category: Optional[:class:`~discord.abc.Snowflake`]
            The category to move this channel under.
            If ``None`` is given then it moves it out of the category.
            This parameter is ignored if moving a category channel.
        sync_permissions: :class:`bool`
            Whether to sync the permissions with the category (if given).
        reason: :class:`str`
            The reason for the move.

        Raises
        -------
        InvalidArgument
            An invalid position was given or a bad mix of arguments were passed.
        Forbidden
            You do not have permissions to move the channel.
        HTTPException
            Moving the channel failed.
""" if not kwargs: return beginning, end = kwargs.get('beginning'), kwargs.get('end') before, after = kwargs.get('before'), kwargs.get('after') offset = kwargs.get('offset', 0) if sum(bool(a) for a in (beginning, end, before, after)) > 1: raise InvalidArgument('Only one of [before, after, end, beginning] can be used.') bucket = self._sorting_bucket parent_id = kwargs.get('category', MISSING) # fmt: off channels: List[GuildChannel] if parent_id not in (MISSING, None): parent_id = parent_id.id channels = [ ch for ch in self.guild.channels if ch._sorting_bucket == bucket and ch.category_id == parent_id ] else: channels = [ ch for ch in self.guild.channels if ch._sorting_bucket == bucket and ch.category_id == self.category_id ] # fmt: on channels.sort(key=lambda c: (c.position, c.id)) try: # Try to remove ourselves from the channel list channels.remove(self) except ValueError: # If we're not there then it's probably due to not being in the category pass index = None if beginning: index = 0 elif end: index = len(channels) elif before: index = next((i for i, c in enumerate(channels) if c.id == before.id), None) elif after: index = next((i + 1 for i, c in enumerate(channels) if c.id == after.id), None) if index is None: raise InvalidArgument('Could not resolve appropriate move position') channels.insert(max((index + offset), 0), self) payload = [] lock_permissions = kwargs.get('sync_permissions', False) reason = kwargs.get('reason') for index, channel in enumerate(channels): d = {'id': channel.id, 'position': index} if parent_id is not MISSING and channel.id == self.id: d.update(parent_id=parent_id, lock_permissions=lock_permissions) payload.append(d) await self._state.http.bulk_channel_update(self.guild.id, payload, reason=reason) async def create_invite( self, *, reason: Optional[str] = None, max_age: int = 0, max_uses: int = 0, temporary: bool = False, unique: bool = True, target_type: Optional[InviteTarget] = None, target_user: Optional[User] = None, 
        target_application_id: Optional[int] = None,
    ) -> Invite:
        """|coro|

        Creates an instant invite from a text or voice channel.

        You must have the :attr:`~discord.Permissions.create_instant_invite` permission to
        do this.

        Parameters
        ------------
        max_age: :class:`int`
            How long the invite should last in seconds. If it's 0 then the invite
            doesn't expire. Defaults to ``0``.
        max_uses: :class:`int`
            How many uses the invite could be used for. If it's 0 then there are
            unlimited uses. Defaults to ``0``.
        temporary: :class:`bool`
            Denotes that the invite grants temporary membership
            (i.e. they get kicked after they disconnect). Defaults to ``False``.
        unique: :class:`bool`
            Indicates if a unique invite URL should be created. Defaults to ``True``.
            If this is set to ``False`` then it will return a previously created
            invite.
        reason: Optional[:class:`str`]
            The reason for creating this invite. Shows up on the audit log.
        target_type: Optional[:class:`.InviteTarget`]
            The type of target for the voice channel invite, if any.

            .. versionadded:: 2.0
        target_user: Optional[:class:`User`]
            The user whose stream to display for this invite, required if ``target_type``
            is ``TargetType.stream``. The user must be streaming in the channel.

            .. versionadded:: 2.0
        target_application_id: Optional[:class:`int`]
            The id of the embedded application for the invite, required if ``target_type``
            is ``TargetType.embedded_application``.

            .. versionadded:: 2.0

        Raises
        -------
        ~discord.HTTPException
            Invite creation failed.
        ~discord.NotFound
            The channel that was passed is a category or an invalid channel.

        Returns
        --------
        :class:`~discord.Invite`
            The invite that was created.
""" data = await self._state.http.create_invite( self.id, reason=reason, max_age=max_age, max_uses=max_uses, temporary=temporary, unique=unique, target_type=target_type.value if target_type else None, target_user_id=target_user.id if target_user else None, target_application_id=target_application_id, ) return Invite.from_incomplete(data=data, state=self._state) async def invites(self) -> List[Invite]: """|coro| Returns a list of all active instant invites from this channel. You must have :attr:`~discord.Permissions.manage_channels` to get this information. Raises ------- ~discord.Forbidden You do not have proper permissions to get the information. ~discord.HTTPException An error occurred while fetching the information. Returns ------- List[:class:`~discord.Invite`] The list of invites that are currently active. """ state = self._state data = await state.http.invites_from_channel(self.id) guild = self.guild return [Invite(state=state, data=invite, channel=self, guild=guild) for invite in data] class Messageable: """An ABC that details the common operations on a model that can send messages. The following implement this ABC: - :class:`~discord.TextChannel` - :class:`~discord.DMChannel` - :class:`~discord.GroupChannel` - :class:`~discord.User` - :class:`~discord.Member` - :class:`~discord.ext.commands.Context` - :class:`~discord.Thread` """ __slots__ = () _state: ConnectionState async def _get_channel(self) -> MessageableChannel: raise NotImplementedError @overload async def send( self, content: Optional[str] = ..., *, tts: bool = ..., embed: Embed = ..., file: File = ..., stickers: Sequence[Union[GuildSticker, StickerItem]] = ..., delete_after: float = ..., nonce: Union[str, int] = ..., allowed_mentions: AllowedMentions = ..., reference: Union[Message, MessageReference, PartialMessage] = ..., mention_author: bool = ..., view: View = ..., ) -> Message: ... 
    # Overloads covering the four valid combinations of single/multiple
    # embeds and single/multiple files (mixing singular and plural forms
    # of the same parameter raises at runtime).
    @overload
    async def send(
        self,
        content: Optional[str] = ...,
        *,
        tts: bool = ...,
        embed: Embed = ...,
        files: List[File] = ...,
        stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
        delete_after: float = ...,
        nonce: Union[str, int] = ...,
        allowed_mentions: AllowedMentions = ...,
        reference: Union[Message, MessageReference, PartialMessage] = ...,
        mention_author: bool = ...,
        view: View = ...,
    ) -> Message:
        ...

    @overload
    async def send(
        self,
        content: Optional[str] = ...,
        *,
        tts: bool = ...,
        embeds: List[Embed] = ...,
        file: File = ...,
        stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
        delete_after: float = ...,
        nonce: Union[str, int] = ...,
        allowed_mentions: AllowedMentions = ...,
        reference: Union[Message, MessageReference, PartialMessage] = ...,
        mention_author: bool = ...,
        view: View = ...,
    ) -> Message:
        ...

    @overload
    async def send(
        self,
        content: Optional[str] = ...,
        *,
        tts: bool = ...,
        embeds: List[Embed] = ...,
        files: List[File] = ...,
        stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
        delete_after: float = ...,
        nonce: Union[str, int] = ...,
        allowed_mentions: AllowedMentions = ...,
        reference: Union[Message, MessageReference, PartialMessage] = ...,
        mention_author: bool = ...,
        view: View = ...,
    ) -> Message:
        ...

    async def send(
        self,
        content=None,
        *,
        tts=None,
        embed=None,
        embeds=None,
        file=None,
        files=None,
        stickers=None,
        delete_after=None,
        nonce=None,
        allowed_mentions=None,
        reference=None,
        mention_author=None,
        view=None,
    ):
        """|coro|

        Sends a message to the destination with the content given.

        The content must be a type that can convert to a string through ``str(content)``.
        If the content is set to ``None`` (the default), then the ``embed`` parameter must
        be provided.

        To upload a single file, the ``file`` parameter should be used with a
        single :class:`~discord.File` object. To upload multiple files, the ``files``
        parameter should be used with a :class:`list` of :class:`~discord.File` objects.
        **Specifying both parameters will lead to an exception**.

        To upload a single embed, the ``embed`` parameter should be used with a
        single :class:`~discord.Embed` object. To upload multiple embeds, the ``embeds``
        parameter should be used with a :class:`list` of :class:`~discord.Embed` objects.
        **Specifying both parameters will lead to an exception**.

        Parameters
        ------------
        content: Optional[:class:`str`]
            The content of the message to send.
        tts: :class:`bool`
            Indicates if the message should be sent using text-to-speech.
        embed: :class:`~discord.Embed`
            The rich embed for the content.
        file: :class:`~discord.File`
            The file to upload.
        files: List[:class:`~discord.File`]
            A list of files to upload. Must be a maximum of 10.
        nonce: Union[:class:`str`, :class:`int`]
            The nonce to use for sending this message. If the message was successfully sent,
            then the message will have a nonce with this value.
        delete_after: :class:`float`
            If provided, the number of seconds to wait in the background
            before deleting the message we just sent. If the deletion fails,
            then it is silently ignored.
        allowed_mentions: :class:`~discord.AllowedMentions`
            Controls the mentions being processed in this message. If this is
            passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
            The merging behaviour only overrides attributes that have been explicitly passed
            to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
            If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
            are used instead.

            .. versionadded:: 1.4

        reference: Union[:class:`~discord.Message`, :class:`~discord.MessageReference`, :class:`~discord.PartialMessage`]
            A reference to the :class:`~discord.Message` to which you are replying, this can be created using
            :meth:`~discord.Message.to_reference` or passed directly as a :class:`~discord.Message`. You can control
            whether this mentions the author of the referenced message using the
            :attr:`~discord.AllowedMentions.replied_user` attribute of ``allowed_mentions`` or by
            setting ``mention_author``.

            .. versionadded:: 1.6

        mention_author: Optional[:class:`bool`]
            If set, overrides the :attr:`~discord.AllowedMentions.replied_user` attribute of ``allowed_mentions``.

            .. versionadded:: 1.6
        view: :class:`discord.ui.View`
            A Discord UI View to add to the message.
        embeds: List[:class:`~discord.Embed`]
            A list of embeds to upload. Must be a maximum of 10.

            .. versionadded:: 2.0
        stickers: Sequence[Union[:class:`~discord.GuildSticker`, :class:`~discord.StickerItem`]]
            A list of stickers to upload. Must be a maximum of 3.

            .. versionadded:: 2.0

        Raises
        --------
        ~discord.HTTPException
            Sending the message failed.
        ~discord.Forbidden
            You do not have the proper permissions to send the message.
        ~discord.InvalidArgument
            The ``files`` list is not of the appropriate size,
            you specified both ``file`` and ``files``,
            or you specified both ``embed`` and ``embeds``,
            or the ``reference`` object is not a :class:`~discord.Message`,
            :class:`~discord.MessageReference` or :class:`~discord.PartialMessage`.

        Returns
        ---------
        :class:`~discord.Message`
            The message that was sent.
""" channel = await self._get_channel() state = self._state content = str(content) if content is not None else None if embed is not None and embeds is not None: raise InvalidArgument('cannot pass both embed and embeds parameter to send()') if embed is not None: embed = embed.to_dict() elif embeds is not None: if len(embeds) > 10: raise InvalidArgument('embeds parameter must be a list of up to 10 elements') embeds = [embed.to_dict() for embed in embeds] if stickers is not None: stickers = [sticker.id for sticker in stickers] if allowed_mentions is not None: if state.allowed_mentions is not None: allowed_mentions = state.allowed_mentions.merge(allowed_mentions).to_dict() else: allowed_mentions = allowed_mentions.to_dict() else: allowed_mentions = state.allowed_mentions and state.allowed_mentions.to_dict() if mention_author is not None: allowed_mentions = allowed_mentions or AllowedMentions().to_dict() allowed_mentions['replied_user'] = bool(mention_author) if reference is not None: try: reference = reference.to_message_reference_dict() except AttributeError: raise InvalidArgument('reference parameter must be Message, MessageReference, or PartialMessage') from None if view: if not hasattr(view, '__discord_ui_view__'): raise InvalidArgument(f'view parameter must be View not {view.__class__!r}') components = view.to_components() else: components = None if file is not None and files is not None: raise InvalidArgument('cannot pass both file and files parameter to send()') if file is not None: if not isinstance(file, File): raise InvalidArgument('file parameter must be File') try: data = await state.http.send_files( channel.id, files=[file], allowed_mentions=allowed_mentions, content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, message_reference=reference, stickers=stickers, components=components, ) finally: file.close() elif files is not None: if len(files) > 10: raise InvalidArgument('files parameter must be a list of up to 10 elements') elif not 
all(isinstance(file, File) for file in files): raise InvalidArgument('files parameter must be a list of File') try: data = await state.http.send_files( channel.id, files=files, content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, message_reference=reference, stickers=stickers, components=components, ) finally: for f in files: f.close() else: data = await state.http.send_message( channel.id, content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, message_reference=reference, stickers=stickers, components=components, ) ret = state.create_message(channel=channel, data=data) if view: state.store_view(view, ret.id) if delete_after is not None: await ret.delete(delay=delete_after) return ret async def trigger_typing(self) -> None: """|coro| Triggers a *typing* indicator to the destination. *Typing* indicator will go away after 10 seconds, or after a message is sent. """ channel = await self._get_channel() await self._state.http.send_typing(channel.id) def typing(self) -> Typing: """Returns a context manager that allows you to type for an indefinite period of time. This is useful for denoting long computations in your bot. .. note:: This is both a regular context manager and an async context manager. This means that both ``with`` and ``async with`` work with this. Example Usage: :: async with channel.typing(): # simulate something heavy await asyncio.sleep(10) await channel.send('done!') """ return Typing(self) async def fetch_message(self, id: int, /) -> Message: """|coro| Retrieves a single :class:`~discord.Message` from the destination. Parameters ------------ id: :class:`int` The message ID to look for. Raises -------- ~discord.NotFound The specified message was not found. ~discord.Forbidden You do not have the permissions required to get a message. ~discord.HTTPException Retrieving the message failed. Returns -------- :class:`~discord.Message` The message asked for. 
""" channel = await self._get_channel() data = await self._state.http.get_message(channel.id, id) return self._state.create_message(channel=channel, data=data) async def pins(self) -> List[Message]: """|coro| Retrieves all messages that are currently pinned in the channel. .. note:: Due to a limitation with the Discord API, the :class:`.Message` objects returned by this method do not contain complete :attr:`.Message.reactions` data. Raises ------- ~discord.HTTPException Retrieving the pinned messages failed. Returns -------- List[:class:`~discord.Message`] The messages that are currently pinned. """ channel = await self._get_channel() state = self._state data = await state.http.pins_from(channel.id) return [state.create_message(channel=channel, data=m) for m in data] def history( self, *, limit: Optional[int] = 100, before: Optional[SnowflakeTime] = None, after: Optional[SnowflakeTime] = None, around: Optional[SnowflakeTime] = None, oldest_first: Optional[bool] = None, ) -> HistoryIterator: """Returns an :class:`~discord.AsyncIterator` that enables receiving the destination's message history. You must have :attr:`~discord.Permissions.read_message_history` permissions to use this. Examples --------- Usage :: counter = 0 async for message in channel.history(limit=200): if message.author == client.user: counter += 1 Flattening into a list: :: messages = await channel.history(limit=123).flatten() # messages is now a list of Message... All parameters are optional. Parameters ----------- limit: Optional[:class:`int`] The number of messages to retrieve. If ``None``, retrieves every message in the channel. Note, however, that this would make it a slow operation. before: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]] Retrieve messages before this date or message. If a datetime is provided, it is recommended to use a UTC aware datetime. If the datetime is naive, it is assumed to be local time. 
        after: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
            Retrieve messages after this date or message.
            If a datetime is provided, it is recommended to use a UTC aware datetime.
            If the datetime is naive, it is assumed to be local time.
        around: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
            Retrieve messages around this date or message.
            If a datetime is provided, it is recommended to use a UTC aware datetime.
            If the datetime is naive, it is assumed to be local time.
            When using this argument, the maximum limit is 101. Note that if the limit is an
            even number then this will return at most limit + 1 messages.
        oldest_first: Optional[:class:`bool`]
            If set to ``True``, return messages in oldest->newest order. Defaults to ``True`` if
            ``after`` is specified, otherwise ``False``.

        Raises
        ------
        ~discord.Forbidden
            You do not have permissions to get channel message history.
        ~discord.HTTPException
            The request to get message history failed.

        Yields
        -------
        :class:`~discord.Message`
            The message with the message data parsed.
        """
        return HistoryIterator(self, limit=limit, before=before, after=after, around=around, oldest_first=oldest_first)


class Connectable(Protocol):
    """An ABC that details the common operations on a channel that can
    connect to a voice server.

    The following implement this ABC:

    - :class:`~discord.VoiceChannel`
    - :class:`~discord.StageChannel`

    Note
    ----
    This ABC is not decorated with :func:`typing.runtime_checkable`, so will fail
    :func:`isinstance`/:func:`issubclass` checks.
""" __slots__ = () _state: ConnectionState def _get_voice_client_key(self) -> Tuple[int, str]: raise NotImplementedError def _get_voice_state_pair(self) -> Tuple[int, int]: raise NotImplementedError async def connect( self, *, timeout: float = 60.0, reconnect: bool = True, cls: Callable[[Client, Connectable], T] = VoiceClient, ) -> T: """|coro| Connects to voice and creates a :class:`VoiceClient` to establish your connection to the voice server. This requires :attr:`Intents.voice_states`. Parameters ----------- timeout: :class:`float` The timeout in seconds to wait for the voice endpoint. reconnect: :class:`bool` Whether the bot should automatically attempt a reconnect if a part of the handshake fails or the gateway goes down. cls: Type[:class:`VoiceProtocol`] A type that subclasses :class:`~discord.VoiceProtocol` to connect with. Defaults to :class:`~discord.VoiceClient`. Raises ------- asyncio.TimeoutError Could not connect to the voice channel in time. ~discord.ClientException You are already connected to a voice channel. ~discord.opus.OpusNotLoaded The opus library has not been loaded. Returns -------- :class:`~discord.VoiceProtocol` A voice client that is fully connected to the voice server. """ key_id, _ = self._get_voice_client_key() state = self._state if state._get_voice_client(key_id): raise ClientException('Already connected to a voice channel.') client = state._get_client() voice = cls(client, self) if not isinstance(voice, VoiceProtocol): raise TypeError('Type must meet VoiceProtocol abstract base class.') state._add_voice_client(key_id, voice) try: await voice.connect(timeout=timeout, reconnect=reconnect) except asyncio.TimeoutError: try: await voice.disconnect(force=True) except Exception: # we don't care if disconnect failed because connection failed pass raise # re-raise return voice
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/abc.py
abc.py
from __future__ import annotations import asyncio import json import logging import sys from typing import ( Any, ClassVar, Coroutine, Dict, Iterable, List, Optional, Sequence, TYPE_CHECKING, Tuple, Type, TypeVar, Union, ) from urllib.parse import quote as _uriquote import weakref import aiohttp from .errors import HTTPException, Forbidden, NotFound, LoginFailure, DiscordServerError, GatewayNotFound, InvalidArgument from .gateway import DiscordClientWebSocketResponse from . import __version__, utils from .utils import MISSING _log = logging.getLogger(__name__) if TYPE_CHECKING: from .file import File from .enums import ( AuditLogAction, InteractionResponseType, ) from .types import ( appinfo, audit_log, channel, components, emoji, embed, guild, integration, interactions, invite, member, message, template, role, user, webhook, channel, widget, threads, voice, sticker, ) from .types.snowflake import Snowflake, SnowflakeList from types import TracebackType T = TypeVar('T') BE = TypeVar('BE', bound=BaseException) MU = TypeVar('MU', bound='MaybeUnlock') Response = Coroutine[Any, Any, T] async def json_or_text(response: aiohttp.ClientResponse) -> Union[Dict[str, Any], str]: text = await response.text(encoding='utf-8') try: if response.headers['content-type'] == 'application/json': return utils._from_json(text) except KeyError: # Thanks Cloudflare pass return text class Route: BASE: ClassVar[str] = 'https://discord.com/api/v8' def __init__(self, method: str, path: str, **parameters: Any) -> None: self.path: str = path self.method: str = method url = self.BASE + self.path if parameters: url = url.format_map({k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()}) self.url: str = url # major parameters: self.channel_id: Optional[Snowflake] = parameters.get('channel_id') self.guild_id: Optional[Snowflake] = parameters.get('guild_id') self.webhook_id: Optional[Snowflake] = parameters.get('webhook_id') self.webhook_token: Optional[str] = 
parameters.get('webhook_token') @property def bucket(self) -> str: # the bucket is just method + path w/ major parameters return f'{self.channel_id}:{self.guild_id}:{self.path}' class MaybeUnlock: def __init__(self, lock: asyncio.Lock) -> None: self.lock: asyncio.Lock = lock self._unlock: bool = True def __enter__(self: MU) -> MU: return self def defer(self) -> None: self._unlock = False def __exit__( self, exc_type: Optional[Type[BE]], exc: Optional[BE], traceback: Optional[TracebackType], ) -> None: if self._unlock: self.lock.release() # For some reason, the Discord voice websocket expects this header to be # completely lowercase while aiohttp respects spec and does it as case-insensitive aiohttp.hdrs.WEBSOCKET = 'websocket' # type: ignore class HTTPClient: """Represents an HTTP client sending HTTP requests to the Discord API.""" def __init__( self, connector: Optional[aiohttp.BaseConnector] = None, *, proxy: Optional[str] = None, proxy_auth: Optional[aiohttp.BasicAuth] = None, loop: Optional[asyncio.AbstractEventLoop] = None, unsync_clock: bool = True, ) -> None: self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop self.connector = connector self.__session: aiohttp.ClientSession = MISSING # filled in static_login self._locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary() self._global_over: asyncio.Event = asyncio.Event() self._global_over.set() self.token: Optional[str] = None self.bot_token: bool = False self.proxy: Optional[str] = proxy self.proxy_auth: Optional[aiohttp.BasicAuth] = proxy_auth self.use_clock: bool = not unsync_clock user_agent = 'DiscordBot (https://github.com/Rapptz/discord.py {0}) Python/{1[0]}.{1[1]} aiohttp/{2}' self.user_agent: str = user_agent.format(__version__, sys.version_info, aiohttp.__version__) def recreate(self) -> None: if self.__session.closed: self.__session = aiohttp.ClientSession( connector=self.connector, ws_response_class=DiscordClientWebSocketResponse ) async def 
ws_connect(self, url: str, *, compress: int = 0) -> Any: kwargs = { 'proxy_auth': self.proxy_auth, 'proxy': self.proxy, 'max_msg_size': 0, 'timeout': 30.0, 'autoclose': False, 'headers': { 'User-Agent': self.user_agent, }, 'compress': compress, } return await self.__session.ws_connect(url, **kwargs) async def request( self, route: Route, *, files: Optional[Sequence[File]] = None, form: Optional[Iterable[Dict[str, Any]]] = None, **kwargs: Any, ) -> Any: bucket = route.bucket method = route.method url = route.url lock = self._locks.get(bucket) if lock is None: lock = asyncio.Lock() if bucket is not None: self._locks[bucket] = lock # header creation headers: Dict[str, str] = { 'User-Agent': self.user_agent, } if self.token is not None: headers['Authorization'] = 'Bot ' + self.token # some checking if it's a JSON request if 'json' in kwargs: headers['Content-Type'] = 'application/json' kwargs['data'] = utils._to_json(kwargs.pop('json')) try: reason = kwargs.pop('reason') except KeyError: pass else: if reason: headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ') kwargs['headers'] = headers # Proxy support if self.proxy is not None: kwargs['proxy'] = self.proxy if self.proxy_auth is not None: kwargs['proxy_auth'] = self.proxy_auth if not self._global_over.is_set(): # wait until the global lock is complete await self._global_over.wait() response: Optional[aiohttp.ClientResponse] = None data: Optional[Union[Dict[str, Any], str]] = None await lock.acquire() with MaybeUnlock(lock) as maybe_lock: for tries in range(5): if files: for f in files: f.reset(seek=tries) if form: form_data = aiohttp.FormData() for params in form: form_data.add_field(**params) kwargs['data'] = form_data try: async with self.__session.request(method, url, **kwargs) as response: _log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), response.status) # even errors have text involved in them so this is safe to call data = await json_or_text(response) # check if we have rate 
limit header information remaining = response.headers.get('X-Ratelimit-Remaining') if remaining == '0' and response.status != 429: # we've depleted our current bucket delta = utils._parse_ratelimit_header(response, use_clock=self.use_clock) _log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta) maybe_lock.defer() self.loop.call_later(delta, lock.release) # the request was successful so just return the text/json if 300 > response.status >= 200: _log.debug('%s %s has received %s', method, url, data) return data # we are being rate limited if response.status == 429: if not response.headers.get('Via') or isinstance(data, str): # Banned by Cloudflare more than likely. raise HTTPException(response, data) fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket "%s"' # sleep a bit retry_after: float = data['retry_after'] _log.warning(fmt, retry_after, bucket) # check if it's a global rate limit is_global = data.get('global', False) if is_global: _log.warning('Global rate limit has been hit. Retrying in %.2f seconds.', retry_after) self._global_over.clear() await asyncio.sleep(retry_after) _log.debug('Done sleeping for the rate limit. 
Retrying...') # release the global lock now that the # global rate limit has passed if is_global: self._global_over.set() _log.debug('Global rate limit is now over.') continue # we've received a 500, 502, or 504, unconditional retry if response.status in {500, 502, 504}: await asyncio.sleep(1 + tries * 2) continue # the usual error cases if response.status == 403: raise Forbidden(response, data) elif response.status == 404: raise NotFound(response, data) elif response.status >= 500: raise DiscordServerError(response, data) else: raise HTTPException(response, data) # This is handling exceptions from the request except OSError as e: # Connection reset by peer if tries < 4 and e.errno in (54, 10054): await asyncio.sleep(1 + tries * 2) continue raise if response is not None: # We've run out of retries, raise. if response.status >= 500: raise DiscordServerError(response, data) raise HTTPException(response, data) raise RuntimeError('Unreachable code in HTTP handling') async def get_from_cdn(self, url: str) -> bytes: async with self.__session.get(url) as resp: if resp.status == 200: return await resp.read() elif resp.status == 404: raise NotFound(resp, 'asset not found') elif resp.status == 403: raise Forbidden(resp, 'cannot retrieve asset') else: raise HTTPException(resp, 'failed to get asset') # state management async def close(self) -> None: if self.__session: await self.__session.close() # login management async def static_login(self, token: str) -> user.User: # Necessary to get aiohttp to stop complaining about session creation self.__session = aiohttp.ClientSession(connector=self.connector, ws_response_class=DiscordClientWebSocketResponse) old_token = self.token self.token = token try: data = await self.request(Route('GET', '/users/@me')) except HTTPException as exc: self.token = old_token if exc.status == 401: raise LoginFailure('Improper token has been passed.') from exc raise return data def logout(self) -> Response[None]: return self.request(Route('POST', 
'/auth/logout')) # Group functionality def start_group(self, user_id: Snowflake, recipients: List[int]) -> Response[channel.GroupDMChannel]: payload = { 'recipients': recipients, } return self.request(Route('POST', '/users/{user_id}/channels', user_id=user_id), json=payload) def leave_group(self, channel_id) -> Response[None]: return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id)) # Message management def start_private_message(self, user_id: Snowflake) -> Response[channel.DMChannel]: payload = { 'recipient_id': user_id, } return self.request(Route('POST', '/users/@me/channels'), json=payload) def send_message( self, channel_id: Snowflake, content: Optional[str], *, tts: bool = False, embed: Optional[embed.Embed] = None, embeds: Optional[List[embed.Embed]] = None, nonce: Optional[str] = None, allowed_mentions: Optional[message.AllowedMentions] = None, message_reference: Optional[message.MessageReference] = None, stickers: Optional[List[sticker.StickerItem]] = None, components: Optional[List[components.Component]] = None, ) -> Response[message.Message]: r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id) payload = {} if content: payload['content'] = content if tts: payload['tts'] = True if embed: payload['embeds'] = [embed] if embeds: payload['embeds'] = embeds if nonce: payload['nonce'] = nonce if allowed_mentions: payload['allowed_mentions'] = allowed_mentions if message_reference: payload['message_reference'] = message_reference if components: payload['components'] = components if stickers: payload['sticker_ids'] = stickers return self.request(r, json=payload) def send_typing(self, channel_id: Snowflake) -> Response[None]: return self.request(Route('POST', '/channels/{channel_id}/typing', channel_id=channel_id)) def send_multipart_helper( self, route: Route, *, files: Sequence[File], content: Optional[str] = None, tts: bool = False, embed: Optional[embed.Embed] = None, embeds: 
Optional[Iterable[Optional[embed.Embed]]] = None, nonce: Optional[str] = None, allowed_mentions: Optional[message.AllowedMentions] = None, message_reference: Optional[message.MessageReference] = None, stickers: Optional[List[sticker.StickerItem]] = None, components: Optional[List[components.Component]] = None, ) -> Response[message.Message]: form = [] payload: Dict[str, Any] = {'tts': tts} if content: payload['content'] = content if embed: payload['embeds'] = [embed] if embeds: payload['embeds'] = embeds if nonce: payload['nonce'] = nonce if allowed_mentions: payload['allowed_mentions'] = allowed_mentions if message_reference: payload['message_reference'] = message_reference if components: payload['components'] = components if stickers: payload['sticker_ids'] = stickers form.append({'name': 'payload_json', 'value': utils._to_json(payload)}) if len(files) == 1: file = files[0] form.append( { 'name': 'file', 'value': file.fp, 'filename': file.filename, 'content_type': 'application/octet-stream', } ) else: for index, file in enumerate(files): form.append( { 'name': f'file{index}', 'value': file.fp, 'filename': file.filename, 'content_type': 'application/octet-stream', } ) return self.request(route, form=form, files=files) def send_files( self, channel_id: Snowflake, *, files: Sequence[File], content: Optional[str] = None, tts: bool = False, embed: Optional[embed.Embed] = None, embeds: Optional[List[embed.Embed]] = None, nonce: Optional[str] = None, allowed_mentions: Optional[message.AllowedMentions] = None, message_reference: Optional[message.MessageReference] = None, stickers: Optional[List[sticker.StickerItem]] = None, components: Optional[List[components.Component]] = None, ) -> Response[message.Message]: r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id) return self.send_multipart_helper( r, files=files, content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, 
message_reference=message_reference, stickers=stickers, components=components, ) def delete_message( self, channel_id: Snowflake, message_id: Snowflake, *, reason: Optional[str] = None ) -> Response[None]: r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id) return self.request(r, reason=reason) def delete_messages( self, channel_id: Snowflake, message_ids: SnowflakeList, *, reason: Optional[str] = None ) -> Response[None]: r = Route('POST', '/channels/{channel_id}/messages/bulk-delete', channel_id=channel_id) payload = { 'messages': message_ids, } return self.request(r, json=payload, reason=reason) def edit_message(self, channel_id: Snowflake, message_id: Snowflake, **fields: Any) -> Response[message.Message]: r = Route('PATCH', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id) return self.request(r, json=fields) def add_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]: r = Route( 'PUT', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me', channel_id=channel_id, message_id=message_id, emoji=emoji, ) return self.request(r) def remove_reaction( self, channel_id: Snowflake, message_id: Snowflake, emoji: str, member_id: Snowflake ) -> Response[None]: r = Route( 'DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{member_id}', channel_id=channel_id, message_id=message_id, member_id=member_id, emoji=emoji, ) return self.request(r) def remove_own_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]: r = Route( 'DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me', channel_id=channel_id, message_id=message_id, emoji=emoji, ) return self.request(r) def get_reaction_users( self, channel_id: Snowflake, message_id: Snowflake, emoji: str, limit: int, after: Optional[Snowflake] = None, ) -> Response[List[user.User]]: r = Route( 
'GET', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}', channel_id=channel_id, message_id=message_id, emoji=emoji, ) params: Dict[str, Any] = { 'limit': limit, } if after: params['after'] = after return self.request(r, params=params) def clear_reactions(self, channel_id: Snowflake, message_id: Snowflake) -> Response[None]: r = Route( 'DELETE', '/channels/{channel_id}/messages/{message_id}/reactions', channel_id=channel_id, message_id=message_id, ) return self.request(r) def clear_single_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]: r = Route( 'DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}', channel_id=channel_id, message_id=message_id, emoji=emoji, ) return self.request(r) def get_message(self, channel_id: Snowflake, message_id: Snowflake) -> Response[message.Message]: r = Route('GET', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id) return self.request(r) def get_channel(self, channel_id: Snowflake) -> Response[channel.Channel]: r = Route('GET', '/channels/{channel_id}', channel_id=channel_id) return self.request(r) def logs_from( self, channel_id: Snowflake, limit: int, before: Optional[Snowflake] = None, after: Optional[Snowflake] = None, around: Optional[Snowflake] = None, ) -> Response[List[message.Message]]: params: Dict[str, Any] = { 'limit': limit, } if before is not None: params['before'] = before if after is not None: params['after'] = after if around is not None: params['around'] = around return self.request(Route('GET', '/channels/{channel_id}/messages', channel_id=channel_id), params=params) def publish_message(self, channel_id: Snowflake, message_id: Snowflake) -> Response[message.Message]: return self.request( Route( 'POST', '/channels/{channel_id}/messages/{message_id}/crosspost', channel_id=channel_id, message_id=message_id, ) ) def pin_message(self, channel_id: Snowflake, message_id: Snowflake, reason: 
Optional[str] = None) -> Response[None]: r = Route( 'PUT', '/channels/{channel_id}/pins/{message_id}', channel_id=channel_id, message_id=message_id, ) return self.request(r, reason=reason) def unpin_message(self, channel_id: Snowflake, message_id: Snowflake, reason: Optional[str] = None) -> Response[None]: r = Route( 'DELETE', '/channels/{channel_id}/pins/{message_id}', channel_id=channel_id, message_id=message_id, ) return self.request(r, reason=reason) def pins_from(self, channel_id: Snowflake) -> Response[List[message.Message]]: return self.request(Route('GET', '/channels/{channel_id}/pins', channel_id=channel_id)) # Member management def kick(self, user_id: Snowflake, guild_id: Snowflake, reason: Optional[str] = None) -> Response[None]: r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id) if reason: # thanks aiohttp r.url = f'{r.url}?reason={_uriquote(reason)}' return self.request(r) def ban( self, user_id: Snowflake, guild_id: Snowflake, delete_message_days: int = 1, reason: Optional[str] = None, ) -> Response[None]: r = Route('PUT', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id) params = { 'delete_message_days': delete_message_days, } return self.request(r, params=params, reason=reason) def unban(self, user_id: Snowflake, guild_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]: r = Route('DELETE', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id) return self.request(r, reason=reason) def guild_voice_state( self, user_id: Snowflake, guild_id: Snowflake, *, mute: Optional[bool] = None, deafen: Optional[bool] = None, reason: Optional[str] = None, ) -> Response[member.Member]: r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id) payload = {} if mute is not None: payload['mute'] = mute if deafen is not None: payload['deaf'] = deafen return self.request(r, json=payload, reason=reason) def edit_profile(self, payload: 
Dict[str, Any]) -> Response[user.User]: return self.request(Route('PATCH', '/users/@me'), json=payload) def change_my_nickname( self, guild_id: Snowflake, nickname: str, *, reason: Optional[str] = None, ) -> Response[member.Nickname]: r = Route('PATCH', '/guilds/{guild_id}/members/@me/nick', guild_id=guild_id) payload = { 'nick': nickname, } return self.request(r, json=payload, reason=reason) def change_nickname( self, guild_id: Snowflake, user_id: Snowflake, nickname: str, *, reason: Optional[str] = None, ) -> Response[member.Member]: r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id) payload = { 'nick': nickname, } return self.request(r, json=payload, reason=reason) def edit_my_voice_state(self, guild_id: Snowflake, payload: Dict[str, Any]) -> Response[None]: r = Route('PATCH', '/guilds/{guild_id}/voice-states/@me', guild_id=guild_id) return self.request(r, json=payload) def edit_voice_state(self, guild_id: Snowflake, user_id: Snowflake, payload: Dict[str, Any]) -> Response[None]: r = Route('PATCH', '/guilds/{guild_id}/voice-states/{user_id}', guild_id=guild_id, user_id=user_id) return self.request(r, json=payload) def edit_member( self, guild_id: Snowflake, user_id: Snowflake, *, reason: Optional[str] = None, **fields: Any, ) -> Response[member.MemberWithUser]: r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id) return self.request(r, json=fields, reason=reason) # Channel management def edit_channel( self, channel_id: Snowflake, *, reason: Optional[str] = None, **options: Any, ) -> Response[channel.Channel]: r = Route('PATCH', '/channels/{channel_id}', channel_id=channel_id) valid_keys = ( 'name', 'parent_id', 'topic', 'bitrate', 'nsfw', 'user_limit', 'position', 'permission_overwrites', 'rate_limit_per_user', 'type', 'rtc_region', 'video_quality_mode', 'archived', 'auto_archive_duration', 'locked', 'invitable', 'default_auto_archive_duration', ) payload = {k: v for k, v in 
options.items() if k in valid_keys} return self.request(r, reason=reason, json=payload) def bulk_channel_update( self, guild_id: Snowflake, data: List[guild.ChannelPositionUpdate], *, reason: Optional[str] = None, ) -> Response[None]: r = Route('PATCH', '/guilds/{guild_id}/channels', guild_id=guild_id) return self.request(r, json=data, reason=reason) def create_channel( self, guild_id: Snowflake, channel_type: channel.ChannelType, *, reason: Optional[str] = None, **options: Any, ) -> Response[channel.GuildChannel]: payload = { 'type': channel_type, } valid_keys = ( 'name', 'parent_id', 'topic', 'bitrate', 'nsfw', 'user_limit', 'position', 'permission_overwrites', 'rate_limit_per_user', 'rtc_region', 'video_quality_mode', 'auto_archive_duration', ) payload.update({k: v for k, v in options.items() if k in valid_keys and v is not None}) return self.request(Route('POST', '/guilds/{guild_id}/channels', guild_id=guild_id), json=payload, reason=reason) def delete_channel( self, channel_id: Snowflake, *, reason: Optional[str] = None, ) -> Response[None]: return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id), reason=reason) # Thread management def start_thread_with_message( self, channel_id: Snowflake, message_id: Snowflake, *, name: str, auto_archive_duration: threads.ThreadArchiveDuration, reason: Optional[str] = None, ) -> Response[threads.Thread]: payload = { 'name': name, 'auto_archive_duration': auto_archive_duration, } route = Route( 'POST', '/channels/{channel_id}/messages/{message_id}/threads', channel_id=channel_id, message_id=message_id ) return self.request(route, json=payload, reason=reason) def start_thread_without_message( self, channel_id: Snowflake, *, name: str, auto_archive_duration: threads.ThreadArchiveDuration, type: threads.ThreadType, invitable: bool = True, reason: Optional[str] = None, ) -> Response[threads.Thread]: payload = { 'name': name, 'auto_archive_duration': auto_archive_duration, 'type': type, 'invitable': 
invitable, } route = Route('POST', '/channels/{channel_id}/threads', channel_id=channel_id) return self.request(route, json=payload, reason=reason) def join_thread(self, channel_id: Snowflake) -> Response[None]: return self.request(Route('POST', '/channels/{channel_id}/thread-members/@me', channel_id=channel_id)) def add_user_to_thread(self, channel_id: Snowflake, user_id: Snowflake) -> Response[None]: return self.request( Route('PUT', '/channels/{channel_id}/thread-members/{user_id}', channel_id=channel_id, user_id=user_id) ) def leave_thread(self, channel_id: Snowflake) -> Response[None]: return self.request(Route('DELETE', '/channels/{channel_id}/thread-members/@me', channel_id=channel_id)) def remove_user_from_thread(self, channel_id: Snowflake, user_id: Snowflake) -> Response[None]: route = Route('DELETE', '/channels/{channel_id}/thread-members/{user_id}', channel_id=channel_id, user_id=user_id) return self.request(route) def get_public_archived_threads( self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50 ) -> Response[threads.ThreadPaginationPayload]: route = Route('GET', '/channels/{channel_id}/threads/archived/public', channel_id=channel_id) params = {} if before: params['before'] = before params['limit'] = limit return self.request(route, params=params) def get_private_archived_threads( self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50 ) -> Response[threads.ThreadPaginationPayload]: route = Route('GET', '/channels/{channel_id}/threads/archived/private', channel_id=channel_id) params = {} if before: params['before'] = before params['limit'] = limit return self.request(route, params=params) def get_joined_private_archived_threads( self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50 ) -> Response[threads.ThreadPaginationPayload]: route = Route('GET', '/channels/{channel_id}/users/@me/threads/archived/private', channel_id=channel_id) params = {} if before: params['before'] 
= before params['limit'] = limit return self.request(route, params=params) def get_active_threads(self, guild_id: Snowflake) -> Response[threads.ThreadPaginationPayload]: route = Route('GET', '/guilds/{guild_id}/threads/active', guild_id=guild_id) return self.request(route) def get_thread_members(self, channel_id: Snowflake) -> Response[List[threads.ThreadMember]]: route = Route('GET', '/channels/{channel_id}/thread-members', channel_id=channel_id) return self.request(route) # Webhook management def create_webhook( self, channel_id: Snowflake, *, name: str, avatar: Optional[bytes] = None, reason: Optional[str] = None, ) -> Response[webhook.Webhook]: payload: Dict[str, Any] = { 'name': name, } if avatar is not None: payload['avatar'] = avatar r = Route('POST', '/channels/{channel_id}/webhooks', channel_id=channel_id) return self.request(r, json=payload, reason=reason) def channel_webhooks(self, channel_id: Snowflake) -> Response[List[webhook.Webhook]]: return self.request(Route('GET', '/channels/{channel_id}/webhooks', channel_id=channel_id)) def guild_webhooks(self, guild_id: Snowflake) -> Response[List[webhook.Webhook]]: return self.request(Route('GET', '/guilds/{guild_id}/webhooks', guild_id=guild_id)) def get_webhook(self, webhook_id: Snowflake) -> Response[webhook.Webhook]: return self.request(Route('GET', '/webhooks/{webhook_id}', webhook_id=webhook_id)) def follow_webhook( self, channel_id: Snowflake, webhook_channel_id: Snowflake, reason: Optional[str] = None, ) -> Response[None]: payload = { 'webhook_channel_id': str(webhook_channel_id), } return self.request( Route('POST', '/channels/{channel_id}/followers', channel_id=channel_id), json=payload, reason=reason ) # Guild management def get_guilds( self, limit: int, before: Optional[Snowflake] = None, after: Optional[Snowflake] = None, ) -> Response[List[guild.Guild]]: params: Dict[str, Any] = { 'limit': limit, } if before: params['before'] = before if after: params['after'] = after return 
self.request(Route('GET', '/users/@me/guilds'), params=params) def leave_guild(self, guild_id: Snowflake) -> Response[None]: return self.request(Route('DELETE', '/users/@me/guilds/{guild_id}', guild_id=guild_id)) def get_guild(self, guild_id: Snowflake) -> Response[guild.Guild]: return self.request(Route('GET', '/guilds/{guild_id}', guild_id=guild_id)) def delete_guild(self, guild_id: Snowflake) -> Response[None]: return self.request(Route('DELETE', '/guilds/{guild_id}', guild_id=guild_id)) def create_guild(self, name: str, region: str, icon: Optional[str]) -> Response[guild.Guild]: payload = { 'name': name, 'region': region, } if icon: payload['icon'] = icon return self.request(Route('POST', '/guilds'), json=payload) def edit_guild(self, guild_id: Snowflake, *, reason: Optional[str] = None, **fields: Any) -> Response[guild.Guild]: valid_keys = ( 'name', 'region', 'icon', 'afk_timeout', 'owner_id', 'afk_channel_id', 'splash', 'discovery_splash', 'features', 'verification_level', 'system_channel_id', 'default_message_notifications', 'description', 'explicit_content_filter', 'banner', 'system_channel_flags', 'rules_channel_id', 'public_updates_channel_id', 'preferred_locale', ) payload = {k: v for k, v in fields.items() if k in valid_keys} return self.request(Route('PATCH', '/guilds/{guild_id}', guild_id=guild_id), json=payload, reason=reason) def get_template(self, code: str) -> Response[template.Template]: return self.request(Route('GET', '/guilds/templates/{code}', code=code)) def guild_templates(self, guild_id: Snowflake) -> Response[List[template.Template]]: return self.request(Route('GET', '/guilds/{guild_id}/templates', guild_id=guild_id)) def create_template(self, guild_id: Snowflake, payload: template.CreateTemplate) -> Response[template.Template]: return self.request(Route('POST', '/guilds/{guild_id}/templates', guild_id=guild_id), json=payload) def sync_template(self, guild_id: Snowflake, code: str) -> Response[template.Template]: return 
self.request(Route('PUT', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code)) def edit_template(self, guild_id: Snowflake, code: str, payload) -> Response[template.Template]: valid_keys = ( 'name', 'description', ) payload = {k: v for k, v in payload.items() if k in valid_keys} return self.request( Route('PATCH', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code), json=payload ) def delete_template(self, guild_id: Snowflake, code: str) -> Response[None]: return self.request(Route('DELETE', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code)) def create_from_template(self, code: str, name: str, region: str, icon: Optional[str]) -> Response[guild.Guild]: payload = { 'name': name, 'region': region, } if icon: payload['icon'] = icon return self.request(Route('POST', '/guilds/templates/{code}', code=code), json=payload) def get_bans(self, guild_id: Snowflake) -> Response[List[guild.Ban]]: return self.request(Route('GET', '/guilds/{guild_id}/bans', guild_id=guild_id)) def get_ban(self, user_id: Snowflake, guild_id: Snowflake) -> Response[guild.Ban]: return self.request(Route('GET', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)) def get_vanity_code(self, guild_id: Snowflake) -> Response[invite.VanityInvite]: return self.request(Route('GET', '/guilds/{guild_id}/vanity-url', guild_id=guild_id)) def change_vanity_code(self, guild_id: Snowflake, code: str, *, reason: Optional[str] = None) -> Response[None]: payload: Dict[str, Any] = {'code': code} return self.request(Route('PATCH', '/guilds/{guild_id}/vanity-url', guild_id=guild_id), json=payload, reason=reason) def get_all_guild_channels(self, guild_id: Snowflake) -> Response[List[guild.GuildChannel]]: return self.request(Route('GET', '/guilds/{guild_id}/channels', guild_id=guild_id)) def get_members( self, guild_id: Snowflake, limit: int, after: Optional[Snowflake] ) -> Response[List[member.MemberWithUser]]: params: Dict[str, Any] = { 'limit': 
limit, } if after: params['after'] = after r = Route('GET', '/guilds/{guild_id}/members', guild_id=guild_id) return self.request(r, params=params) def get_member(self, guild_id: Snowflake, member_id: Snowflake) -> Response[member.MemberWithUser]: return self.request(Route('GET', '/guilds/{guild_id}/members/{member_id}', guild_id=guild_id, member_id=member_id)) def prune_members( self, guild_id: Snowflake, days: int, compute_prune_count: bool, roles: List[str], *, reason: Optional[str] = None, ) -> Response[guild.GuildPrune]: payload: Dict[str, Any] = { 'days': days, 'compute_prune_count': 'true' if compute_prune_count else 'false', } if roles: payload['include_roles'] = ', '.join(roles) return self.request(Route('POST', '/guilds/{guild_id}/prune', guild_id=guild_id), json=payload, reason=reason) def estimate_pruned_members( self, guild_id: Snowflake, days: int, roles: List[str], ) -> Response[guild.GuildPrune]: params: Dict[str, Any] = { 'days': days, } if roles: params['include_roles'] = ', '.join(roles) return self.request(Route('GET', '/guilds/{guild_id}/prune', guild_id=guild_id), params=params) def get_sticker(self, sticker_id: Snowflake) -> Response[sticker.Sticker]: return self.request(Route('GET', '/stickers/{sticker_id}', sticker_id=sticker_id)) def list_premium_sticker_packs(self) -> Response[sticker.ListPremiumStickerPacks]: return self.request(Route('GET', '/sticker-packs')) def get_all_guild_stickers(self, guild_id: Snowflake) -> Response[List[sticker.GuildSticker]]: return self.request(Route('GET', '/guilds/{guild_id}/stickers', guild_id=guild_id)) def get_guild_sticker(self, guild_id: Snowflake, sticker_id: Snowflake) -> Response[sticker.GuildSticker]: return self.request( Route('GET', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id) ) def create_guild_sticker( self, guild_id: Snowflake, payload: sticker.CreateGuildSticker, file: File, reason: str ) -> Response[sticker.GuildSticker]: initial_bytes = 
file.fp.read(16) try: mime_type = utils._get_mime_type_for_image(initial_bytes) except InvalidArgument: if initial_bytes.startswith(b'{'): mime_type = 'application/json' else: mime_type = 'application/octet-stream' finally: file.reset() form: List[Dict[str, Any]] = [ { 'name': 'file', 'value': file.fp, 'filename': file.filename, 'content_type': mime_type, } ] for k, v in payload.items(): form.append( { 'name': k, 'value': v, } ) return self.request( Route('POST', '/guilds/{guild_id}/stickers', guild_id=guild_id), form=form, files=[file], reason=reason ) def modify_guild_sticker( self, guild_id: Snowflake, sticker_id: Snowflake, payload: sticker.EditGuildSticker, reason: Optional[str], ) -> Response[sticker.GuildSticker]: return self.request( Route('PATCH', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id), json=payload, reason=reason, ) def delete_guild_sticker(self, guild_id: Snowflake, sticker_id: Snowflake, reason: Optional[str]) -> Response[None]: return self.request( Route('DELETE', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id), reason=reason, ) def get_all_custom_emojis(self, guild_id: Snowflake) -> Response[List[emoji.Emoji]]: return self.request(Route('GET', '/guilds/{guild_id}/emojis', guild_id=guild_id)) def get_custom_emoji(self, guild_id: Snowflake, emoji_id: Snowflake) -> Response[emoji.Emoji]: return self.request(Route('GET', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)) def create_custom_emoji( self, guild_id: Snowflake, name: str, image: bytes, *, roles: Optional[SnowflakeList] = None, reason: Optional[str] = None, ) -> Response[emoji.Emoji]: payload = { 'name': name, 'image': image, 'roles': roles or [], } r = Route('POST', '/guilds/{guild_id}/emojis', guild_id=guild_id) return self.request(r, json=payload, reason=reason) def delete_custom_emoji( self, guild_id: Snowflake, emoji_id: Snowflake, *, reason: Optional[str] = None, ) -> 
Response[None]: r = Route('DELETE', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id) return self.request(r, reason=reason) def edit_custom_emoji( self, guild_id: Snowflake, emoji_id: Snowflake, *, payload: Dict[str, Any], reason: Optional[str] = None, ) -> Response[emoji.Emoji]: r = Route('PATCH', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id) return self.request(r, json=payload, reason=reason) def get_all_integrations(self, guild_id: Snowflake) -> Response[List[integration.Integration]]: r = Route('GET', '/guilds/{guild_id}/integrations', guild_id=guild_id) return self.request(r) def create_integration(self, guild_id: Snowflake, type: integration.IntegrationType, id: int) -> Response[None]: payload = { 'type': type, 'id': id, } r = Route('POST', '/guilds/{guild_id}/integrations', guild_id=guild_id) return self.request(r, json=payload) def edit_integration(self, guild_id: Snowflake, integration_id: Snowflake, **payload: Any) -> Response[None]: r = Route( 'PATCH', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id, integration_id=integration_id ) return self.request(r, json=payload) def sync_integration(self, guild_id: Snowflake, integration_id: Snowflake) -> Response[None]: r = Route( 'POST', '/guilds/{guild_id}/integrations/{integration_id}/sync', guild_id=guild_id, integration_id=integration_id ) return self.request(r) def delete_integration( self, guild_id: Snowflake, integration_id: Snowflake, *, reason: Optional[str] = None ) -> Response[None]: r = Route( 'DELETE', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id, integration_id=integration_id ) return self.request(r, reason=reason) def get_audit_logs( self, guild_id: Snowflake, limit: int = 100, before: Optional[Snowflake] = None, after: Optional[Snowflake] = None, user_id: Optional[Snowflake] = None, action_type: Optional[AuditLogAction] = None, ) -> Response[audit_log.AuditLog]: params: Dict[str, Any] = 
{'limit': limit} if before: params['before'] = before if after: params['after'] = after if user_id: params['user_id'] = user_id if action_type: params['action_type'] = action_type r = Route('GET', '/guilds/{guild_id}/audit-logs', guild_id=guild_id) return self.request(r, params=params) def get_widget(self, guild_id: Snowflake) -> Response[widget.Widget]: return self.request(Route('GET', '/guilds/{guild_id}/widget.json', guild_id=guild_id)) def edit_widget(self, guild_id: Snowflake, payload) -> Response[widget.WidgetSettings]: return self.request(Route('PATCH', '/guilds/{guild_id}/widget', guild_id=guild_id), json=payload) # Invite management def create_invite( self, channel_id: Snowflake, *, reason: Optional[str] = None, max_age: int = 0, max_uses: int = 0, temporary: bool = False, unique: bool = True, target_type: Optional[invite.InviteTargetType] = None, target_user_id: Optional[Snowflake] = None, target_application_id: Optional[Snowflake] = None, ) -> Response[invite.Invite]: r = Route('POST', '/channels/{channel_id}/invites', channel_id=channel_id) payload = { 'max_age': max_age, 'max_uses': max_uses, 'temporary': temporary, 'unique': unique, } if target_type: payload['target_type'] = target_type if target_user_id: payload['target_user_id'] = target_user_id if target_application_id: payload['target_application_id'] = str(target_application_id) return self.request(r, reason=reason, json=payload) def get_invite( self, invite_id: str, *, with_counts: bool = True, with_expiration: bool = True ) -> Response[invite.Invite]: params = { 'with_counts': int(with_counts), 'with_expiration': int(with_expiration), } return self.request(Route('GET', '/invites/{invite_id}', invite_id=invite_id), params=params) def invites_from(self, guild_id: Snowflake) -> Response[List[invite.Invite]]: return self.request(Route('GET', '/guilds/{guild_id}/invites', guild_id=guild_id)) def invites_from_channel(self, channel_id: Snowflake) -> Response[List[invite.Invite]]: return 
self.request(Route('GET', '/channels/{channel_id}/invites', channel_id=channel_id)) def delete_invite(self, invite_id: str, *, reason: Optional[str] = None) -> Response[None]: return self.request(Route('DELETE', '/invites/{invite_id}', invite_id=invite_id), reason=reason) # Role management def get_roles(self, guild_id: Snowflake) -> Response[List[role.Role]]: return self.request(Route('GET', '/guilds/{guild_id}/roles', guild_id=guild_id)) def edit_role( self, guild_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None, **fields: Any ) -> Response[role.Role]: r = Route('PATCH', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id) valid_keys = ('name', 'permissions', 'color', 'hoist', 'mentionable') payload = {k: v for k, v in fields.items() if k in valid_keys} return self.request(r, json=payload, reason=reason) def delete_role(self, guild_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]: r = Route('DELETE', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id) return self.request(r, reason=reason) def replace_roles( self, user_id: Snowflake, guild_id: Snowflake, role_ids: List[int], *, reason: Optional[str] = None, ) -> Response[member.MemberWithUser]: return self.edit_member(guild_id=guild_id, user_id=user_id, roles=role_ids, reason=reason) def create_role(self, guild_id: Snowflake, *, reason: Optional[str] = None, **fields: Any) -> Response[role.Role]: r = Route('POST', '/guilds/{guild_id}/roles', guild_id=guild_id) return self.request(r, json=fields, reason=reason) def move_role_position( self, guild_id: Snowflake, positions: List[guild.RolePositionUpdate], *, reason: Optional[str] = None, ) -> Response[List[role.Role]]: r = Route('PATCH', '/guilds/{guild_id}/roles', guild_id=guild_id) return self.request(r, json=positions, reason=reason) def add_role( self, guild_id: Snowflake, user_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None ) -> Response[None]: r = 
Route( 'PUT', '/guilds/{guild_id}/members/{user_id}/roles/{role_id}', guild_id=guild_id, user_id=user_id, role_id=role_id, ) return self.request(r, reason=reason) def remove_role( self, guild_id: Snowflake, user_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None ) -> Response[None]: r = Route( 'DELETE', '/guilds/{guild_id}/members/{user_id}/roles/{role_id}', guild_id=guild_id, user_id=user_id, role_id=role_id, ) return self.request(r, reason=reason) def edit_channel_permissions( self, channel_id: Snowflake, target: Snowflake, allow: str, deny: str, type: channel.OverwriteType, *, reason: Optional[str] = None, ) -> Response[None]: payload = {'id': target, 'allow': allow, 'deny': deny, 'type': type} r = Route('PUT', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target) return self.request(r, json=payload, reason=reason) def delete_channel_permissions( self, channel_id: Snowflake, target: channel.OverwriteType, *, reason: Optional[str] = None ) -> Response[None]: r = Route('DELETE', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target) return self.request(r, reason=reason) # Voice management def move_member( self, user_id: Snowflake, guild_id: Snowflake, channel_id: Snowflake, *, reason: Optional[str] = None, ) -> Response[member.MemberWithUser]: return self.edit_member(guild_id=guild_id, user_id=user_id, channel_id=channel_id, reason=reason) # Stage instance management def get_stage_instance(self, channel_id: Snowflake) -> Response[channel.StageInstance]: return self.request(Route('GET', '/stage-instances/{channel_id}', channel_id=channel_id)) def create_stage_instance(self, *, reason: Optional[str], **payload: Any) -> Response[channel.StageInstance]: valid_keys = ( 'channel_id', 'topic', 'privacy_level', ) payload = {k: v for k, v in payload.items() if k in valid_keys} return self.request(Route('POST', '/stage-instances'), json=payload, reason=reason) def edit_stage_instance(self, 
channel_id: Snowflake, *, reason: Optional[str] = None, **payload: Any) -> Response[None]: valid_keys = ( 'topic', 'privacy_level', ) payload = {k: v for k, v in payload.items() if k in valid_keys} return self.request( Route('PATCH', '/stage-instances/{channel_id}', channel_id=channel_id), json=payload, reason=reason ) def delete_stage_instance(self, channel_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]: return self.request(Route('DELETE', '/stage-instances/{channel_id}', channel_id=channel_id), reason=reason) # Application commands (global) def get_global_commands(self, application_id: Snowflake) -> Response[List[interactions.ApplicationCommand]]: return self.request(Route('GET', '/applications/{application_id}/commands', application_id=application_id)) def get_global_command( self, application_id: Snowflake, command_id: Snowflake ) -> Response[interactions.ApplicationCommand]: r = Route( 'GET', '/applications/{application_id}/commands/{command_id}', application_id=application_id, command_id=command_id, ) return self.request(r) def upsert_global_command(self, application_id: Snowflake, payload) -> Response[interactions.ApplicationCommand]: r = Route('POST', '/applications/{application_id}/commands', application_id=application_id) return self.request(r, json=payload) def edit_global_command( self, application_id: Snowflake, command_id: Snowflake, payload: interactions.EditApplicationCommand, ) -> Response[interactions.ApplicationCommand]: valid_keys = ( 'name', 'description', 'options', ) payload = {k: v for k, v in payload.items() if k in valid_keys} # type: ignore r = Route( 'PATCH', '/applications/{application_id}/commands/{command_id}', application_id=application_id, command_id=command_id, ) return self.request(r, json=payload) def delete_global_command(self, application_id: Snowflake, command_id: Snowflake) -> Response[None]: r = Route( 'DELETE', '/applications/{application_id}/commands/{command_id}', application_id=application_id, 
command_id=command_id, ) return self.request(r) def bulk_upsert_global_commands( self, application_id: Snowflake, payload ) -> Response[List[interactions.ApplicationCommand]]: r = Route('PUT', '/applications/{application_id}/commands', application_id=application_id) return self.request(r, json=payload) # Application commands (guild) def get_guild_commands( self, application_id: Snowflake, guild_id: Snowflake ) -> Response[List[interactions.ApplicationCommand]]: r = Route( 'GET', '/applications/{application_id}/guilds/{guild_id}/commands', application_id=application_id, guild_id=guild_id, ) return self.request(r) def get_guild_command( self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake, ) -> Response[interactions.ApplicationCommand]: r = Route( 'GET', '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}', application_id=application_id, guild_id=guild_id, command_id=command_id, ) return self.request(r) def upsert_guild_command( self, application_id: Snowflake, guild_id: Snowflake, payload: interactions.EditApplicationCommand, ) -> Response[interactions.ApplicationCommand]: r = Route( 'POST', '/applications/{application_id}/guilds/{guild_id}/commands', application_id=application_id, guild_id=guild_id, ) return self.request(r, json=payload) def edit_guild_command( self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake, payload: interactions.EditApplicationCommand, ) -> Response[interactions.ApplicationCommand]: valid_keys = ( 'name', 'description', 'options', ) payload = {k: v for k, v in payload.items() if k in valid_keys} # type: ignore r = Route( 'PATCH', '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}', application_id=application_id, guild_id=guild_id, command_id=command_id, ) return self.request(r, json=payload) def delete_guild_command( self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake, ) -> Response[None]: r = Route( 'DELETE', 
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}', application_id=application_id, guild_id=guild_id, command_id=command_id, ) return self.request(r) def bulk_upsert_guild_commands( self, application_id: Snowflake, guild_id: Snowflake, payload: List[interactions.EditApplicationCommand], ) -> Response[List[interactions.ApplicationCommand]]: r = Route( 'PUT', '/applications/{application_id}/guilds/{guild_id}/commands', application_id=application_id, guild_id=guild_id, ) return self.request(r, json=payload) # Interaction responses def _edit_webhook_helper( self, route: Route, file: Optional[File] = None, content: Optional[str] = None, embeds: Optional[List[embed.Embed]] = None, allowed_mentions: Optional[message.AllowedMentions] = None, ): payload: Dict[str, Any] = {} if content: payload['content'] = content if embeds: payload['embeds'] = embeds if allowed_mentions: payload['allowed_mentions'] = allowed_mentions form: List[Dict[str, Any]] = [ { 'name': 'payload_json', 'value': utils._to_json(payload), } ] if file: form.append( { 'name': 'file', 'value': file.fp, 'filename': file.filename, 'content_type': 'application/octet-stream', } ) return self.request(route, form=form, files=[file] if file else None) def create_interaction_response( self, interaction_id: Snowflake, token: str, *, type: InteractionResponseType, data: Optional[interactions.InteractionApplicationCommandCallbackData] = None, ) -> Response[None]: r = Route( 'POST', '/interactions/{interaction_id}/{interaction_token}/callback', interaction_id=interaction_id, interaction_token=token, ) payload: Dict[str, Any] = { 'type': type, } if data is not None: payload['data'] = data return self.request(r, json=payload) def get_original_interaction_response( self, application_id: Snowflake, token: str, ) -> Response[message.Message]: r = Route( 'GET', '/webhooks/{application_id}/{interaction_token}/messages/@original', application_id=application_id, interaction_token=token, ) return 
self.request(r) def edit_original_interaction_response( self, application_id: Snowflake, token: str, file: Optional[File] = None, content: Optional[str] = None, embeds: Optional[List[embed.Embed]] = None, allowed_mentions: Optional[message.AllowedMentions] = None, ) -> Response[message.Message]: r = Route( 'PATCH', '/webhooks/{application_id}/{interaction_token}/messages/@original', application_id=application_id, interaction_token=token, ) return self._edit_webhook_helper(r, file=file, content=content, embeds=embeds, allowed_mentions=allowed_mentions) def delete_original_interaction_response(self, application_id: Snowflake, token: str) -> Response[None]: r = Route( 'DELETE', '/webhooks/{application_id}/{interaction_token}/messages/@original', application_id=application_id, interaction_token=token, ) return self.request(r) def create_followup_message( self, application_id: Snowflake, token: str, files: List[File] = [], content: Optional[str] = None, tts: bool = False, embeds: Optional[List[embed.Embed]] = None, allowed_mentions: Optional[message.AllowedMentions] = None, ) -> Response[message.Message]: r = Route( 'POST', '/webhooks/{application_id}/{interaction_token}', application_id=application_id, interaction_token=token, ) return self.send_multipart_helper( r, content=content, files=files, tts=tts, embeds=embeds, allowed_mentions=allowed_mentions, ) def edit_followup_message( self, application_id: Snowflake, token: str, message_id: Snowflake, file: Optional[File] = None, content: Optional[str] = None, embeds: Optional[List[embed.Embed]] = None, allowed_mentions: Optional[message.AllowedMentions] = None, ) -> Response[message.Message]: r = Route( 'PATCH', '/webhooks/{application_id}/{interaction_token}/messages/{message_id}', application_id=application_id, interaction_token=token, message_id=message_id, ) return self._edit_webhook_helper(r, file=file, content=content, embeds=embeds, allowed_mentions=allowed_mentions) def delete_followup_message(self, 
application_id: Snowflake, token: str, message_id: Snowflake) -> Response[None]: r = Route( 'DELETE', '/webhooks/{application_id}/{interaction_token}/messages/{message_id}', application_id=application_id, interaction_token=token, message_id=message_id, ) return self.request(r) def get_guild_application_command_permissions( self, application_id: Snowflake, guild_id: Snowflake, ) -> Response[List[interactions.GuildApplicationCommandPermissions]]: r = Route( 'GET', '/applications/{application_id}/guilds/{guild_id}/commands/permissions', application_id=application_id, guild_id=guild_id, ) return self.request(r) def get_application_command_permissions( self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake, ) -> Response[interactions.GuildApplicationCommandPermissions]: r = Route( 'GET', '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions', application_id=application_id, guild_id=guild_id, command_id=command_id, ) return self.request(r) def edit_application_command_permissions( self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake, payload: interactions.BaseGuildApplicationCommandPermissions, ) -> Response[None]: r = Route( 'PUT', '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions', application_id=application_id, guild_id=guild_id, command_id=command_id, ) return self.request(r, json=payload) def bulk_edit_guild_application_command_permissions( self, application_id: Snowflake, guild_id: Snowflake, payload: List[interactions.PartialGuildApplicationCommandPermissions], ) -> Response[None]: r = Route( 'PUT', '/applications/{application_id}/guilds/{guild_id}/commands/permissions', application_id=application_id, guild_id=guild_id, ) return self.request(r, json=payload) # Misc def application_info(self) -> Response[appinfo.AppInfo]: return self.request(Route('GET', '/oauth2/applications/@me')) async def get_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> 
str: try: data = await self.request(Route('GET', '/gateway')) except HTTPException as exc: raise GatewayNotFound() from exc if zlib: value = '{0}?encoding={1}&v=9&compress=zlib-stream' else: value = '{0}?encoding={1}&v=9' return value.format(data['url'], encoding) async def get_bot_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> Tuple[int, str]: try: data = await self.request(Route('GET', '/gateway/bot')) except HTTPException as exc: raise GatewayNotFound() from exc if zlib: value = '{0}?encoding={1}&v=9&compress=zlib-stream' else: value = '{0}?encoding={1}&v=9' return data['shards'], value.format(data['url'], encoding) def get_user(self, user_id: Snowflake) -> Response[user.User]: return self.request(Route('GET', '/users/{user_id}', user_id=user_id))
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/http.py
http.py
from __future__ import annotations

import asyncio
import datetime
import re
import io

from os import PathLike
# NOTE(review): `Optional` appears twice in this import list — harmless, but one
# occurrence can be dropped.
from typing import Dict, TYPE_CHECKING, Union, List, Optional, Any, Callable, Tuple, ClassVar, Optional, overload, TypeVar, Type

from . import utils
from .reaction import Reaction
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .enums import MessageType, ChannelType, try_enum
from .errors import InvalidArgument, HTTPException
from .components import _component_factory
from .embeds import Embed
from .member import Member
from .flags import MessageFlags
from .file import File
from .utils import escape_mentions, MISSING
from .guild import Guild
from .mixins import Hashable
from .sticker import StickerItem
from .threads import Thread

if TYPE_CHECKING:
    # Payload typings are only needed for annotations; guarded to avoid
    # runtime import cycles.
    from .types.message import (
        Message as MessagePayload,
        Attachment as AttachmentPayload,
        MessageReference as MessageReferencePayload,
        MessageApplication as MessageApplicationPayload,
        MessageActivity as MessageActivityPayload,
        Reaction as ReactionPayload,
    )
    from .types.components import Component as ComponentPayload
    from .types.threads import ThreadArchiveDuration
    from .types.member import (
        Member as MemberPayload,
        UserWithMember as UserWithMemberPayload,
    )
    from .types.user import User as UserPayload
    from .types.embed import Embed as EmbedPayload
    from .abc import Snowflake
    from .abc import GuildChannel, PartialMessageableChannel, MessageableChannel
    from .components import Component
    from .state import ConnectionState
    from .channel import TextChannel, GroupChannel, DMChannel, PartialMessageable
    from .mentions import AllowedMentions
    from .user import User
    from .role import Role
    from .ui.view import View

# Type variable used so MessageReference classmethods return the subclass type.
MR = TypeVar('MR', bound='MessageReference')
# Anything accepted wherever an emoji argument is expected.
EmojiInputType = Union[Emoji, PartialEmoji, str]

__all__ = (
    'Attachment',
    'Message',
    'PartialMessage',
    'MessageReference',
    'DeletedReferencedMessage',
)


def convert_emoji_reaction(emoji):
    """Normalize an emoji-like argument into the ``name:id`` reaction form
    used by the HTTP reaction endpoints.

    Accepts :class:`Reaction`, :class:`Emoji`, :class:`PartialEmoji`, or a
    plain string; raises :exc:`InvalidArgument` for anything else.
    """
    if isinstance(emoji, Reaction):
        # Unwrap a Reaction to its underlying emoji and fall through.
        emoji = emoji.emoji
    if isinstance(emoji, Emoji):
        return f'{emoji.name}:{emoji.id}'
    if isinstance(emoji, PartialEmoji):
        return emoji._as_reaction()
    if isinstance(emoji, str):
        # Reactions can be in :name:id format, but not <:name:id>.
        # No existing emojis have <> in them, so this should be okay.
        return emoji.strip('<>')
    raise InvalidArgument(f'emoji argument must be str, Emoji, or Reaction not {emoji.__class__.__name__}.')


class Attachment(Hashable):
    """Represents an attachment from Discord.

    .. container:: operations

        .. describe:: str(x)

            Returns the URL of the attachment.

        .. describe:: x == y

            Checks if the attachment is equal to another attachment.

        .. describe:: x != y

            Checks if the attachment is not equal to another attachment.

        .. describe:: hash(x)

            Returns the hash of the attachment.

    .. versionchanged:: 1.7
        Attachment can now be casted to :class:`str` and is hashable.

    Attributes
    ------------
    id: :class:`int`
        The attachment ID.
    size: :class:`int`
        The attachment size in bytes.
    height: Optional[:class:`int`]
        The attachment's height, in pixels. Only applicable to images and videos.
    width: Optional[:class:`int`]
        The attachment's width, in pixels. Only applicable to images and videos.
    filename: :class:`str`
        The attachment's filename.
    url: :class:`str`
        The attachment URL. If the message this attachment was attached to is deleted, then this will 404.
    proxy_url: :class:`str`
        The proxy URL. This is a cached version of the :attr:`~Attachment.url` in the
        case of images. When the message is deleted, this URL might be valid for a few
        minutes or not valid at all.
    content_type: Optional[:class:`str`]
        The attachment's `media type <https://en.wikipedia.org/wiki/Media_type>`_

        .. versionadded:: 1.7
    """

    __slots__ = ('id', 'size', 'height', 'width', 'filename', 'url', 'proxy_url', '_http', 'content_type')

    def __init__(self, *, data: AttachmentPayload, state: ConnectionState):
        self.id: int = int(data['id'])
        self.size: int = data['size']
        self.height: Optional[int] = data.get('height')
        self.width: Optional[int] = data.get('width')
        self.filename: str = data['filename']
        # NOTE(review): annotated str but populated via .get(), so this can be
        # None if the payload omits it — __str__ below guards with `or ''`.
        self.url: str = data.get('url')
        self.proxy_url: str = data.get('proxy_url')
        self._http = state.http
        self.content_type: Optional[str] = data.get('content_type')

    def is_spoiler(self) -> bool:
        """:class:`bool`: Whether this attachment contains a spoiler."""
        # Discord marks spoilered uploads by prefixing the filename.
        return self.filename.startswith('SPOILER_')

    def __repr__(self) -> str:
        return f'<Attachment id={self.id} filename={self.filename!r} url={self.url!r}>'

    def __str__(self) -> str:
        return self.url or ''

    async def save(
        self,
        fp: Union[io.BufferedIOBase, PathLike],
        *,
        seek_begin: bool = True,
        use_cached: bool = False,
    ) -> int:
        """|coro|

        Saves this attachment into a file-like object.

        Parameters
        -----------
        fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
            The file-like object to save this attachment to or the filename
            to use. If a filename is passed then a file is created with that
            filename and used instead.
        seek_begin: :class:`bool`
            Whether to seek to the beginning of the file after saving is
            successfully done.
        use_cached: :class:`bool`
            Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
            the attachment. This will allow attachments to be saved after deletion
            more often, compared to the regular URL which is generally deleted right
            after the message is deleted. Note that this can still fail to download
            deleted attachments if too much time has passed and it does not work
            on some types of attachments.

        Raises
        --------
        HTTPException
            Saving the attachment failed.
        NotFound
            The attachment was deleted.

        Returns
        --------
        :class:`int`
            The number of bytes written.
        """
        data = await self.read(use_cached=use_cached)
        if isinstance(fp, io.BufferedIOBase):
            # Caller supplied an open binary stream: write into it directly.
            written = fp.write(data)
            if seek_begin:
                fp.seek(0)
            return written
        else:
            # Otherwise treat fp as a path and create/truncate the file.
            with open(fp, 'wb') as f:
                return f.write(data)

    async def read(self, *, use_cached: bool = False) -> bytes:
        """|coro|

        Retrieves the content of this attachment as a :class:`bytes` object.

        .. versionadded:: 1.1

        Parameters
        -----------
        use_cached: :class:`bool`
            Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
            the attachment. This will allow attachments to be saved after deletion
            more often, compared to the regular URL which is generally deleted right
            after the message is deleted. Note that this can still fail to download
            deleted attachments if too much time has passed and it does not work
            on some types of attachments.

        Raises
        ------
        HTTPException
            Downloading the attachment failed.
        Forbidden
            You do not have permissions to access this attachment
        NotFound
            The attachment was deleted.

        Returns
        -------
        :class:`bytes`
            The contents of the attachment.
        """
        url = self.proxy_url if use_cached else self.url
        data = await self._http.get_from_cdn(url)
        return data

    async def to_file(self, *, use_cached: bool = False, spoiler: bool = False) -> File:
        """|coro|

        Converts the attachment into a :class:`File` suitable for sending via
        :meth:`abc.Messageable.send`.

        .. versionadded:: 1.3

        Parameters
        -----------
        use_cached: :class:`bool`
            Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
            the attachment. This will allow attachments to be saved after deletion
            more often, compared to the regular URL which is generally deleted right
            after the message is deleted. Note that this can still fail to download
            deleted attachments if too much time has passed and it does not work
            on some types of attachments.

            .. versionadded:: 1.4
        spoiler: :class:`bool`
            Whether the file is a spoiler.

            .. versionadded:: 1.4

        Raises
        ------
        HTTPException
            Downloading the attachment failed.
        Forbidden
            You do not have permissions to access this attachment
        NotFound
            The attachment was deleted.

        Returns
        -------
        :class:`File`
            The attachment as a file suitable for sending.
        """
        data = await self.read(use_cached=use_cached)
        return File(io.BytesIO(data), filename=self.filename, spoiler=spoiler)

    def to_dict(self) -> AttachmentPayload:
        """Serialize this attachment back into its payload dict form."""
        result: AttachmentPayload = {
            'filename': self.filename,
            'id': self.id,
            'proxy_url': self.proxy_url,
            'size': self.size,
            'url': self.url,
            'spoiler': self.is_spoiler(),
        }
        # NOTE(review): truthiness checks — a 0-valued height/width would be
        # omitted; presumably Discord never sends 0 here, but confirm.
        if self.height:
            result['height'] = self.height
        if self.width:
            result['width'] = self.width
        if self.content_type:
            result['content_type'] = self.content_type
        return result


class DeletedReferencedMessage:
    """A special sentinel type that denotes whether the
    resolved message referenced message had since been deleted.

    The purpose of this class is to separate referenced messages that could not be
    fetched and those that were previously fetched but have since been deleted.

    .. versionadded:: 1.6
    """

    __slots__ = ('_parent',)

    def __init__(self, parent: MessageReference):
        # Keep only a reference to the parent; all properties delegate to it.
        self._parent: MessageReference = parent

    def __repr__(self) -> str:
        return f"<DeletedReferencedMessage id={self.id} channel_id={self.channel_id} guild_id={self.guild_id!r}>"

    @property
    def id(self) -> int:
        """:class:`int`: The message ID of the deleted referenced message."""
        # the parent's message id won't be None here
        return self._parent.message_id  # type: ignore

    @property
    def channel_id(self) -> int:
        """:class:`int`: The channel ID of the deleted referenced message."""
        return self._parent.channel_id

    @property
    def guild_id(self) -> Optional[int]:
        """Optional[:class:`int`]: The guild ID of the deleted referenced message."""
        return self._parent.guild_id


class MessageReference:
    """Represents a reference to a :class:`~discord.Message`.

    .. versionadded:: 1.5

    .. versionchanged:: 1.6
        This class can now be constructed by users.

    Attributes
    -----------
    message_id: Optional[:class:`int`]
        The id of the message referenced.
    channel_id: :class:`int`
        The channel id of the message referenced.
    guild_id: Optional[:class:`int`]
        The guild id of the message referenced.
    fail_if_not_exists: :class:`bool`
        Whether replying to the referenced message should raise :class:`HTTPException`
        if the message no longer exists or Discord could not fetch the message.

        .. versionadded:: 1.7

    resolved: Optional[Union[:class:`Message`, :class:`DeletedReferencedMessage`]]
        The message that this reference resolved to. If this is ``None``
        then the original message was not fetched either due to the Discord API
        not attempting to resolve it or it not being available at the time of creation.
        If the message was resolved at a prior point but has since been deleted then
        this will be of type :class:`DeletedReferencedMessage`.

        Currently, this is mainly the replied to message when a user replies to a message.

        .. versionadded:: 1.6
    """

    __slots__ = ('message_id', 'channel_id', 'guild_id', 'fail_if_not_exists', 'resolved', '_state')

    def __init__(self, *, message_id: int, channel_id: int, guild_id: Optional[int] = None, fail_if_not_exists: bool = True):
        # User-constructed references have no state; resolution happens only
        # for references built via with_state().
        self._state: Optional[ConnectionState] = None
        self.resolved: Optional[Union[Message, DeletedReferencedMessage]] = None
        self.message_id: Optional[int] = message_id
        self.channel_id: int = channel_id
        self.guild_id: Optional[int] = guild_id
        self.fail_if_not_exists: bool = fail_if_not_exists

    @classmethod
    def with_state(cls: Type[MR], state: ConnectionState, data: MessageReferencePayload) -> MR:
        """Internal constructor used when building a reference from a gateway
        payload; bypasses ``__init__``.
        """
        self = cls.__new__(cls)
        self.message_id = utils._get_as_snowflake(data, 'message_id')
        # NOTE(review): pop() mutates the incoming payload dict — callers
        # apparently do not reuse it afterwards; confirm before changing.
        self.channel_id = int(data.pop('channel_id'))
        self.guild_id = utils._get_as_snowflake(data, 'guild_id')
        self.fail_if_not_exists = data.get('fail_if_not_exists', True)
        self._state = state
        self.resolved = None
        return self

    @classmethod
    def from_message(cls: Type[MR], message: Message, *, fail_if_not_exists: bool = True) -> MR:
        """Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.

        .. versionadded:: 1.6

        Parameters
        ----------
        message: :class:`~discord.Message`
            The message to be converted into a reference.
        fail_if_not_exists: :class:`bool`
            Whether replying to the referenced message should raise :class:`HTTPException`
            if the message no longer exists or Discord could not fetch the message.

            .. versionadded:: 1.7

        Returns
        -------
        :class:`MessageReference`
            A reference to the message.
        """
        self = cls(
            message_id=message.id,
            channel_id=message.channel.id,
            guild_id=getattr(message.guild, 'id', None),
            fail_if_not_exists=fail_if_not_exists,
        )
        self._state = message._state
        return self

    @property
    def cached_message(self) -> Optional[Message]:
        """Optional[:class:`~discord.Message`]: The cached message, if found in the internal message cache."""
        # Short-circuits to None for user-constructed references (no state).
        return self._state and self._state._get_message(self.message_id)

    @property
    def jump_url(self) -> str:
        """:class:`str`: Returns a URL that allows the client to jump to the referenced message.

        .. versionadded:: 1.7
        """
        # DM channels use the literal '@me' in place of a guild id.
        guild_id = self.guild_id if self.guild_id is not None else '@me'
        return f'https://discord.com/channels/{guild_id}/{self.channel_id}/{self.message_id}'

    def __repr__(self) -> str:
        return f'<MessageReference message_id={self.message_id!r} channel_id={self.channel_id!r} guild_id={self.guild_id!r}>'

    def to_dict(self) -> MessageReferencePayload:
        """Serialize this reference for the API request body."""
        result: MessageReferencePayload = {'message_id': self.message_id} if self.message_id is not None else {}
        result['channel_id'] = self.channel_id
        if self.guild_id is not None:
            result['guild_id'] = self.guild_id
        # NOTE(review): fail_if_not_exists is a bool, so this check is always
        # true and the key is always serialized.
        if self.fail_if_not_exists is not None:
            result['fail_if_not_exists'] = self.fail_if_not_exists
        return result

    to_message_reference_dict = to_dict


def flatten_handlers(cls):
    # Class decorator: collects every `_handle_*` method into cls._HANDLERS
    # (stripping the prefix), keeping '_handle_member' for last so 'author'
    # is always processed before 'member' during updates.
    prefix = len('_handle_')
    handlers = [
        (key[prefix:], value)
        for key, value in cls.__dict__.items()
        if key.startswith('_handle_') and key != '_handle_member'
    ]

    # store _handle_member last
    handlers.append(('member', cls._handle_member))
    cls._HANDLERS = handlers
    cls._CACHED_SLOTS = [attr for attr in
cls.__slots__ if attr.startswith('_cs_')] return cls @flatten_handlers class Message(Hashable): r"""Represents a message from Discord. .. container:: operations .. describe:: x == y Checks if two messages are equal. .. describe:: x != y Checks if two messages are not equal. .. describe:: hash(x) Returns the message's hash. Attributes ----------- tts: :class:`bool` Specifies if the message was done with text-to-speech. This can only be accurately received in :func:`on_message` due to a discord limitation. type: :class:`MessageType` The type of message. In most cases this should not be checked, but it is helpful in cases where it might be a system message for :attr:`system_content`. author: Union[:class:`Member`, :class:`abc.User`] A :class:`Member` that sent the message. If :attr:`channel` is a private channel or the user has the left the guild, then it is a :class:`User` instead. content: :class:`str` The actual contents of the message. nonce: Optional[Union[:class:`str`, :class:`int`]] The value used by the discord guild and the client to verify that the message is successfully sent. This is not stored long term within Discord's servers and is only used ephemerally. embeds: List[:class:`Embed`] A list of embeds the message has. channel: Union[:class:`TextChannel`, :class:`Thread`, :class:`DMChannel`, :class:`GroupChannel`, :class:`PartialMessageable`] The :class:`TextChannel` or :class:`Thread` that the message was sent from. Could be a :class:`DMChannel` or :class:`GroupChannel` if it's a private message. reference: Optional[:class:`~discord.MessageReference`] The message that this message references. This is only applicable to messages of type :attr:`MessageType.pins_add`, crossposted messages created by a followed channel integration, or message replies. .. versionadded:: 1.5 mention_everyone: :class:`bool` Specifies if the message mentions everyone. .. note:: This does not check if the ``@everyone`` or the ``@here`` text is in the message itself. 
Rather this boolean indicates if either the ``@everyone`` or the ``@here`` text is in the message **and** it did end up mentioning. mentions: List[:class:`abc.User`] A list of :class:`Member` that were mentioned. If the message is in a private message then the list will be of :class:`User` instead. For messages that are not of type :attr:`MessageType.default`\, this array can be used to aid in system messages. For more information, see :attr:`system_content`. .. warning:: The order of the mentions list is not in any particular order so you should not rely on it. This is a Discord limitation, not one with the library. channel_mentions: List[:class:`abc.GuildChannel`] A list of :class:`abc.GuildChannel` that were mentioned. If the message is in a private message then the list is always empty. role_mentions: List[:class:`Role`] A list of :class:`Role` that were mentioned. If the message is in a private message then the list is always empty. id: :class:`int` The message ID. webhook_id: Optional[:class:`int`] If this message was sent by a webhook, then this is the webhook ID's that sent this message. attachments: List[:class:`Attachment`] A list of attachments given to a message. pinned: :class:`bool` Specifies if the message is currently pinned. flags: :class:`MessageFlags` Extra features of the message. .. versionadded:: 1.3 reactions : List[:class:`Reaction`] Reactions to a message. Reactions can be either custom emoji or standard unicode emoji. activity: Optional[:class:`dict`] The activity associated with this message. Sent with Rich-Presence related messages that for example, request joining, spectating, or listening to or with another member. It is a dictionary with the following optional keys: - ``type``: An integer denoting the type of message activity being requested. - ``party_id``: The party ID associated with the party. application: Optional[:class:`dict`] The rich presence enabled application associated with this message. 
It is a dictionary with the following keys: - ``id``: A string representing the application's ID. - ``name``: A string representing the application's name. - ``description``: A string representing the application's description. - ``icon``: A string representing the icon ID of the application. - ``cover_image``: A string representing the embed's image asset ID. stickers: List[:class:`StickerItem`] A list of sticker items given to the message. .. versionadded:: 1.6 components: List[:class:`Component`] A list of components in the message. .. versionadded:: 2.0 guild: Optional[:class:`Guild`] The guild that the message belongs to, if applicable. """ __slots__ = ( '_state', '_edited_timestamp', '_cs_channel_mentions', '_cs_raw_mentions', '_cs_clean_content', '_cs_raw_channel_mentions', '_cs_raw_role_mentions', '_cs_system_content', 'tts', 'content', 'channel', 'webhook_id', 'mention_everyone', 'embeds', 'id', 'mentions', 'author', 'attachments', 'nonce', 'pinned', 'role_mentions', 'type', 'flags', 'reactions', 'reference', 'application', 'activity', 'stickers', 'components', 'guild', ) if TYPE_CHECKING: _HANDLERS: ClassVar[List[Tuple[str, Callable[..., None]]]] _CACHED_SLOTS: ClassVar[List[str]] guild: Optional[Guild] reference: Optional[MessageReference] mentions: List[Union[User, Member]] author: Union[User, Member] role_mentions: List[Role] def __init__( self, *, state: ConnectionState, channel: MessageableChannel, data: MessagePayload, ): self._state: ConnectionState = state self.id: int = int(data['id']) self.webhook_id: Optional[int] = utils._get_as_snowflake(data, 'webhook_id') self.reactions: List[Reaction] = [Reaction(message=self, data=d) for d in data.get('reactions', [])] self.attachments: List[Attachment] = [Attachment(data=a, state=self._state) for a in data['attachments']] self.embeds: List[Embed] = [Embed.from_dict(a) for a in data['embeds']] self.application: Optional[MessageApplicationPayload] = data.get('application') self.activity: 
Optional[MessageActivityPayload] = data.get('activity') self.channel: MessageableChannel = channel self._edited_timestamp: Optional[datetime.datetime] = utils.parse_time(data['edited_timestamp']) self.type: MessageType = try_enum(MessageType, data['type']) self.pinned: bool = data['pinned'] self.flags: MessageFlags = MessageFlags._from_value(data.get('flags', 0)) self.mention_everyone: bool = data['mention_everyone'] self.tts: bool = data['tts'] self.content: str = data['content'] self.nonce: Optional[Union[int, str]] = data.get('nonce') self.stickers: List[StickerItem] = [StickerItem(data=d, state=state) for d in data.get('sticker_items', [])] self.components: List[Component] = [_component_factory(d) for d in data.get('components', [])] try: # if the channel doesn't have a guild attribute, we handle that self.guild = channel.guild # type: ignore except AttributeError: self.guild = state._get_guild(utils._get_as_snowflake(data, 'guild_id')) try: ref = data['message_reference'] except KeyError: self.reference = None else: self.reference = ref = MessageReference.with_state(state, ref) try: resolved = data['referenced_message'] except KeyError: pass else: if resolved is None: ref.resolved = DeletedReferencedMessage(ref) else: # Right now the channel IDs match but maybe in the future they won't. 
if ref.channel_id == channel.id: chan = channel else: chan, _ = state._get_guild_channel(resolved) # the channel will be the correct type here ref.resolved = self.__class__(channel=chan, data=resolved, state=state) # type: ignore for handler in ('author', 'member', 'mentions', 'mention_roles'): try: getattr(self, f'_handle_{handler}')(data[handler]) except KeyError: continue def __repr__(self) -> str: name = self.__class__.__name__ return ( f'<{name} id={self.id} channel={self.channel!r} type={self.type!r} author={self.author!r} flags={self.flags!r}>' ) def _try_patch(self, data, key, transform=None) -> None: try: value = data[key] except KeyError: pass else: if transform is None: setattr(self, key, value) else: setattr(self, key, transform(value)) def _add_reaction(self, data, emoji, user_id) -> Reaction: reaction = utils.find(lambda r: r.emoji == emoji, self.reactions) is_me = data['me'] = user_id == self._state.self_id if reaction is None: reaction = Reaction(message=self, data=data, emoji=emoji) self.reactions.append(reaction) else: reaction.count += 1 if is_me: reaction.me = is_me return reaction def _remove_reaction(self, data: ReactionPayload, emoji: EmojiInputType, user_id: int) -> Reaction: reaction = utils.find(lambda r: r.emoji == emoji, self.reactions) if reaction is None: # already removed? raise ValueError('Emoji already removed?') # if reaction isn't in the list, we crash. This means discord # sent bad data, or we stored improperly reaction.count -= 1 if user_id == self._state.self_id: reaction.me = False if reaction.count == 0: # this raises ValueError if something went wrong as well. 
self.reactions.remove(reaction) return reaction def _clear_emoji(self, emoji) -> Optional[Reaction]: to_check = str(emoji) for index, reaction in enumerate(self.reactions): if str(reaction.emoji) == to_check: break else: # didn't find anything so just return return del self.reactions[index] return reaction def _update(self, data): # In an update scheme, 'author' key has to be handled before 'member' # otherwise they overwrite each other which is undesirable. # Since there's no good way to do this we have to iterate over every # handler rather than iterating over the keys which is a little slower for key, handler in self._HANDLERS: try: value = data[key] except KeyError: continue else: handler(self, value) # clear the cached properties for attr in self._CACHED_SLOTS: try: delattr(self, attr) except AttributeError: pass def _handle_edited_timestamp(self, value: str) -> None: self._edited_timestamp = utils.parse_time(value) def _handle_pinned(self, value: bool) -> None: self.pinned = value def _handle_flags(self, value: int) -> None: self.flags = MessageFlags._from_value(value) def _handle_application(self, value: MessageApplicationPayload) -> None: self.application = value def _handle_activity(self, value: MessageActivityPayload) -> None: self.activity = value def _handle_mention_everyone(self, value: bool) -> None: self.mention_everyone = value def _handle_tts(self, value: bool) -> None: self.tts = value def _handle_type(self, value: int) -> None: self.type = try_enum(MessageType, value) def _handle_content(self, value: str) -> None: self.content = value def _handle_attachments(self, value: List[AttachmentPayload]) -> None: self.attachments = [Attachment(data=a, state=self._state) for a in value] def _handle_embeds(self, value: List[EmbedPayload]) -> None: self.embeds = [Embed.from_dict(data) for data in value] def _handle_nonce(self, value: Union[str, int]) -> None: self.nonce = value def _handle_author(self, author: UserPayload) -> None: self.author = 
self._state.store_user(author) if isinstance(self.guild, Guild): found = self.guild.get_member(self.author.id) if found is not None: self.author = found def _handle_member(self, member: MemberPayload) -> None: # The gateway now gives us full Member objects sometimes with the following keys # deaf, mute, joined_at, roles # For the sake of performance I'm going to assume that the only # field that needs *updating* would be the joined_at field. # If there is no Member object (for some strange reason), then we can upgrade # ourselves to a more "partial" member object. author = self.author try: # Update member reference author._update_from_message(member) # type: ignore except AttributeError: # It's a user here # TODO: consider adding to cache here self.author = Member._from_message(message=self, data=member) def _handle_mentions(self, mentions: List[UserWithMemberPayload]) -> None: self.mentions = r = [] guild = self.guild state = self._state if not isinstance(guild, Guild): self.mentions = [state.store_user(m) for m in mentions] return for mention in filter(None, mentions): id_search = int(mention['id']) member = guild.get_member(id_search) if member is not None: r.append(member) else: r.append(Member._try_upgrade(data=mention, guild=guild, state=state)) def _handle_mention_roles(self, role_mentions: List[int]) -> None: self.role_mentions = [] if isinstance(self.guild, Guild): for role_id in map(int, role_mentions): role = self.guild.get_role(role_id) if role is not None: self.role_mentions.append(role) def _handle_components(self, components: List[ComponentPayload]): self.components = [_component_factory(d) for d in components] def _rebind_cached_references(self, new_guild: Guild, new_channel: Union[TextChannel, Thread]) -> None: self.guild = new_guild self.channel = new_channel @utils.cached_slot_property('_cs_raw_mentions') def raw_mentions(self) -> List[int]: """List[:class:`int`]: A property that returns an array of user IDs matched with the syntax of 
``<@user_id>`` in the message content. This allows you to receive the user IDs of mentioned users even in a private message context. """ return [int(x) for x in re.findall(r'<@!?([0-9]{15,20})>', self.content)] @utils.cached_slot_property('_cs_raw_channel_mentions') def raw_channel_mentions(self) -> List[int]: """List[:class:`int`]: A property that returns an array of channel IDs matched with the syntax of ``<#channel_id>`` in the message content. """ return [int(x) for x in re.findall(r'<#([0-9]{15,20})>', self.content)] @utils.cached_slot_property('_cs_raw_role_mentions') def raw_role_mentions(self) -> List[int]: """List[:class:`int`]: A property that returns an array of role IDs matched with the syntax of ``<@&role_id>`` in the message content. """ return [int(x) for x in re.findall(r'<@&([0-9]{15,20})>', self.content)] @utils.cached_slot_property('_cs_channel_mentions') def channel_mentions(self) -> List[GuildChannel]: if self.guild is None: return [] it = filter(None, map(self.guild.get_channel, self.raw_channel_mentions)) return utils._unique(it) @utils.cached_slot_property('_cs_clean_content') def clean_content(self) -> str: """:class:`str`: A property that returns the content in a "cleaned up" manner. This basically means that mentions are transformed into the way the client shows it. e.g. ``<#id>`` will transform into ``#name``. This will also transform @everyone and @here mentions into non-mentions. .. note:: This *does not* affect markdown. If you want to escape or remove markdown then use :func:`utils.escape_markdown` or :func:`utils.remove_markdown` respectively, along with this function. """ # fmt: off transformations = { re.escape(f'<#{channel.id}>'): '#' + channel.name for channel in self.channel_mentions } mention_transforms = { re.escape(f'<@{member.id}>'): '@' + member.display_name for member in self.mentions } # add the <@!user_id> cases as well.. 
second_mention_transforms = { re.escape(f'<@!{member.id}>'): '@' + member.display_name for member in self.mentions } transformations.update(mention_transforms) transformations.update(second_mention_transforms) if self.guild is not None: role_transforms = { re.escape(f'<@&{role.id}>'): '@' + role.name for role in self.role_mentions } transformations.update(role_transforms) # fmt: on def repl(obj): return transformations.get(re.escape(obj.group(0)), '') pattern = re.compile('|'.join(transformations.keys())) result = pattern.sub(repl, self.content) return escape_mentions(result) @property def created_at(self) -> datetime.datetime: """:class:`datetime.datetime`: The message's creation time in UTC.""" return utils.snowflake_time(self.id) @property def edited_at(self) -> Optional[datetime.datetime]: """Optional[:class:`datetime.datetime`]: An aware UTC datetime object containing the edited time of the message.""" return self._edited_timestamp @property def jump_url(self) -> str: """:class:`str`: Returns a URL that allows the client to jump to this message.""" guild_id = getattr(self.guild, 'id', '@me') return f'https://discord.com/channels/{guild_id}/{self.channel.id}/{self.id}' def is_system(self) -> bool: """:class:`bool`: Whether the message is a system message. A system message is a message that is constructed entirely by the Discord API in response to something. .. versionadded:: 1.3 """ return self.type not in ( MessageType.default, MessageType.reply, MessageType.application_command, MessageType.thread_starter_message, ) @utils.cached_slot_property('_cs_system_content') def system_content(self): r""":class:`str`: A property that returns the content that is rendered regardless of the :attr:`Message.type`. In the case of :attr:`MessageType.default` and :attr:`MessageType.reply`\, this just returns the regular :attr:`Message.content`. Otherwise this returns an English message denoting the contents of the system message. 
""" if self.type is MessageType.default: return self.content if self.type is MessageType.recipient_add: if self.channel.type is ChannelType.group: return f'{self.author.name} added {self.mentions[0].name} to the group.' else: return f'{self.author.name} added {self.mentions[0].name} to the thread.' if self.type is MessageType.recipient_remove: if self.channel.type is ChannelType.group: return f'{self.author.name} removed {self.mentions[0].name} from the group.' else: return f'{self.author.name} removed {self.mentions[0].name} from the thread.' if self.type is MessageType.channel_name_change: return f'{self.author.name} changed the channel name: **{self.content}**' if self.type is MessageType.channel_icon_change: return f'{self.author.name} changed the channel icon.' if self.type is MessageType.pins_add: return f'{self.author.name} pinned a message to this channel.' if self.type is MessageType.new_member: formats = [ "{0} joined the party.", "{0} is here.", "Welcome, {0}. We hope you brought pizza.", "A wild {0} appeared.", "{0} just landed.", "{0} just slid into the server.", "{0} just showed up!", "Welcome {0}. Say hi!", "{0} hopped into the server.", "Everyone welcome {0}!", "Glad you're here, {0}.", "Good to see you, {0}.", "Yay you made it, {0}!", ] created_at_ms = int(self.created_at.timestamp() * 1000) return formats[created_at_ms % len(formats)].format(self.author.name) if self.type is MessageType.premium_guild_subscription: if not self.content: return f'{self.author.name} just boosted the server!' else: return f'{self.author.name} just boosted the server **{self.content}** times!' if self.type is MessageType.premium_guild_tier_1: if not self.content: return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 1!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! 
{self.guild} has achieved **Level 1!**' if self.type is MessageType.premium_guild_tier_2: if not self.content: return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 2!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 2!**' if self.type is MessageType.premium_guild_tier_3: if not self.content: return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 3!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 3!**' if self.type is MessageType.channel_follow_add: return f'{self.author.name} has added {self.content} to this channel' if self.type is MessageType.guild_stream: # the author will be a Member return f'{self.author.name} is live! Now streaming {self.author.activity.name}' # type: ignore if self.type is MessageType.guild_discovery_disqualified: return 'This server has been removed from Server Discovery because it no longer passes all the requirements. Check Server Settings for more details.' if self.type is MessageType.guild_discovery_requalified: return 'This server is eligible for Server Discovery again and has been automatically relisted!' if self.type is MessageType.guild_discovery_grace_period_initial_warning: return 'This server has failed Discovery activity requirements for 1 week. If this server fails for 4 weeks in a row, it will be automatically removed from Discovery.' if self.type is MessageType.guild_discovery_grace_period_final_warning: return 'This server has failed Discovery activity requirements for 3 weeks in a row. If this server fails for 1 more week, it will be removed from Discovery.' if self.type is MessageType.thread_created: return f'{self.author.name} started a thread: **{self.content}**. See all **threads**.' 
if self.type is MessageType.reply: return self.content if self.type is MessageType.thread_starter_message: if self.reference is None or self.reference.resolved is None: return 'Sorry, we couldn\'t load the first message in this thread' # the resolved message for the reference will be a Message return self.reference.resolved.content # type: ignore if self.type is MessageType.guild_invite_reminder: return 'Wondering who to invite?\nStart by inviting anyone who can help you build the server!' async def delete(self, *, delay: Optional[float] = None) -> None: """|coro| Deletes the message. Your own messages could be deleted without any proper permissions. However to delete other people's messages, you need the :attr:`~Permissions.manage_messages` permission. .. versionchanged:: 1.1 Added the new ``delay`` keyword-only parameter. Parameters ----------- delay: Optional[:class:`float`] If provided, the number of seconds to wait in the background before deleting the message. If the deletion fails then it is silently ignored. Raises ------ Forbidden You do not have proper permissions to delete the message. NotFound The message was deleted already HTTPException Deleting the message failed. """ if delay is not None: async def delete(delay: float): await asyncio.sleep(delay) try: await self._state.http.delete_message(self.channel.id, self.id) except HTTPException: pass asyncio.create_task(delete(delay)) else: await self._state.http.delete_message(self.channel.id, self.id) @overload async def edit( self, *, content: Optional[str] = ..., embed: Optional[Embed] = ..., attachments: List[Attachment] = ..., suppress: bool = ..., delete_after: Optional[float] = ..., allowed_mentions: Optional[AllowedMentions] = ..., view: Optional[View] = ..., ) -> Message: ... 
    @overload
    async def edit(
        self,
        *,
        content: Optional[str] = ...,
        embeds: List[Embed] = ...,
        attachments: List[Attachment] = ...,
        suppress: bool = ...,
        delete_after: Optional[float] = ...,
        allowed_mentions: Optional[AllowedMentions] = ...,
        view: Optional[View] = ...,
    ) -> Message:
        ...

    async def edit(
        self,
        content: Optional[str] = MISSING,
        embed: Optional[Embed] = MISSING,
        embeds: List[Embed] = MISSING,
        attachments: List[Attachment] = MISSING,
        suppress: bool = MISSING,
        delete_after: Optional[float] = None,
        allowed_mentions: Optional[AllowedMentions] = MISSING,
        view: Optional[View] = MISSING,
    ) -> Message:
        """|coro|

        Edits the message.

        The content must be able to be transformed into a string via ``str(content)``.

        .. versionchanged:: 1.3
            The ``suppress`` keyword-only parameter was added.

        Parameters
        -----------
        content: Optional[:class:`str`]
            The new content to replace the message with.
            Could be ``None`` to remove the content.
        embed: Optional[:class:`Embed`]
            The new embed to replace the original with.
            Could be ``None`` to remove the embed.
        embeds: List[:class:`Embed`]
            The new embeds to replace the original with. Must be a maximum of 10.
            To remove all embeds ``[]`` should be passed.

            .. versionadded:: 2.0
        attachments: List[:class:`Attachment`]
            A list of attachments to keep in the message. If ``[]`` is passed
            then all attachments are removed.
        suppress: :class:`bool`
            Whether to suppress embeds for the message. This removes
            all the embeds if set to ``True``. If set to ``False``
            this brings the embeds back if they were suppressed.
            Using this parameter requires :attr:`~.Permissions.manage_messages`.
        delete_after: Optional[:class:`float`]
            If provided, the number of seconds to wait in the background
            before deleting the message we just edited. If the deletion fails,
            then it is silently ignored.
        allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
            Controls the mentions being processed in this message.
            If this is passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
            The merging behaviour only overrides attributes that have been explicitly passed
            to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
            If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
            are used instead.

            .. versionadded:: 1.4
        view: Optional[:class:`~discord.ui.View`]
            The updated view to update this message with. If ``None`` is passed then
            the view is removed.

        Raises
        -------
        HTTPException
            Editing the message failed.
        Forbidden
            Tried to suppress a message without permissions or
            edited a message's content or embed that isn't yours.
        ~discord.InvalidArgument
            You specified both ``embed`` and ``embeds``
        """
        # Build the JSON payload incrementally; a MISSING sentinel means
        # "do not touch this field on Discord's side".
        payload: Dict[str, Any] = {}
        if content is not MISSING:
            if content is not None:
                payload['content'] = str(content)
            else:
                payload['content'] = None
        if embed is not MISSING and embeds is not MISSING:
            raise InvalidArgument('cannot pass both embed and embeds parameter to edit()')

        if embed is not MISSING:
            if embed is None:
                payload['embeds'] = []
            else:
                payload['embeds'] = [embed.to_dict()]
        elif embeds is not MISSING:
            payload['embeds'] = [e.to_dict() for e in embeds]

        if suppress is not MISSING:
            # Only toggle the suppress-embeds bit; every other flag on the
            # message is carried over unchanged.
            flags = MessageFlags._from_value(self.flags.value)
            flags.suppress_embeds = suppress
            payload['flags'] = flags.value

        if allowed_mentions is MISSING:
            # No explicit override: fall back to the client-wide allowed
            # mentions, but only for messages authored by this client.
            if self._state.allowed_mentions is not None and self.author.id == self._state.self_id:
                payload['allowed_mentions'] = self._state.allowed_mentions.to_dict()
        else:
            if allowed_mentions is not None:
                if self._state.allowed_mentions is not None:
                    payload['allowed_mentions'] = self._state.allowed_mentions.merge(allowed_mentions).to_dict()
                else:
                    payload['allowed_mentions'] = allowed_mentions.to_dict()

        if attachments is not MISSING:
            payload['attachments'] = [a.to_dict() for a in attachments]

        if view is not MISSING:
            # Stop dispatching component interactions to any previously
            # attached view before swapping it out.
            self._state.prevent_view_updates_for(self.id)
            if view:
                payload['components'] = view.to_components()
            else:
                payload['components'] = []

        data = await self._state.http.edit_message(self.channel.id, self.id, **payload)
        message = Message(state=self._state, channel=self.channel, data=data)

        if view and not view.is_finished():
            self._state.store_view(view, self.id)

        if delete_after is not None:
            await self.delete(delay=delete_after)

        return message

    async def publish(self) -> None:
        """|coro|

        Publishes this message to your announcement channel.

        You must have the :attr:`~Permissions.send_messages` permission to do this.

        If the message is not your own then the :attr:`~Permissions.manage_messages`
        permission is also needed.

        Raises
        -------
        Forbidden
            You do not have the proper permissions to publish this message.
        HTTPException
            Publishing the message failed.
        """
        await self._state.http.publish_message(self.channel.id, self.id)

    async def pin(self, *, reason: Optional[str] = None) -> None:
        """|coro|

        Pins the message.

        You must have the :attr:`~Permissions.manage_messages` permission to do
        this in a non-private channel context.

        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for pinning the message. Shows up on the audit log.

            .. versionadded:: 1.4

        Raises
        -------
        Forbidden
            You do not have permissions to pin the message.
        NotFound
            The message or channel was not found or deleted.
        HTTPException
            Pinning the message failed, probably due to the channel
            having more than 50 pinned messages.
        """
        await self._state.http.pin_message(self.channel.id, self.id, reason=reason)
        # Keep the cached attribute in sync with the change we just made.
        self.pinned = True

    async def unpin(self, *, reason: Optional[str] = None) -> None:
        """|coro|

        Unpins the message.

        You must have the :attr:`~Permissions.manage_messages` permission to do
        this in a non-private channel context.

        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for unpinning the message. Shows up on the audit log.

            .. versionadded:: 1.4

        Raises
        -------
        Forbidden
            You do not have permissions to unpin the message.
        NotFound
            The message or channel was not found or deleted.
        HTTPException
            Unpinning the message failed.
        """
        await self._state.http.unpin_message(self.channel.id, self.id, reason=reason)
        # Keep the cached attribute in sync with the change we just made.
        self.pinned = False

    async def add_reaction(self, emoji: EmojiInputType) -> None:
        """|coro|

        Add a reaction to the message.

        The emoji may be a unicode emoji or a custom guild :class:`Emoji`.

        You must have the :attr:`~Permissions.read_message_history` permission
        to use this. If nobody else has reacted to the message using this
        emoji, the :attr:`~Permissions.add_reactions` permission is required.

        Parameters
        ------------
        emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
            The emoji to react with.

        Raises
        --------
        HTTPException
            Adding the reaction failed.
        Forbidden
            You do not have the proper permissions to react to the message.
        NotFound
            The emoji you specified was not found.
        InvalidArgument
            The emoji parameter is invalid.
        """
        # Normalise any accepted emoji form into the API's reaction string.
        emoji = convert_emoji_reaction(emoji)
        await self._state.http.add_reaction(self.channel.id, self.id, emoji)

    async def remove_reaction(self, emoji: Union[EmojiInputType, Reaction], member: Snowflake) -> None:
        """|coro|

        Remove a reaction by the member from the message.

        The emoji may be a unicode emoji or a custom guild :class:`Emoji`.

        If the reaction is not your own (i.e. ``member`` parameter is not you) then
        the :attr:`~Permissions.manage_messages` permission is needed.

        The ``member`` parameter must represent a member and meet
        the :class:`abc.Snowflake` abc.

        Parameters
        ------------
        emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
            The emoji to remove.
        member: :class:`abc.Snowflake`
            The member for which to remove the reaction.

        Raises
        --------
        HTTPException
            Removing the reaction failed.
        Forbidden
            You do not have the proper permissions to remove the reaction.
        NotFound
            The member or emoji you specified was not found.
        InvalidArgument
            The emoji parameter is invalid.
        """
        emoji = convert_emoji_reaction(emoji)

        if member.id == self._state.self_id:
            # Removing our own reaction uses a dedicated endpoint that does
            # not require manage_messages.
            await self._state.http.remove_own_reaction(self.channel.id, self.id, emoji)
        else:
            await self._state.http.remove_reaction(self.channel.id, self.id, emoji, member.id)

    async def clear_reaction(self, emoji: Union[EmojiInputType, Reaction]) -> None:
        """|coro|

        Clears a specific reaction from the message.

        The emoji may be a unicode emoji or a custom guild :class:`Emoji`.

        You need the :attr:`~Permissions.manage_messages` permission to use this.

        .. versionadded:: 1.3

        Parameters
        -----------
        emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
            The emoji to clear.

        Raises
        --------
        HTTPException
            Clearing the reaction failed.
        Forbidden
            You do not have the proper permissions to clear the reaction.
        NotFound
            The emoji you specified was not found.
        InvalidArgument
            The emoji parameter is invalid.
        """
        emoji = convert_emoji_reaction(emoji)
        await self._state.http.clear_single_reaction(self.channel.id, self.id, emoji)

    async def clear_reactions(self) -> None:
        """|coro|

        Removes all the reactions from the message.

        You need the :attr:`~Permissions.manage_messages` permission to use this.

        Raises
        --------
        HTTPException
            Removing the reactions failed.
        Forbidden
            You do not have the proper permissions to remove all the reactions.
        """
        await self._state.http.clear_reactions(self.channel.id, self.id)

    async def create_thread(self, *, name: str, auto_archive_duration: ThreadArchiveDuration = MISSING) -> Thread:
        """|coro|

        Creates a public thread from this message.

        You must have :attr:`~discord.Permissions.create_public_threads` in order to
        create a public thread from a message.

        The channel this message belongs in must be a :class:`TextChannel`.

        .. versionadded:: 2.0

        Parameters
        -----------
        name: :class:`str`
            The name of the thread.
        auto_archive_duration: :class:`int`
            The duration in minutes before a thread is automatically archived for inactivity.
            If not provided, the channel's default auto archive duration is used.

        Raises
        -------
        Forbidden
            You do not have permissions to create a thread.
        HTTPException
            Creating the thread failed.
        InvalidArgument
            This message does not have guild info attached.

        Returns
        --------
        :class:`.Thread`
            The created thread.
        """
        if self.guild is None:
            raise InvalidArgument('This message does not have guild info attached.')

        # DM channels have no default; fall back to Discord's 24h (1440 min).
        default_auto_archive_duration: ThreadArchiveDuration = getattr(self.channel, 'default_auto_archive_duration', 1440)
        data = await self._state.http.start_thread_with_message(
            self.channel.id,
            self.id,
            name=name,
            auto_archive_duration=auto_archive_duration or default_auto_archive_duration,
        )
        return Thread(guild=self.guild, state=self._state, data=data)

    async def reply(self, content: Optional[str] = None, **kwargs) -> Message:
        """|coro|

        A shortcut method to :meth:`.abc.Messageable.send` to reply to the
        :class:`.Message`.

        .. versionadded:: 1.6

        Raises
        --------
        ~discord.HTTPException
            Sending the message failed.
        ~discord.Forbidden
            You do not have the proper permissions to send the message.
        ~discord.InvalidArgument
            The ``files`` list is not of the appropriate size or
            you specified both ``file`` and ``files``.

        Returns
        ---------
        :class:`.Message`
            The message that was sent.
        """
        return await self.channel.send(content, reference=self, **kwargs)

    def to_reference(self, *, fail_if_not_exists: bool = True) -> MessageReference:
        """Creates a :class:`~discord.MessageReference` from the current message.

        .. versionadded:: 1.6

        Parameters
        ----------
        fail_if_not_exists: :class:`bool`
            Whether replying using the message reference should raise :class:`HTTPException`
            if the message no longer exists or Discord could not fetch the message.

            .. versionadded:: 1.7

        Returns
        ---------
        :class:`~discord.MessageReference`
            The reference to this message.
        """
        return MessageReference.from_message(self, fail_if_not_exists=fail_if_not_exists)

    def to_message_reference_dict(self) -> MessageReferencePayload:
        # Serialise this message into the ``message_reference`` payload shape;
        # guild_id is only included when the message came from a guild.
        data: MessageReferencePayload = {
            'message_id': self.id,
            'channel_id': self.channel.id,
        }

        if self.guild is not None:
            data['guild_id'] = self.guild.id

        return data


class PartialMessage(Hashable):
    """Represents a partial message to aid with working messages when only
    a message and channel ID are present.

    There are two ways to construct this class. The first one is through
    the constructor itself, and the second is via the following:

    - :meth:`TextChannel.get_partial_message`
    - :meth:`Thread.get_partial_message`
    - :meth:`DMChannel.get_partial_message`

    Note that this class is trimmed down and has no rich attributes.

    .. versionadded:: 1.6

    .. container:: operations

        .. describe:: x == y

            Checks if two partial messages are equal.

        .. describe:: x != y

            Checks if two partial messages are not equal.

        .. describe:: hash(x)

            Returns the partial message's hash.

    Attributes
    -----------
    channel: Union[:class:`TextChannel`, :class:`Thread`, :class:`DMChannel`]
        The channel associated with this partial message.
    id: :class:`int`
        The message ID.
    """

    __slots__ = ('channel', 'id', '_cs_guild', '_state')

    # These methods only need the channel and message IDs, so they can be
    # borrowed wholesale from the full Message implementation.
    jump_url: str = Message.jump_url  # type: ignore
    delete = Message.delete
    publish = Message.publish
    pin = Message.pin
    unpin = Message.unpin
    add_reaction = Message.add_reaction
    remove_reaction = Message.remove_reaction
    clear_reaction = Message.clear_reaction
    clear_reactions = Message.clear_reactions
    reply = Message.reply
    to_reference = Message.to_reference
    to_message_reference_dict = Message.to_message_reference_dict

    def __init__(self, *, channel: PartialMessageableChannel, id: int):
        # Only messageable channel types can host a message.
        if channel.type not in (
            ChannelType.text,
            ChannelType.news,
            ChannelType.private,
            ChannelType.news_thread,
            ChannelType.public_thread,
            ChannelType.private_thread,
        ):
            raise TypeError(f'Expected TextChannel, DMChannel or Thread not {type(channel)!r}')

        self.channel: PartialMessageableChannel = channel
        self._state: ConnectionState = channel._state
        self.id: int = id

    def _update(self, data) -> None:
        # This is used for duck typing purposes.
        # Just do nothing with the data.
        pass

    # Also needed for duck typing purposes
    # n.b. not exposed
    pinned = property(None, lambda x, y: None)

    def __repr__(self) -> str:
        return f'<PartialMessage id={self.id} channel={self.channel!r}>'

    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: The partial message's creation time in UTC."""
        return utils.snowflake_time(self.id)

    @utils.cached_slot_property('_cs_guild')
    def guild(self) -> Optional[Guild]:
        """Optional[:class:`Guild`]: The guild that the partial message belongs to, if applicable."""
        return getattr(self.channel, 'guild', None)

    async def fetch(self) -> Message:
        """|coro|

        Fetches the partial message to a full :class:`Message`.

        Raises
        --------
        NotFound
            The message was not found.
        Forbidden
            You do not have the permissions required to get a message.
        HTTPException
            Retrieving the message failed.

        Returns
        --------
        :class:`Message`
            The full message.
        """
        data = await self._state.http.get_message(self.channel.id, self.id)
        return self._state.create_message(channel=self.channel, data=data)

    async def edit(self, **fields: Any) -> Optional[Message]:
        """|coro|

        Edits the message.

        The content must be able to be transformed into a string via ``str(content)``.

        .. versionchanged:: 1.7
            :class:`discord.Message` is returned instead of ``None`` if an edit took place.

        Parameters
        -----------
        content: Optional[:class:`str`]
            The new content to replace the message with.
            Could be ``None`` to remove the content.
        embed: Optional[:class:`Embed`]
            The new embed to replace the original with.
            Could be ``None`` to remove the embed.
        suppress: :class:`bool`
            Whether to suppress embeds for the message. This removes
            all the embeds if set to ``True``. If set to ``False``
            this brings the embeds back if they were suppressed.
            Using this parameter requires :attr:`~.Permissions.manage_messages`.
        delete_after: Optional[:class:`float`]
            If provided, the number of seconds to wait in the background
            before deleting the message we just edited. If the deletion fails,
            then it is silently ignored.
        allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
            Controls the mentions being processed in this message.
            If this is passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
            The merging behaviour only overrides attributes that have been explicitly passed
            to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
            If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
            are used instead.
        view: Optional[:class:`~discord.ui.View`]
            The updated view to update this message with. If ``None`` is passed then
            the view is removed.

            .. versionadded:: 2.0

        Raises
        -------
        NotFound
            The message was not found.
        HTTPException
            Editing the message failed.
        Forbidden
            Tried to suppress a message without permissions or
            edited a message's content or embed that isn't yours.

        Returns
        ---------
        Optional[:class:`Message`]
            The message that was edited.
        """
        # Normalise each recognised keyword in-place before forwarding the
        # remaining fields to the HTTP layer.
        try:
            content = fields['content']
        except KeyError:
            pass
        else:
            if content is not None:
                fields['content'] = str(content)

        try:
            embed = fields['embed']
        except KeyError:
            pass
        else:
            if embed is not None:
                fields['embed'] = embed.to_dict()

        try:
            suppress: bool = fields.pop('suppress')
        except KeyError:
            pass
        else:
            # Unlike Message.edit, there is no cached flag value to start from.
            flags = MessageFlags._from_value(0)
            flags.suppress_embeds = suppress
            fields['flags'] = flags.value

        delete_after = fields.pop('delete_after', None)

        try:
            allowed_mentions = fields.pop('allowed_mentions')
        except KeyError:
            pass
        else:
            if allowed_mentions is not None:
                if self._state.allowed_mentions is not None:
                    allowed_mentions = self._state.allowed_mentions.merge(allowed_mentions).to_dict()
                else:
                    allowed_mentions = allowed_mentions.to_dict()
                fields['allowed_mentions'] = allowed_mentions

        try:
            view = fields.pop('view')
        except KeyError:
            # To check for the view afterwards
            view = None
        else:
            self._state.prevent_view_updates_for(self.id)
            if view:
                fields['components'] = view.to_components()
            else:
                fields['components'] = []

        if fields:
            data = await self._state.http.edit_message(self.channel.id, self.id, **fields)

        if delete_after is not None:
            await self.delete(delay=delete_after)

        if fields:
            # data isn't unbound
            msg = self._state.create_message(channel=self.channel, data=data)  # type: ignore
            if view and not view.is_finished():
                self._state.store_view(view, self.id)
            return msg
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/message.py
message.py
from __future__ import annotations from typing import Optional, TYPE_CHECKING from .utils import MISSING, cached_slot_property from .mixins import Hashable from .errors import InvalidArgument from .enums import StagePrivacyLevel, try_enum __all__ = ( 'StageInstance', ) if TYPE_CHECKING: from .types.channel import StageInstance as StageInstancePayload from .state import ConnectionState from .channel import StageChannel from .guild import Guild class StageInstance(Hashable): """Represents a stage instance of a stage channel in a guild. .. versionadded:: 2.0 .. container:: operations .. describe:: x == y Checks if two stage instances are equal. .. describe:: x != y Checks if two stage instances are not equal. .. describe:: hash(x) Returns the stage instance's hash. Attributes ----------- id: :class:`int` The stage instance's ID. guild: :class:`Guild` The guild that the stage instance is running in. channel_id: :class:`int` The ID of the channel that the stage instance is running in. topic: :class:`str` The topic of the stage instance. privacy_level: :class:`StagePrivacyLevel` The privacy level of the stage instance. discoverable_disabled: :class:`bool` Whether discoverability for the stage instance is disabled. 
""" __slots__ = ( '_state', 'id', 'guild', 'channel_id', 'topic', 'privacy_level', 'discoverable_disabled', '_cs_channel', ) def __init__(self, *, state: ConnectionState, guild: Guild, data: StageInstancePayload) -> None: self._state = state self.guild = guild self._update(data) def _update(self, data: StageInstancePayload): self.id: int = int(data['id']) self.channel_id: int = int(data['channel_id']) self.topic: str = data['topic'] self.privacy_level: StagePrivacyLevel = try_enum(StagePrivacyLevel, data['privacy_level']) self.discoverable_disabled: bool = data.get('discoverable_disabled', False) def __repr__(self) -> str: return f'<StageInstance id={self.id} guild={self.guild!r} channel_id={self.channel_id} topic={self.topic!r}>' @cached_slot_property('_cs_channel') def channel(self) -> Optional[StageChannel]: """Optional[:class:`StageChannel`]: The channel that stage instance is running in.""" # the returned channel will always be a StageChannel or None return self._state.get_channel(self.channel_id) # type: ignore def is_public(self) -> bool: return self.privacy_level is StagePrivacyLevel.public async def edit(self, *, topic: str = MISSING, privacy_level: StagePrivacyLevel = MISSING, reason: Optional[str] = None) -> None: """|coro| Edits the stage instance. You must have the :attr:`~Permissions.manage_channels` permission to use this. Parameters ----------- topic: :class:`str` The stage instance's new topic. privacy_level: :class:`StagePrivacyLevel` The stage instance's new privacy level. reason: :class:`str` The reason the stage instance was edited. Shows up on the audit log. Raises ------ InvalidArgument If the ``privacy_level`` parameter is not the proper type. Forbidden You do not have permissions to edit the stage instance. HTTPException Editing a stage instance failed. 
""" payload = {} if topic is not MISSING: payload['topic'] = topic if privacy_level is not MISSING: if not isinstance(privacy_level, StagePrivacyLevel): raise InvalidArgument('privacy_level field must be of type PrivacyLevel') payload['privacy_level'] = privacy_level.value if payload: await self._state.http.edit_stage_instance(self.channel_id, **payload, reason=reason) async def delete(self, *, reason: Optional[str] = None) -> None: """|coro| Deletes the stage instance. You must have the :attr:`~Permissions.manage_channels` permission to use this. Parameters ----------- reason: :class:`str` The reason the stage instance was deleted. Shows up on the audit log. Raises ------ Forbidden You do not have permissions to delete the stage instance. HTTPException Deleting the stage instance failed. """ await self._state.http.delete_stage_instance(self.channel_id, reason=reason)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/stage_instance.py
stage_instance.py
from __future__ import annotations import time import random from typing import Callable, Generic, Literal, TypeVar, overload, Union T = TypeVar('T', bool, Literal[True], Literal[False]) __all__ = ( 'ExponentialBackoff', ) class ExponentialBackoff(Generic[T]): """An implementation of the exponential backoff algorithm Provides a convenient interface to implement an exponential backoff for reconnecting or retrying transmissions in a distributed network. Once instantiated, the delay method will return the next interval to wait for when retrying a connection or transmission. The maximum delay increases exponentially with each retry up to a maximum of 2^10 * base, and is reset if no more attempts are needed in a period of 2^11 * base seconds. Parameters ---------- base: :class:`int` The base delay in seconds. The first retry-delay will be up to this many seconds. integral: :class:`bool` Set to ``True`` if whole periods of base is desirable, otherwise any number in between may be returned. """ def __init__(self, base: int = 1, *, integral: T = False): self._base: int = base self._exp: int = 0 self._max: int = 10 self._reset_time: int = base * 2 ** 11 self._last_invocation: float = time.monotonic() # Use our own random instance to avoid messing with global one rand = random.Random() rand.seed() self._randfunc: Callable[..., Union[int, float]] = rand.randrange if integral else rand.uniform # type: ignore @overload def delay(self: ExponentialBackoff[Literal[False]]) -> float: ... @overload def delay(self: ExponentialBackoff[Literal[True]]) -> int: ... @overload def delay(self: ExponentialBackoff[bool]) -> Union[int, float]: ... def delay(self) -> Union[int, float]: """Compute the next delay Returns the next delay to wait according to the exponential backoff algorithm. This is a value between 0 and base * 2^exp where exponent starts off at 1 and is incremented at every invocation of this method up to a maximum of 10. 
If a period of more than base * 2^11 has passed since the last retry, the exponent is reset to 1. """ invocation = time.monotonic() interval = invocation - self._last_invocation self._last_invocation = invocation if interval > self._reset_time: self._exp = 0 self._exp = min(self._exp + 1, self._max) return self._randfunc(0, self._base * 2 ** self._exp)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/backoff.py
backoff.py
from __future__ import annotations

import datetime
from typing import Any, Dict, Final, List, Mapping, Protocol, TYPE_CHECKING, Type, TypeVar, Union

from . import utils
from .colour import Colour

__all__ = (
    'Embed',
)


class _EmptyEmbed:
    # Sentinel type: falsy and zero-length so that truthiness checks and
    # len() over unset embed attributes "just work" (see Embed.__len__).
    def __bool__(self) -> bool:
        return False

    def __repr__(self) -> str:
        return 'Embed.Empty'

    def __len__(self) -> int:
        return 0


EmptyEmbed: Final = _EmptyEmbed()


class EmbedProxy:
    # Dotted-access wrapper over a plain dict layer (e.g. embed.author.icon_url).
    def __init__(self, layer: Dict[str, Any]):
        self.__dict__.update(layer)

    def __len__(self) -> int:
        return len(self.__dict__)

    def __repr__(self) -> str:
        inner = ', '.join((f'{k}={v!r}' for k, v in self.__dict__.items() if not k.startswith('_')))
        return f'EmbedProxy({inner})'

    def __getattr__(self, attr: str) -> _EmptyEmbed:
        # Any attribute missing from the underlying dict resolves to the sentinel.
        return EmptyEmbed


E = TypeVar('E', bound='Embed')

if TYPE_CHECKING:
    from discord.types.embed import Embed as EmbedData, EmbedType

    T = TypeVar('T')
    MaybeEmpty = Union[T, _EmptyEmbed]

    class _EmbedFooterProxy(Protocol):
        text: MaybeEmpty[str]
        icon_url: MaybeEmpty[str]

    class _EmbedFieldProxy(Protocol):
        name: MaybeEmpty[str]
        value: MaybeEmpty[str]
        inline: bool

    class _EmbedMediaProxy(Protocol):
        url: MaybeEmpty[str]
        proxy_url: MaybeEmpty[str]
        height: MaybeEmpty[int]
        width: MaybeEmpty[int]

    class _EmbedVideoProxy(Protocol):
        url: MaybeEmpty[str]
        height: MaybeEmpty[int]
        width: MaybeEmpty[int]

    class _EmbedProviderProxy(Protocol):
        name: MaybeEmpty[str]
        url: MaybeEmpty[str]

    class _EmbedAuthorProxy(Protocol):
        name: MaybeEmpty[str]
        url: MaybeEmpty[str]
        icon_url: MaybeEmpty[str]
        proxy_icon_url: MaybeEmpty[str]


class Embed:
    """Represents a Discord embed.

    .. container:: operations

        .. describe:: len(x)

            Returns the total size of the embed.
            Useful for checking if it's within the 6000 character limit.

        .. describe:: bool(b)

            Returns whether the embed has any data set.

            .. versionadded:: 2.0

    Certain properties return an ``EmbedProxy``, a type
    that acts similar to a regular :class:`dict` except using dotted access,
    e.g. ``embed.author.icon_url``. If the attribute
    is invalid or empty, then a special sentinel value is returned,
    :attr:`Embed.Empty`.

    For ease of use, all parameters that expect a :class:`str` are implicitly
    casted to :class:`str` for you.

    Attributes
    -----------
    title: :class:`str`
        The title of the embed.
        This can be set during initialisation.
    type: :class:`str`
        The type of embed. Usually "rich".
        This can be set during initialisation.
        Possible strings for embed types can be found on discord's
        `api docs <https://discord.com/developers/docs/resources/channel#embed-object-embed-types>`_
    description: :class:`str`
        The description of the embed.
        This can be set during initialisation.
    url: :class:`str`
        The URL of the embed.
        This can be set during initialisation.
    timestamp: :class:`datetime.datetime`
        The timestamp of the embed content. This is an aware datetime.
        If a naive datetime is passed, it is converted to an aware
        datetime with the local timezone.
    colour: Union[:class:`Colour`, :class:`int`]
        The colour code of the embed. Aliased to ``color`` as well.
        This can be set during initialisation.
    Empty
        A special sentinel value used by ``EmbedProxy`` and this class
        to denote that the value or attribute is empty.
    """

    __slots__ = (
        'title',
        'url',
        'type',
        '_timestamp',
        '_colour',
        '_footer',
        '_image',
        '_thumbnail',
        '_video',
        '_provider',
        '_author',
        '_fields',
        'description',
    )

    Empty: Final = EmptyEmbed

    def __init__(
        self,
        *,
        colour: Union[int, Colour, _EmptyEmbed] = EmptyEmbed,
        color: Union[int, Colour, _EmptyEmbed] = EmptyEmbed,
        title: MaybeEmpty[Any] = EmptyEmbed,
        type: EmbedType = 'rich',
        url: MaybeEmpty[Any] = EmptyEmbed,
        description: MaybeEmpty[Any] = EmptyEmbed,
        timestamp: datetime.datetime = None,  # None means "no timestamp set"
    ):
        # ``colour`` wins over ``color`` when both are given.
        self.colour = colour if colour is not EmptyEmbed else color
        self.title = title
        self.type = type
        self.url = url
        self.description = description

        # Explicit casts implement the "implicitly casted to str" promise
        # made in the class docstring.
        if self.title is not EmptyEmbed:
            self.title = str(self.title)

        if self.description is not EmptyEmbed:
            self.description = str(self.description)

        if self.url is not EmptyEmbed:
            self.url = str(self.url)

        if timestamp:
            self.timestamp = timestamp

    @classmethod
    def from_dict(cls: Type[E], data: Mapping[str, Any]) -> E:
        """Converts a :class:`dict` to a :class:`Embed` provided it is in the
        format that Discord expects it to be in.

        You can find out about this format in the `official Discord documentation`__.

        .. _DiscordDocs: https://discord.com/developers/docs/resources/channel#embed-object

        __ DiscordDocs_

        Parameters
        -----------
        data: :class:`dict`
            The dictionary to convert into an embed.
        """
        # we are bypassing __init__ here since it doesn't apply here
        self: E = cls.__new__(cls)

        # fill in the basic fields

        self.title = data.get('title', EmptyEmbed)
        self.type = data.get('type', EmptyEmbed)
        self.description = data.get('description', EmptyEmbed)
        self.url = data.get('url', EmptyEmbed)

        if self.title is not EmptyEmbed:
            self.title = str(self.title)

        if self.description is not EmptyEmbed:
            self.description = str(self.description)

        if self.url is not EmptyEmbed:
            self.url = str(self.url)

        # try to fill in the more rich fields

        try:
            self._colour = Colour(value=data['color'])
        except KeyError:
            pass

        try:
            self._timestamp = utils.parse_time(data['timestamp'])
        except KeyError:
            pass

        for attr in ('thumbnail', 'video', 'provider', 'author', 'fields', 'image', 'footer'):
            try:
                value = data[attr]
            except KeyError:
                continue
            else:
                setattr(self, '_' + attr, value)

        return self

    def copy(self: E) -> E:
        """Returns a shallow copy of the embed."""
        return self.__class__.from_dict(self.to_dict())

    def __len__(self) -> int:
        # Character count towards the 6000-character limit mentioned in the
        # class docstring.  Unset title/description are the zero-length
        # sentinel; unset footer/author are skipped via the except clauses.
        total = len(self.title) + len(self.description)
        for field in getattr(self, '_fields', []):
            total += len(field['name']) + len(field['value'])

        try:
            footer_text = self._footer['text']
        except (AttributeError, KeyError):
            pass
        else:
            total += len(footer_text)

        try:
            author = self._author
        except AttributeError:
            pass
        else:
            total += len(author['name'])

        return total

    def __bool__(self) -> bool:
        return any(
            (
                self.title,
                self.url,
                self.description,
                self.colour,
                self.fields,
                self.timestamp,
                self.author,
                self.thumbnail,
                self.footer,
                self.image,
                self.provider,
                self.video,
            )
        )

    @property
    def colour(self) -> MaybeEmpty[Colour]:
        return getattr(self, '_colour', EmptyEmbed)

    @colour.setter
    def colour(self, value: Union[int, Colour, _EmptyEmbed]):  # type: ignore
        if isinstance(value, (Colour, _EmptyEmbed)):
            self._colour = value
        elif isinstance(value, int):
            self._colour = Colour(value=value)
        else:
            raise TypeError(f'Expected discord.Colour, int, or Embed.Empty but received {value.__class__.__name__} instead.')

    color = colour

    @property
    def timestamp(self) -> MaybeEmpty[datetime.datetime]:
        return getattr(self, '_timestamp', EmptyEmbed)

    @timestamp.setter
    def timestamp(self, value: MaybeEmpty[datetime.datetime]):
        if isinstance(value, datetime.datetime):
            if value.tzinfo is None:
                # Naive datetimes are interpreted in the local timezone and
                # converted to aware, per the class docstring.
                value = value.astimezone()
            self._timestamp = value
        elif isinstance(value, _EmptyEmbed):
            self._timestamp = value
        else:
            raise TypeError(f"Expected datetime.datetime or Embed.Empty received {value.__class__.__name__} instead")

    @property
    def footer(self) -> _EmbedFooterProxy:
        """Returns an ``EmbedProxy`` denoting the footer contents.

        See :meth:`set_footer` for possible values you can access.

        If the attribute has no value then :attr:`Empty` is returned.
        """
        return EmbedProxy(getattr(self, '_footer', {}))  # type: ignore

    def set_footer(self: E, *, text: MaybeEmpty[Any] = EmptyEmbed, icon_url: MaybeEmpty[Any] = EmptyEmbed) -> E:
        """Sets the footer for the embed content.

        This function returns the class instance to allow for fluent-style
        chaining.

        Parameters
        -----------
        text: :class:`str`
            The footer text.
        icon_url: :class:`str`
            The URL of the footer icon. Only HTTP(S) is supported.
        """
        self._footer = {}
        if text is not EmptyEmbed:
            self._footer['text'] = str(text)

        if icon_url is not EmptyEmbed:
            self._footer['icon_url'] = str(icon_url)

        return self

    def remove_footer(self: E) -> E:
        """Clears embed's footer information.

        This function returns the class instance to allow for fluent-style
        chaining.

        .. versionadded:: 2.0
        """
        try:
            del self._footer
        except AttributeError:
            pass

        return self

    @property
    def image(self) -> _EmbedMediaProxy:
        """Returns an ``EmbedProxy`` denoting the image contents.

        Possible attributes you can access are:

        - ``url``
        - ``proxy_url``
        - ``width``
        - ``height``

        If the attribute has no value then :attr:`Empty` is returned.
        """
        return EmbedProxy(getattr(self, '_image', {}))  # type: ignore

    def set_image(self: E, *, url: MaybeEmpty[Any]) -> E:
        """Sets the image for the embed content.

        This function returns the class instance to allow for fluent-style
        chaining.

        .. versionchanged:: 1.4
            Passing :attr:`Empty` removes the image.

        Parameters
        -----------
        url: :class:`str`
            The source URL for the image. Only HTTP(S) is supported.
        """
        if url is EmptyEmbed:
            try:
                del self._image
            except AttributeError:
                pass
        else:
            self._image = {
                'url': str(url),
            }

        return self

    @property
    def thumbnail(self) -> _EmbedMediaProxy:
        """Returns an ``EmbedProxy`` denoting the thumbnail contents.

        Possible attributes you can access are:

        - ``url``
        - ``proxy_url``
        - ``width``
        - ``height``

        If the attribute has no value then :attr:`Empty` is returned.
        """
        return EmbedProxy(getattr(self, '_thumbnail', {}))  # type: ignore

    def set_thumbnail(self: E, *, url: MaybeEmpty[Any]) -> E:
        """Sets the thumbnail for the embed content.

        This function returns the class instance to allow for fluent-style
        chaining.

        .. versionchanged:: 1.4
            Passing :attr:`Empty` removes the thumbnail.

        Parameters
        -----------
        url: :class:`str`
            The source URL for the thumbnail. Only HTTP(S) is supported.
        """
        if url is EmptyEmbed:
            try:
                del self._thumbnail
            except AttributeError:
                pass
        else:
            self._thumbnail = {
                'url': str(url),
            }

        return self

    @property
    def video(self) -> _EmbedVideoProxy:
        """Returns an ``EmbedProxy`` denoting the video contents.

        Possible attributes include:

        - ``url`` for the video URL.
        - ``height`` for the video height.
        - ``width`` for the video width.

        If the attribute has no value then :attr:`Empty` is returned.
        """
        return EmbedProxy(getattr(self, '_video', {}))  # type: ignore

    @property
    def provider(self) -> _EmbedProviderProxy:
        """Returns an ``EmbedProxy`` denoting the provider contents.

        The only attributes that might be accessed are ``name`` and ``url``.

        If the attribute has no value then :attr:`Empty` is returned.
        """
        return EmbedProxy(getattr(self, '_provider', {}))  # type: ignore

    @property
    def author(self) -> _EmbedAuthorProxy:
        """Returns an ``EmbedProxy`` denoting the author contents.

        See :meth:`set_author` for possible values you can access.

        If the attribute has no value then :attr:`Empty` is returned.
        """
        return EmbedProxy(getattr(self, '_author', {}))  # type: ignore

    def set_author(self: E, *, name: Any, url: MaybeEmpty[Any] = EmptyEmbed, icon_url: MaybeEmpty[Any] = EmptyEmbed) -> E:
        """Sets the author for the embed content.

        This function returns the class instance to allow for fluent-style
        chaining.

        Parameters
        -----------
        name: :class:`str`
            The name of the author.
        url: :class:`str`
            The URL for the author.
        icon_url: :class:`str`
            The URL of the author icon. Only HTTP(S) is supported.
        """
        self._author = {
            'name': str(name),
        }

        if url is not EmptyEmbed:
            self._author['url'] = str(url)

        if icon_url is not EmptyEmbed:
            self._author['icon_url'] = str(icon_url)

        return self

    def remove_author(self: E) -> E:
        """Clears embed's author information.

        This function returns the class instance to allow for fluent-style
        chaining.

        .. versionadded:: 1.4
        """
        try:
            del self._author
        except AttributeError:
            pass

        return self

    @property
    def fields(self) -> List[_EmbedFieldProxy]:
        """List[Union[``EmbedProxy``, :attr:`Empty`]]: Returns a :class:`list` of ``EmbedProxy`` denoting the field contents.

        See :meth:`add_field` for possible values you can access.

        If the attribute has no value then :attr:`Empty` is returned.
        """
        return [EmbedProxy(d) for d in getattr(self, '_fields', [])]  # type: ignore

    def add_field(self: E, *, name: Any, value: Any, inline: bool = True) -> E:
        """Adds a field to the embed object.

        This function returns the class instance to allow for fluent-style
        chaining.

        Parameters
        -----------
        name: :class:`str`
            The name of the field.
        value: :class:`str`
            The value of the field.
        inline: :class:`bool`
            Whether the field should be displayed inline.
        """
        field = {
            'inline': inline,
            'name': str(name),
            'value': str(value),
        }

        # _fields is created lazily on first use (EAFP).
        try:
            self._fields.append(field)
        except AttributeError:
            self._fields = [field]

        return self

    def insert_field_at(self: E, index: int, *, name: Any, value: Any, inline: bool = True) -> E:
        """Inserts a field before a specified index to the embed.

        This function returns the class instance to allow for fluent-style
        chaining.

        .. versionadded:: 1.2

        Parameters
        -----------
        index: :class:`int`
            The index of where to insert the field.
        name: :class:`str`
            The name of the field.
        value: :class:`str`
            The value of the field.
        inline: :class:`bool`
            Whether the field should be displayed inline.
        """
        field = {
            'inline': inline,
            'name': str(name),
            'value': str(value),
        }

        try:
            self._fields.insert(index, field)
        except AttributeError:
            self._fields = [field]

        return self

    def clear_fields(self) -> None:
        """Removes all fields from this embed."""
        try:
            self._fields.clear()
        except AttributeError:
            self._fields = []

    def remove_field(self, index: int) -> None:
        """Removes a field at a specified index.

        If the index is invalid or out of bounds then the error is
        silently swallowed.

        .. note::

            When deleting a field by index, the index of the other fields
            shift to fill the gap just like a regular list.

        Parameters
        -----------
        index: :class:`int`
            The index of the field to remove.
        """
        try:
            del self._fields[index]
        except (AttributeError, IndexError):
            pass

    def set_field_at(self: E, index: int, *, name: Any, value: Any, inline: bool = True) -> E:
        """Modifies a field to the embed object.

        The index must point to a valid pre-existing field.

        This function returns the class instance to allow for fluent-style
        chaining.

        Parameters
        -----------
        index: :class:`int`
            The index of the field to modify.
        name: :class:`str`
            The name of the field.
        value: :class:`str`
            The value of the field.
        inline: :class:`bool`
            Whether the field should be displayed inline.

        Raises
        -------
        IndexError
            An invalid index was provided.
        """
        try:
            field = self._fields[index]
        except (TypeError, IndexError, AttributeError):
            raise IndexError('field index out of range')

        field['name'] = str(name)
        field['value'] = str(value)
        field['inline'] = inline
        return self

    def to_dict(self) -> EmbedData:
        """Converts this embed object into a dict."""

        # add in the raw data into the dict
        # fmt: off
        result = {
            key[1:]: getattr(self, key)
            for key in self.__slots__
            if key[0] == '_' and hasattr(self, key)
        }
        # fmt: on

        # deal with basic convenience wrappers

        try:
            colour = result.pop('colour')
        except KeyError:
            pass
        else:
            if colour:
                result['color'] = colour.value

        try:
            timestamp = result.pop('timestamp')
        except KeyError:
            pass
        else:
            if timestamp:
                if timestamp.tzinfo:
                    result['timestamp'] = timestamp.astimezone(tz=datetime.timezone.utc).isoformat()
                else:
                    result['timestamp'] = timestamp.replace(tzinfo=datetime.timezone.utc).isoformat()

        # add in the non raw attribute ones
        if self.type:
            result['type'] = self.type

        if self.description:
            result['description'] = self.description

        if self.url:
            result['url'] = self.url

        if self.title:
            result['title'] = self.title

        return result  # type: ignore
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/embeds.py
embeds.py
from __future__ import annotations

from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generator, List, Optional, Tuple, Type, TypeVar, Union

from . import enums, utils
from .asset import Asset
from .colour import Colour
from .invite import Invite
from .mixins import Hashable
from .object import Object
from .permissions import PermissionOverwrite, Permissions

__all__ = (
    'AuditLogDiff',
    'AuditLogChanges',
    'AuditLogEntry',
)

if TYPE_CHECKING:
    import datetime

    from . import abc
    from .emoji import Emoji
    from .guild import Guild
    from .member import Member
    from .role import Role
    from .types.audit_log import (
        AuditLogChange as AuditLogChangePayload,
        AuditLogEntry as AuditLogEntryPayload,
    )
    from .types.channel import PermissionOverwrite as PermissionOverwritePayload
    from .types.role import Role as RolePayload
    from .types.snowflake import Snowflake
    from .user import User
    from .stage_instance import StageInstance
    from .sticker import GuildSticker
    from .threads import Thread


# Each _transform_* helper converts one raw audit-log change value into a
# model object; they are looked up via AuditLogChanges.TRANSFORMERS below.

def _transform_permissions(entry: AuditLogEntry, data: str) -> Permissions:
    return Permissions(int(data))


def _transform_color(entry: AuditLogEntry, data: int) -> Colour:
    return Colour(data)


def _transform_snowflake(entry: AuditLogEntry, data: Snowflake) -> int:
    return int(data)


def _transform_channel(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Union[abc.GuildChannel, Object]]:
    if data is None:
        return None
    # Fall back to a bare Object when the channel is no longer cached.
    return entry.guild.get_channel(int(data)) or Object(id=data)


def _transform_member_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Union[Member, User, None]:
    if data is None:
        return None
    return entry._get_member(int(data))


def _transform_guild_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Guild]:
    if data is None:
        return None
    return entry._state._get_guild(data)


def _transform_overwrites(
    entry: AuditLogEntry, data: List[PermissionOverwritePayload]
) -> List[Tuple[Object, PermissionOverwrite]]:
    overwrites = []
    for elem in data:
        allow = Permissions(int(elem['allow']))
        deny = Permissions(int(elem['deny']))
        ow = PermissionOverwrite.from_pair(allow, deny)

        # NOTE(review): 'type' is compared as a string ('0' role / '1' member)
        # here and in AuditLogEntry._from_data — confirm this matches the
        # payload version in use (some API versions send an int).
        ow_type = elem['type']
        ow_id = int(elem['id'])
        target = None
        if ow_type == '0':
            target = entry.guild.get_role(ow_id)
        elif ow_type == '1':
            target = entry._get_member(ow_id)

        if target is None:
            target = Object(id=ow_id)

        overwrites.append((target, ow))

    return overwrites


def _transform_icon(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
    if data is None:
        return None
    return Asset._from_guild_icon(entry._state, entry.guild.id, data)


def _transform_avatar(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
    if data is None:
        return None
    return Asset._from_avatar(entry._state, entry._target_id, data)  # type: ignore


def _guild_hash_transformer(path: str) -> Callable[[AuditLogEntry, Optional[str]], Optional[Asset]]:
    # Factory: builds a transformer for a guild image hash stored under the
    # given CDN path (e.g. 'splashes', 'banners').
    def _transform(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
        if data is None:
            return None
        return Asset._from_guild_image(entry._state, entry.guild.id, data, path=path)

    return _transform


T = TypeVar('T', bound=enums.Enum)


def _enum_transformer(enum: Type[T]) -> Callable[[AuditLogEntry, int], T]:
    # Factory: builds a transformer that coerces a raw value into the enum.
    def _transform(entry: AuditLogEntry, data: int) -> T:
        return enums.try_enum(enum, data)

    return _transform


def _transform_type(entry: AuditLogEntry, data: Union[int]) -> Union[enums.ChannelType, enums.StickerType]:
    # The raw 'type' key is overloaded: sticker actions carry a StickerType,
    # everything else a ChannelType.
    if entry.action.name.startswith('sticker_'):
        return enums.try_enum(enums.StickerType, data)
    else:
        return enums.try_enum(enums.ChannelType, data)


class AuditLogDiff:
    # A dynamic attribute bag: AuditLogChanges setattr()s transformed change
    # values directly onto instances of this class.
    def __len__(self) -> int:
        return len(self.__dict__)

    def __iter__(self) -> Generator[Tuple[str, Any], None, None]:
        yield from self.__dict__.items()

    def __repr__(self) -> str:
        values = ' '.join('%s=%r' % item for item in self.__dict__.items())
        return f'<AuditLogDiff {values}>'

    if TYPE_CHECKING:

        def __getattr__(self, item: str) -> Any:
            ...

        def __setattr__(self, key: str, value: Any) -> Any:
            ...


Transformer = Callable[["AuditLogEntry", Any], Any]


class AuditLogChanges:
    # Maps a raw change key to (renamed attribute or None to keep the key,
    # transformer or None to keep the raw value).
    # fmt: off
    TRANSFORMERS: ClassVar[Dict[str, Tuple[Optional[str], Optional[Transformer]]]] = {
        'verification_level':            (None, _enum_transformer(enums.VerificationLevel)),
        'explicit_content_filter':       (None, _enum_transformer(enums.ContentFilter)),
        'allow':                         (None, _transform_permissions),
        'deny':                          (None, _transform_permissions),
        'permissions':                   (None, _transform_permissions),
        'id':                            (None, _transform_snowflake),
        'color':                         ('colour', _transform_color),
        'owner_id':                      ('owner', _transform_member_id),
        'inviter_id':                    ('inviter', _transform_member_id),
        'channel_id':                    ('channel', _transform_channel),
        'afk_channel_id':                ('afk_channel', _transform_channel),
        'system_channel_id':             ('system_channel', _transform_channel),
        'widget_channel_id':             ('widget_channel', _transform_channel),
        'rules_channel_id':              ('rules_channel', _transform_channel),
        'public_updates_channel_id':     ('public_updates_channel', _transform_channel),
        'permission_overwrites':         ('overwrites', _transform_overwrites),
        'splash_hash':                   ('splash', _guild_hash_transformer('splashes')),
        'banner_hash':                   ('banner', _guild_hash_transformer('banners')),
        'discovery_splash_hash':         ('discovery_splash', _guild_hash_transformer('discovery-splashes')),
        'icon_hash':                     ('icon', _transform_icon),
        'avatar_hash':                   ('avatar', _transform_avatar),
        'rate_limit_per_user':           ('slowmode_delay', None),
        'guild_id':                      ('guild', _transform_guild_id),
        'tags':                          ('emoji', None),
        'default_message_notifications': ('default_notifications', _enum_transformer(enums.NotificationLevel)),
        'region':                        (None, _enum_transformer(enums.VoiceRegion)),
        'rtc_region':                    (None, _enum_transformer(enums.VoiceRegion)),
        'video_quality_mode':            (None, _enum_transformer(enums.VideoQualityMode)),
        'privacy_level':                 (None, _enum_transformer(enums.StagePrivacyLevel)),
        'format_type':                   (None, _enum_transformer(enums.StickerFormatType)),
        'type':                          (None, _transform_type),
    }
    # fmt: on

    def __init__(self, entry: AuditLogEntry, data: List[AuditLogChangePayload]):
        # Builds the before/after attribute bags from the raw change list.
        self.before = AuditLogDiff()
        self.after = AuditLogDiff()

        for elem in data:
            attr = elem['key']

            # special cases for role add/remove
            if attr == '$add':
                self._handle_role(self.before, self.after, entry, elem['new_value'])  # type: ignore
                continue
            elif attr == '$remove':
                self._handle_role(self.after, self.before, entry, elem['new_value'])  # type: ignore
                continue

            try:
                key, transformer = self.TRANSFORMERS[attr]
            except (ValueError, KeyError):
                transformer = None
            else:
                if key:
                    attr = key

            transformer: Optional[Transformer]

            # old_value/new_value may each be absent; missing side becomes None.
            try:
                before = elem['old_value']
            except KeyError:
                before = None
            else:
                if transformer:
                    before = transformer(entry, before)

            setattr(self.before, attr, before)

            try:
                after = elem['new_value']
            except KeyError:
                after = None
            else:
                if transformer:
                    after = transformer(entry, after)

            setattr(self.after, attr, after)

        # add an alias
        if hasattr(self.after, 'colour'):
            self.after.color = self.after.colour
            self.before.color = self.before.colour

        if hasattr(self.after, 'expire_behavior'):
            self.after.expire_behaviour = self.after.expire_behavior
            self.before.expire_behaviour = self.before.expire_behavior

    def __repr__(self) -> str:
        return f'<AuditLogChanges before={self.before!r} after={self.after!r}>'

    def _handle_role(self, first: AuditLogDiff, second: AuditLogDiff, entry: AuditLogEntry, elem: List[RolePayload]) -> None:
        # '$add'/'$remove' payloads carry partial role objects; roles missing
        # from the guild cache become bare Objects with the name attached.
        if not hasattr(first, 'roles'):
            setattr(first, 'roles', [])

        data = []
        g: Guild = entry.guild  # type: ignore

        for e in elem:
            role_id = int(e['id'])
            role = g.get_role(role_id)

            if role is None:
                role = Object(id=role_id)
                role.name = e['name']  # type: ignore

            data.append(role)

        setattr(second, 'roles', data)


# The following stubs only describe the attribute shapes of the ad-hoc
# proxy objects built via type('_AuditLogProxy', (), ...) in _from_data.

class _AuditLogProxyMemberPrune:
    delete_member_days: int
    members_removed: int


class _AuditLogProxyMemberMoveOrMessageDelete:
    channel: abc.GuildChannel
    count: int


class _AuditLogProxyMemberDisconnect:
    count: int


class _AuditLogProxyPinAction:
    channel: abc.GuildChannel
    message_id: int


class _AuditLogProxyStageInstanceAction:
    channel: abc.GuildChannel


class AuditLogEntry(Hashable):
    r"""Represents an Audit Log entry.

    You retrieve these via :meth:`Guild.audit_logs`.

    .. container:: operations

        .. describe:: x == y

            Checks if two entries are equal.

        .. describe:: x != y

            Checks if two entries are not equal.

        .. describe:: hash(x)

            Returns the entry's hash.

    .. versionchanged:: 1.7
        Audit log entries are now comparable and hashable.

    Attributes
    -----------
    action: :class:`AuditLogAction`
        The action that was done.
    user: :class:`abc.User`
        The user who initiated this action. Usually a :class:`Member`\, unless gone
        then it's a :class:`User`.
    id: :class:`int`
        The entry ID.
    target: Any
        The target that got changed. The exact type of this depends on
        the action being done.
    reason: Optional[:class:`str`]
        The reason this action was done.
    extra: Any
        Extra information that this entry has that might be useful.
        For most actions, this is ``None``. However in some cases it
        contains extra information. See :class:`AuditLogAction` for
        which actions have this field filled out.
    """

    def __init__(self, *, users: Dict[int, User], data: AuditLogEntryPayload, guild: Guild):
        self._state = guild._state
        self.guild = guild
        self._users = users
        self._from_data(data)

    def _from_data(self, data: AuditLogEntryPayload) -> None:
        # Parses the raw payload; self.extra is progressively narrowed per
        # action category, hence the repeated annotated reassignments.
        self.action = enums.try_enum(enums.AuditLogAction, data['action_type'])
        self.id = int(data['id'])

        # this key is technically not usually present
        self.reason = data.get('reason')
        self.extra = data.get('options')

        if isinstance(self.action, enums.AuditLogAction) and self.extra:
            if self.action is enums.AuditLogAction.member_prune:
                # member prune has two keys with useful information
                self.extra: _AuditLogProxyMemberPrune = type(
                    '_AuditLogProxy', (), {k: int(v) for k, v in self.extra.items()}
                )()
            elif self.action is enums.AuditLogAction.member_move or self.action is enums.AuditLogAction.message_delete:
                channel_id = int(self.extra['channel_id'])
                elems = {
                    'count': int(self.extra['count']),
                    'channel': self.guild.get_channel(channel_id) or Object(id=channel_id),
                }
                self.extra: _AuditLogProxyMemberMoveOrMessageDelete = type('_AuditLogProxy', (), elems)()
            elif self.action is enums.AuditLogAction.member_disconnect:
                # The member disconnect action has a dict with some information
                elems = {
                    'count': int(self.extra['count']),
                }
                self.extra: _AuditLogProxyMemberDisconnect = type('_AuditLogProxy', (), elems)()
            elif self.action.name.endswith('pin'):
                # the pin actions have a dict with some information
                channel_id = int(self.extra['channel_id'])
                elems = {
                    'channel': self.guild.get_channel(channel_id) or Object(id=channel_id),
                    'message_id': int(self.extra['message_id']),
                }
                self.extra: _AuditLogProxyPinAction = type('_AuditLogProxy', (), elems)()
            elif self.action.name.startswith('overwrite_'):
                # the overwrite_ actions have a dict with some information
                instance_id = int(self.extra['id'])
                # NOTE(review): 'type' compared as a string here, matching
                # _transform_overwrites above — verify against payload version.
                the_type = self.extra.get('type')
                if the_type == '1':
                    self.extra = self._get_member(instance_id)
                elif the_type == '0':
                    role = self.guild.get_role(instance_id)
                    if role is None:
                        role = Object(id=instance_id)
                        role.name = self.extra.get('role_name')  # type: ignore
                    self.extra: Role = role
            elif self.action.name.startswith('stage_instance'):
                channel_id = int(self.extra['channel_id'])
                elems = {'channel': self.guild.get_channel(channel_id) or Object(id=channel_id)}
                self.extra: _AuditLogProxyStageInstanceAction = type('_AuditLogProxy', (), elems)()

        # fmt: off
        self.extra: Union[
            _AuditLogProxyMemberPrune,
            _AuditLogProxyMemberMoveOrMessageDelete,
            _AuditLogProxyMemberDisconnect,
            _AuditLogProxyPinAction,
            _AuditLogProxyStageInstanceAction,
            Member, User, None,
            Role,
        ]
        # fmt: on

        # this key is not present when the above is present, typically.
        # It's a list of { new_value: a, old_value: b, key: c }
        # where new_value and old_value are not guaranteed to be there depending
        # on the action type, so let's just fetch it for now and only turn it
        # into meaningful data when requested
        self._changes = data.get('changes', [])

        self.user = self._get_member(utils._get_as_snowflake(data, 'user_id'))  # type: ignore
        self._target_id = utils._get_as_snowflake(data, 'target_id')

    def _get_member(self, user_id: int) -> Union[Member, User, None]:
        # Prefer the guild member cache; fall back to the users supplied
        # alongside the audit log payload.
        return self.guild.get_member(user_id) or self._users.get(user_id)

    def __repr__(self) -> str:
        return f'<AuditLogEntry id={self.id} action={self.action} user={self.user!r}>'

    @utils.cached_property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: Returns the entry's creation time in UTC."""
        return utils.snowflake_time(self.id)

    @utils.cached_property
    def target(self) -> Union[Guild, abc.GuildChannel, Member, User, Role, Invite, Emoji, StageInstance, GuildSticker, Thread, Object, None]:
        # Dispatches to a _convert_target_<type> method based on the action;
        # unknown target types degrade to a bare Object.
        try:
            converter = getattr(self, '_convert_target_' + self.action.target_type)
        except AttributeError:
            return Object(id=self._target_id)
        else:
            return converter(self._target_id)

    @utils.cached_property
    def category(self) -> enums.AuditLogActionCategory:
        """Optional[:class:`AuditLogActionCategory`]: The category of the action, if applicable."""
        return self.action.category

    @utils.cached_property
    def changes(self) -> AuditLogChanges:
        """:class:`AuditLogChanges`: The list of changes this entry has."""
        obj = AuditLogChanges(self, self._changes)
        # The raw change list is only needed once; drop it after parsing.
        del self._changes
        return obj

    @utils.cached_property
    def before(self) -> AuditLogDiff:
        """:class:`AuditLogDiff`: The target's prior state."""
        return self.changes.before

    @utils.cached_property
    def after(self) -> AuditLogDiff:
        """:class:`AuditLogDiff`: The target's subsequent state."""
        return self.changes.after

    def _convert_target_guild(self, target_id: int) -> Guild:
        return self.guild

    def _convert_target_channel(self, target_id: int) -> Union[abc.GuildChannel, Object]:
        return self.guild.get_channel(target_id) or Object(id=target_id)

    def _convert_target_user(self, target_id: int) -> Union[Member, User, None]:
        return self._get_member(target_id)

    def _convert_target_role(self, target_id: int) -> Union[Role, Object]:
        return self.guild.get_role(target_id) or Object(id=target_id)

    def _convert_target_invite(self, target_id: int) -> Invite:
        # invites have target_id set to null
        # so figure out which change has the full invite data
        changeset = self.before if self.action is enums.AuditLogAction.invite_delete else self.after

        fake_payload = {
            'max_age': changeset.max_age,
            'max_uses': changeset.max_uses,
            'code': changeset.code,
            'temporary': changeset.temporary,
            'uses': changeset.uses,
        }

        obj = Invite(state=self._state, data=fake_payload, guild=self.guild, channel=changeset.channel)  # type: ignore
        try:
            obj.inviter = changeset.inviter
        except AttributeError:
            pass
        return obj

    def _convert_target_emoji(self, target_id: int) -> Union[Emoji, Object]:
        return self._state.get_emoji(target_id) or Object(id=target_id)

    def _convert_target_message(self, target_id: int) -> Union[Member, User, None]:
        return self._get_member(target_id)

    def _convert_target_stage_instance(self, target_id: int) -> Union[StageInstance, Object]:
        return self.guild.get_stage_instance(target_id) or Object(id=target_id)

    def _convert_target_sticker(self, target_id: int) -> Union[GuildSticker, Object]:
        return self._state.get_sticker(target_id) or Object(id=target_id)

    def _convert_target_thread(self, target_id: int) -> Union[Thread, Object]:
        return self.guild.get_thread(target_id) or Object(id=target_id)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/audit_logs.py
audit_logs.py
from __future__ import annotations

from typing import Optional, TYPE_CHECKING, Union

import os
import io

__all__ = (
    'File',
)


class File:
    r"""A parameter object used for :meth:`abc.Messageable.send`, wrapping a
    file-like object or an on-disk path to upload.

    .. note::

        File objects are single use and are not meant to be reused in
        multiple :meth:`abc.Messageable.send`\s.

    Attributes
    -----------
    fp: Union[:class:`os.PathLike`, :class:`io.BufferedIOBase`]
        A seekable, readable file-like object opened in binary mode, or a
        filename on disk to open.

        .. note::

            If the file-like object passed is opened via ``open`` then the
            modes 'rb' should be used.

            To pass binary data, consider usage of ``io.BytesIO``.

    filename: Optional[:class:`str`]
        The filename shown when uploading to Discord.  Defaults to
        ``fp.name``, or to the basename when ``fp`` is a path string.
    spoiler: :class:`bool`
        Whether the attachment is a spoiler.
    """

    __slots__ = ('fp', 'filename', 'spoiler', '_original_pos', '_owner', '_closer')

    if TYPE_CHECKING:
        fp: io.BufferedIOBase
        filename: Optional[str]
        spoiler: bool

    def __init__(
        self,
        fp: Union[str, bytes, os.PathLike, io.BufferedIOBase],
        filename: Optional[str] = None,
        *,
        spoiler: bool = False,
    ):
        if not isinstance(fp, io.IOBase):
            # Got a path: open the handle ourselves, so we also own it and
            # are responsible for closing it in close().
            self.fp = open(fp, 'rb')
            self._original_pos = 0
            self._owner = True
        else:
            if not (fp.seekable() and fp.readable()):
                raise ValueError(f'File buffer {fp!r} must be seekable and readable')
            self.fp = fp
            self._original_pos = fp.tell()
            self._owner = False

        # aiohttp closes any stream it uploads; stub out ``close`` so the
        # handle stays open until *we* decide to release it in close().
        self._closer = self.fp.close
        self.fp.close = lambda: None

        if filename is not None:
            self.filename = filename
        elif isinstance(fp, str):
            self.filename = os.path.split(fp)[1]
        else:
            self.filename = getattr(fp, 'name', None)

        # An explicit spoiler request is reflected in the filename prefix, and
        # a pre-prefixed filename likewise forces the spoiler flag on.
        if spoiler and self.filename is not None and not self.filename.startswith('SPOILER_'):
            self.filename = 'SPOILER_' + self.filename

        self.spoiler = spoiler or (self.filename is not None and self.filename.startswith('SPOILER_'))

    def reset(self, *, seek: Union[int, bool] = True) -> None:
        """Rewind the stream to its original position.

        The HTTP retry loop passes its iteration index here; index 0 (falsy)
        skips the redundant seek before the very first request.
        """
        if not seek:
            return
        self.fp.seek(self._original_pos)

    def close(self) -> None:
        """Release the stream, restoring the real ``close`` stubbed out in
        ``__init__`` and closing the handle only if this object opened it."""
        self.fp.close = self._closer
        if self._owner:
            self._closer()
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/file.py
file.py
from __future__ import annotations

from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union

from .enums import try_enum, ComponentType, ButtonStyle
from .utils import get_slots, MISSING
from .partial_emoji import PartialEmoji, _EmojiTag

if TYPE_CHECKING:
    from .types.components import (
        Component as ComponentPayload,
        ButtonComponent as ButtonComponentPayload,
        SelectMenu as SelectMenuPayload,
        SelectOption as SelectOptionPayload,
        ActionRow as ActionRowPayload,
    )

    from .emoji import Emoji

__all__ = (
    'Component',
    'ActionRow',
    'Button',
    'SelectMenu',
    'SelectOption',
)

C = TypeVar('C', bound='Component')


class Component:
    """Represents a Discord Bot UI Kit Component.

    Currently, the only components supported by Discord are:

    - :class:`ActionRow`
    - :class:`Button`
    - :class:`SelectMenu`

    This class is abstract and cannot be instantiated.

    .. versionadded:: 2.0

    Attributes
    ------------
    type: :class:`ComponentType`
        The type of component.
    """

    __slots__: Tuple[str, ...] = ('type',)

    # Subclasses point this at their __slots__ to drive __repr__.
    __repr_info__: ClassVar[Tuple[str, ...]]
    type: ComponentType

    def __repr__(self) -> str:
        attrs = ' '.join(f'{key}={getattr(self, key)!r}' for key in self.__repr_info__)
        return f'<{self.__class__.__name__} {attrs}>'

    @classmethod
    def _raw_construct(cls: Type[C], **kwargs) -> C:
        # Build an instance without running __init__, assigning only the
        # slots for which a keyword was provided.
        self: C = cls.__new__(cls)
        for slot in get_slots(cls):
            try:
                value = kwargs[slot]
            except KeyError:
                pass
            else:
                setattr(self, slot, value)
        return self

    def to_dict(self) -> Dict[str, Any]:
        # Abstract: subclasses serialise themselves to an API payload.
        raise NotImplementedError


class ActionRow(Component):
    """Represents a Discord Bot UI Kit Action Row.

    This is a component that holds up to 5 children
    components in a row.

    This inherits from :class:`Component`.

    .. versionadded:: 2.0

    Attributes
    ------------
    type: :class:`ComponentType`
        The type of component.
    children: List[:class:`Component`]
        The children components that this holds, if any.
    """

    __slots__: Tuple[str, ...] = ('children',)

    __repr_info__: ClassVar[Tuple[str, ...]] = __slots__

    def __init__(self, data: ComponentPayload):
        self.type: ComponentType = try_enum(ComponentType, data['type'])
        # Children are parsed recursively through the factory below.
        self.children: List[Component] = [_component_factory(d) for d in data.get('components', [])]

    def to_dict(self) -> ActionRowPayload:
        return {
            'type': int(self.type),
            'components': [child.to_dict() for child in self.children],
        }  # type: ignore


class Button(Component):
    """Represents a button from the Discord Bot UI Kit.

    This inherits from :class:`Component`.

    .. note::

        The user constructible and usable type to create a button is :class:`discord.ui.Button`
        not this one.

    .. versionadded:: 2.0

    Attributes
    -----------
    style: :class:`.ButtonStyle`
        The style of the button.
    custom_id: Optional[:class:`str`]
        The ID of the button that gets received during an interaction.
        If this button is for a URL, it does not have a custom ID.
    url: Optional[:class:`str`]
        The URL this button sends you to.
    disabled: :class:`bool`
        Whether the button is disabled or not.
    label: Optional[:class:`str`]
        The label of the button, if any.
    emoji: Optional[:class:`PartialEmoji`]
        The emoji of the button, if available.
    """

    __slots__: Tuple[str, ...] = (
        'style',
        'custom_id',
        'url',
        'disabled',
        'label',
        'emoji',
    )

    __repr_info__: ClassVar[Tuple[str, ...]] = __slots__

    def __init__(self, data: ButtonComponentPayload):
        self.type: ComponentType = try_enum(ComponentType, data['type'])
        self.style: ButtonStyle = try_enum(ButtonStyle, data['style'])
        self.custom_id: Optional[str] = data.get('custom_id')
        self.url: Optional[str] = data.get('url')
        self.disabled: bool = data.get('disabled', False)
        self.label: Optional[str] = data.get('label')
        self.emoji: Optional[PartialEmoji]
        try:
            self.emoji = PartialEmoji.from_dict(data['emoji'])
        except KeyError:
            self.emoji = None

    def to_dict(self) -> ButtonComponentPayload:
        payload = {
            'type': 2,
            'style': int(self.style),
            'label': self.label,
            'disabled': self.disabled,
        }
        # Optional fields are included only when actually set.
        if self.custom_id:
            payload['custom_id'] = self.custom_id

        if self.url:
            payload['url'] = self.url

        if self.emoji:
            payload['emoji'] = self.emoji.to_dict()

        return payload  # type: ignore


class SelectMenu(Component):
    """Represents a select menu from the Discord Bot UI Kit.

    A select menu is functionally the same as a dropdown, however
    on mobile it renders a bit differently.

    .. note::

        The user constructible and usable type to create a select menu is
        :class:`discord.ui.Select` not this one.

    .. versionadded:: 2.0

    Attributes
    ------------
    custom_id: Optional[:class:`str`]
        The ID of the select menu that gets received during an interaction.
    placeholder: Optional[:class:`str`]
        The placeholder text that is shown if nothing is selected, if any.
    min_values: :class:`int`
        The minimum number of items that must be chosen for this select menu.
        Defaults to 1 and must be between 1 and 25.
    max_values: :class:`int`
        The maximum number of items that must be chosen for this select menu.
        Defaults to 1 and must be between 1 and 25.
    options: List[:class:`SelectOption`]
        A list of options that can be selected in this menu.
    disabled: :class:`bool`
        Whether the select is disabled or not.
    """

    __slots__: Tuple[str, ...] = (
        'custom_id',
        'placeholder',
        'min_values',
        'max_values',
        'options',
        'disabled',
    )

    __repr_info__: ClassVar[Tuple[str, ...]] = __slots__

    def __init__(self, data: SelectMenuPayload):
        self.type: ComponentType = ComponentType.select
        self.custom_id: str = data['custom_id']
        self.placeholder: Optional[str] = data.get('placeholder')
        self.min_values: int = data.get('min_values', 1)
        self.max_values: int = data.get('max_values', 1)
        self.options: List[SelectOption] = [SelectOption.from_dict(option) for option in data.get('options', [])]
        self.disabled: bool = data.get('disabled', False)

    def to_dict(self) -> SelectMenuPayload:
        payload: SelectMenuPayload = {
            'type': self.type.value,
            'custom_id': self.custom_id,
            'min_values': self.min_values,
            'max_values': self.max_values,
            'options': [op.to_dict() for op in self.options],
            'disabled': self.disabled,
        }

        if self.placeholder:
            payload['placeholder'] = self.placeholder

        return payload


class SelectOption:
    """Represents a select menu's option.

    These can be created by users.

    .. versionadded:: 2.0

    Attributes
    -----------
    label: :class:`str`
        The label of the option. This is displayed to users.
        Can only be up to 100 characters.
    value: :class:`str`
        The value of the option. This is not displayed to users.
        If not provided when constructed then it defaults to the
        label. Can only be up to 100 characters.
    description: Optional[:class:`str`]
        An additional description of the option, if any.
        Can only be up to 100 characters.
    emoji: Optional[Union[:class:`str`, :class:`Emoji`, :class:`PartialEmoji`]]
        The emoji of the option, if available.
    default: :class:`bool`
        Whether this option is selected by default.
    """

    __slots__: Tuple[str, ...] = (
        'label',
        'value',
        'description',
        'emoji',
        'default',
    )

    def __init__(
        self,
        *,
        label: str,
        value: str = MISSING,
        description: Optional[str] = None,
        emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
        default: bool = False,
    ) -> None:
        self.label = label
        # The value falls back to the label when not explicitly given.
        self.value = label if value is MISSING else value
        self.description = description

        if emoji is not None:
            # Normalise str / Emoji inputs into a PartialEmoji.
            if isinstance(emoji, str):
                emoji = PartialEmoji.from_str(emoji)
            elif isinstance(emoji, _EmojiTag):
                emoji = emoji._to_partial()
            else:
                raise TypeError(f'expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}')

        self.emoji = emoji
        self.default = default

    def __repr__(self) -> str:
        return (
            f'<SelectOption label={self.label!r} value={self.value!r} description={self.description!r} '
            f'emoji={self.emoji!r} default={self.default!r}>'
        )

    def __str__(self) -> str:
        if self.emoji:
            base = f'{self.emoji} {self.label}'
        else:
            base = self.label

        if self.description:
            return f'{base}\n{self.description}'
        return base

    @classmethod
    def from_dict(cls, data: SelectOptionPayload) -> SelectOption:
        try:
            emoji = PartialEmoji.from_dict(data['emoji'])
        except KeyError:
            emoji = None

        return cls(
            label=data['label'],
            value=data['value'],
            description=data.get('description'),
            emoji=emoji,
            default=data.get('default', False),
        )

    def to_dict(self) -> SelectOptionPayload:
        payload: SelectOptionPayload = {
            'label': self.label,
            'value': self.value,
            'default': self.default,
        }

        if self.emoji:
            payload['emoji'] = self.emoji.to_dict()  # type: ignore

        if self.description:
            payload['description'] = self.description

        return payload


def _component_factory(data: ComponentPayload) -> Component:
    # Map the raw integer component type onto a concrete class; unknown
    # types degrade to a bare Component so parsing never fails outright.
    component_type = data['type']
    if component_type == 1:
        return ActionRow(data)
    elif component_type == 2:
        return Button(data)  # type: ignore
    elif component_type == 3:
        return SelectMenu(data)  # type: ignore
    else:
        as_enum = try_enum(ComponentType, component_type)
        return Component._raw_construct(type=as_enum)
zarenacord
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/components.py
components.py