diff --git a/.ameba.yml b/.ameba.yml new file mode 100644 index 0000000000000000000000000000000000000000..ee779570c94f2f1fac88e8aa4a7cff585fd45574 --- /dev/null +++ b/.ameba.yml @@ -0,0 +1,5 @@ +Style/RedundantReturn: + Description: Reports redundant return expressions + AllowMultiReturn: true + AllowEmptyReturn: true + Enabled: false diff --git a/.rubocop.yml b/.rubocop.yml new file mode 100644 index 0000000000000000000000000000000000000000..18a279c5a3eeb6464286c9b801e4afc66639ca00 --- /dev/null +++ b/.rubocop.yml @@ -0,0 +1,42 @@ +AllCops: + NewCops: enable + +Style/FormatStringToken: + Description: 'Use a consistent style for format string tokens.' + Enabled: false + +Style/CommandLiteral: + Description: 'Use `` or %x around command literals.' + Enabled: false + +Style/PerlBackrefs: + Description: 'Avoid Perl-style regex back references.' + Enabled: false + +Metrics/MethodLength: + Enabled: true + CountComments: false # count full line comments? + CountAsOne: ['array', 'hash', 'heredoc'] + Max: 15 + +Metrics/BlockLength: + Enabled: true + CountComments: false # count full line comments? + CountAsOne: ['array', 'hash', 'heredoc'] + ExcludedMethods: ['refine', 'OptionParser.new'] + Max: 15 + +Style/RedundantReturn: + Enabled: false + +Style/IfUnlessModifier: + Enabled: false + +Style/NumericLiterals: + Enabled: false + +Layout/HeredocIndentation: + Enabled: false + +Style/BlockComments: + Enabled: false diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 0000000000000000000000000000000000000000..285514fb66823a9e3b2fd520440207570510f168 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,23 @@ +# system-wide setup + +## Debian packages + + sudo apt-get install docker.io + +## openEuler packages + + sudo dnf install docker + +# per-user setup + +## git repo + + git clone https://gitee.com/wu_fengguang/compass-ci.git + + cd compass-ci + echo "export CCI_SRC=$PWD" >> $HOME/.${SHELL##*/}rc + echo "PATH=$PATH:$PWD/sbin" >> $HOME/.${SHELL##*/}rc + +## packages + + gem install rest-client diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..bb1837f2d6e8270f79f3aaf6e5c2d73cc34d7a81 --- /dev/null +++ b/LICENSE @@ -0,0 +1,24 @@ +The compass-ci is provided under the terms of the Mulan Permissive +Software License Version 2 or any later version: + + SPDX-License-Identifier: MulanPSL-2.0+ + +as provided in + + LICENSES/MulanPSL-2.0 + +In addition, other licenses may also apply to individual source files. +Please see: + + LICENSES/GPL-2.0 + LICENSES/GPL-3.0 + LICENSES/MIT + +for more details. + +Aside from that, individual files can be provided under a dual license, +e.g. + + SPDX-License-Identifier: MulanPSL-2.0 OR GPL-2.0 + +All contributions to the compass-ci are subject to this LICENSE file. diff --git a/LICENSES/GPL-2.0 b/LICENSES/GPL-2.0 new file mode 100644 index 0000000000000000000000000000000000000000..018633b2265cc54e43358cfc016e072fbede59e6 --- /dev/null +++ b/LICENSES/GPL-2.0 @@ -0,0 +1,358 @@ +Valid-License-Identifier: GPL-2.0 +Valid-License-Identifier: GPL-2.0-only +Valid-License-Identifier: GPL-2.0+ +Valid-License-Identifier: GPL-2.0-or-later +SPDX-URL: https://spdx.org/licenses/GPL-2.0.html +Usage-Guide: + To use this license in source code, put one of the following SPDX + tag/value pairs into a comment of the file.
+ For 'GNU General Public License (GPL) version 2 only' use: + SPDX-License-Identifier: GPL-2.0 + or + SPDX-License-Identifier: GPL-2.0-only + For 'GNU General Public License (GPL) version 2 or any later version' use: + SPDX-License-Identifier: GPL-2.0+ + or + SPDX-License-Identifier: GPL-2.0-or-later +License-Text: + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. 
Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details.
+ +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/LICENSES/GPL-3.0 b/LICENSES/GPL-3.0 new file mode 100644 index 0000000000000000000000000000000000000000..b292ed470768c7cb62e91a8a5b43d9f4c1160f54 --- /dev/null +++ b/LICENSES/GPL-3.0 @@ -0,0 +1,692 @@ +Valid-License-Identifier: GPL-3.0 +Valid-License-Identifier: GPL-3.0-only +Valid-License-Identifier: GPL-3.0+ +Valid-License-Identifier: GPL-3.0-or-later +SPDX-URL: https://spdx.org/licenses/GPL-3.0.html +Usage-Guide: + To use this license in source code, put one of the following SPDX + tag/value pairs into a comment of the file. + For 'GNU General Public License (GPL) version 3 only' use: + SPDX-License-Identifier: GPL-3.0 + or + SPDX-License-Identifier: GPL-3.0-only + For 'GNU General Public License (GPL) version 3 or any later version' use: + SPDX-License-Identifier: GPL-3.0+ + or + SPDX-License-Identifier: GPL-3.0-or-later +License-Text: + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others.
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/LICENSES/MIT b/LICENSES/MIT new file mode 100644 index 0000000000000000000000000000000000000000..088787026f3fdddb80abc7f963d55ff4d707d3d3 --- /dev/null +++ b/LICENSES/MIT @@ -0,0 +1,29 @@ +Valid-License-Identifier: MIT +SPDX-URL: https://spdx.org/licenses/MIT.html +Usage-Guide: + To use the MIT License put the following SPDX tag/value pair into a + comment of the file: + SPDX-License-Identifier: MIT +License-Text: + +MIT License + +Copyright (c) <year> <copyright holders> + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/LICENSES/MulanPSL-2.0 b/LICENSES/MulanPSL-2.0 new file mode 100644 index 0000000000000000000000000000000000000000..01a1c125848a8664e968f34da2eb6ee60d0b188e --- /dev/null +++ b/LICENSES/MulanPSL-2.0 @@ -0,0 +1,143 @@ +Valid-License-Identifier: MulanPSL-2.0 +Valid-License-Identifier: MulanPSL-2.0-only +Valid-License-Identifier: MulanPSL-2.0+ +Valid-License-Identifier: MulanPSL-2.0-or-later +SPDX-URL: https://spdx.org/licenses/MulanPSL-2.0.html +Usage-Guide: + To use this License put the following SPDX tag/value pairs into a + comment of the file. + For 'Mulan Permissive Software License Version 2 only' use one of: + SPDX-License-Identifier: MulanPSL-2.0 + SPDX-License-Identifier: MulanPSL-2.0-only + For 'Mulan Permissive Software License Version 2 or any later version' use one of: + SPDX-License-Identifier: MulanPSL-2.0+ + SPDX-License-Identifier: MulanPSL-2.0-or-later +License-Text: + + 木兰宽松许可证, 第2版 + + 木兰宽松许可证, 第2版 + 2020年1月 http://license.coscl.org.cn/MulanPSL2 + + + 您对“软件”的复制、使用、修改及分发受木兰宽松许可证,第2版(“本许可证”)的如下条款的约束: + + 0. 定义 + + “软件”是指由“贡献”构成的许可在“本许可证”下的程序和相关文档的集合。 + + “贡献”是指由任一“贡献者”许可在“本许可证”下的受版权法保护的作品。 + + “贡献者”是指将受版权法保护的作品许可在“本许可证”下的自然人或“法人实体”。 + + “法人实体”是指提交贡献的机构及其“关联实体”。 + + “关联实体”是指,对“本许可证”下的行为方而言,控制、受控制或与其共同受控制的机构,此处的控制是指有受控方或共同受控方至少50%直接或间接的投票权、资金或其他有价证券。 + + 1. 授予版权许可 + + 每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的版权许可,您可以复制、使用、修改、分发其“贡献”,不论修改与否。 + + 2. 授予专利许可 + + 每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的(根据本条规定撤销除外)专利许可,供您制造、委托制造、使用、许诺销售、销售、进口其“贡献”或以其他方式转移其“贡献”。前述专利许可仅限于“贡献者”现在或将来拥有或控制的其“贡献”本身或其“贡献”与许可“贡献”时的“软件”结合而将必然会侵犯的专利权利要求,不包括对“贡献”的修改或包含“贡献”的其他结合。如果您或您的“关联实体”直接或间接地,就“软件”或其中的“贡献”对任何人发起专利侵权诉讼(包括反诉或交叉诉讼)或其他专利维权行动,指控其侵犯专利权,则“本许可证”授予您对“软件”的专利许可自您提起诉讼或发起维权行动之日终止。 + + 3. 无商标许可 + + “本许可证”不提供对“贡献者”的商品名称、商标、服务标志或产品名称的商标许可,但您为满足第4条规定的声明义务而必须使用除外。 + + 4. 分发限制 + + 您可以在任何媒介中将“软件”以源程序形式或可执行形式重新分发,不论修改与否,但您必须向接收者提供“本许可证”的副本,并保留“软件”中的版权、商标、专利及免责声明。 + + 5. 免责声明与责任限制 + + “软件”及其中的“贡献”在提供时不带任何明示或默示的担保。在任何情况下,“贡献者”或版权所有者不对任何人因使用“软件”或其中的“贡献”而引发的任何直接或间接损失承担责任,不论因何种原因导致或者基于何种法律理论,即使其曾被建议有此种损失的可能性。 + + 6. 语言 + “本许可证”以中英文双语表述,中英文版本具有同等法律效力。如果中英文版本存在任何冲突不一致,以中文版为准。 + + 条款结束 + + 如何将木兰宽松许可证,第2版,应用到您的软件 + + 如果您希望将木兰宽松许可证,第2版,应用到您的新软件,为了方便接收者查阅,建议您完成如下三步: + + 1, 请您补充如下声明中的空白,包括软件名、软件的首次发表年份以及您作为版权人的名字; + + 2, 请您在软件包的一级目录下创建以“LICENSE”为名的文件,将整个许可证文本放入该文件中; + + 3, 请将如下声明文本放入每个源文件的头部注释中。 + + Copyright (c) [Year] [name of copyright holder] + [Software Name] is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. 
+ + + Mulan Permissive Software License,Version 2 + + Mulan Permissive Software License,Version 2 (Mulan PSL v2) + January 2020 http://license.coscl.org.cn/MulanPSL2 + + Your reproduction, use, modification and distribution of the Software shall be subject to Mulan PSL v2 (this License) with the following terms and conditions: + + 0. Definition + + Software means the program and related documents which are licensed under this License and comprise all Contribution(s). + + Contribution means the copyrightable work licensed by a particular Contributor under this License. + + Contributor means the Individual or Legal Entity who licenses its copyrightable work under this License. + + Legal Entity means the entity making a Contribution and all its Affiliates. + + Affiliates means entities that control, are controlled by, or are under common control with the acting entity under this License, ‘control’ means direct or indirect ownership of at least fifty percent (50%) of the voting power, capital or other securities of controlled or commonly controlled entity. + + 1. Grant of Copyright License + + Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable copyright license to reproduce, use, modify, or distribute its Contribution, with modification or not. + + 2. Grant of Patent License + + Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable (except for revocation under this Section) patent license to make, have made, use, offer for sale, sell, import or otherwise transfer its Contribution, where such patent license is only limited to the patent claims owned or controlled by such Contributor now or in future which will be necessarily infringed by its Contribution alone, or by combination of the Contribution with the Software to which the Contribution was contributed. The patent license shall not apply to any modification of the Contribution, and any other combination which includes the Contribution. If you or your Affiliates directly or indirectly institute patent litigation (including a cross claim or counterclaim in a litigation) or other patent enforcement activities against any individual or entity by alleging that the Software or any Contribution in it infringes patents, then any patent license granted to you under this License for the Software shall terminate as of the date such litigation or activity is filed or taken. + + 3. No Trademark License + + No trademark license is granted to use the trade names, trademarks, service marks, or product names of Contributor, except as required to fulfill notice requirements in Section 4. + + 4. Distribution Restriction + + You may distribute the Software in any medium with or without modification, whether in source or executable forms, provided that you provide recipients with a copy of this License and retain copyright, patent, trademark and disclaimer statements in the Software. + + 5. Disclaimer of Warranty and Limitation of Liability + + THE SOFTWARE AND CONTRIBUTION IN IT ARE PROVIDED WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
IN NO EVENT SHALL ANY CONTRIBUTOR OR COPYRIGHT HOLDER BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE SOFTWARE OR THE CONTRIBUTION IN IT, NO MATTER HOW IT’S CAUSED OR BASED ON WHICH LEGAL THEORY, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + + 6. Language + + THIS LICENSE IS WRITTEN IN BOTH CHINESE AND ENGLISH, AND THE CHINESE VERSION AND ENGLISH VERSION SHALL HAVE THE SAME LEGAL EFFECT. IN THE CASE OF DIVERGENCE BETWEEN THE CHINESE AND ENGLISH VERSIONS, THE CHINESE VERSION SHALL PREVAIL. + + END OF THE TERMS AND CONDITIONS + + How to Apply the Mulan Permissive Software License,Version 2 (Mulan PSL v2) to Your Software + + To apply the Mulan PSL v2 to your work, for easy identification by recipients, you are suggested to complete following three steps: + + i Fill in the blanks in following statement, including insert your software name, the year of the first publication of your software, and your name identified as the copyright owner; + + ii Create a file named “LICENSE” which contains the whole context of this License in the first directory of your software package; + + iii Attach the statement to the appropriate annotated syntax at the beginning of each source file. + + + Copyright (c) [Year] [name of copyright holder] + [Software Name] is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. diff --git a/README.en.md b/README.en.md index 501a9b902529f4b4299ffec59e532d181182860a..eff6336d9c0a1e17f9423706041b5c923511a984 100644 --- a/README.en.md +++ b/README.en.md @@ -1,4 +1,4 @@ -# crystal-ci +# compass-ci #### Description Crystal CI diff --git a/README.md b/README.md index a7d16ebb899ac5694028d001e553608c8426c5f4..28e6b07490ba3d9a55a5804fcf895c3c9d151981 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# crystal-ci +# compass-ci #### 介绍 Crystal CI diff --git a/container/add-software/Dockerfile b/container/add-software/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..611894379d6831b94adb12b8f777a6f258dcaaeb --- /dev/null +++ b/container/add-software/Dockerfile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM centos:8 + +MAINTAINER Xu Xijian + +RUN yum install -y rsync diff --git a/container/add-software/build b/container/add-software/build new file mode 100755 index 0000000000000000000000000000000000000000..234c8b9e4bc84b1f070486a37fce9cc86688ddf5 --- /dev/null +++ b/container/add-software/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t addsoftware:auto . diff --git a/container/add-software/run b/container/add-software/run new file mode 100755 index 0000000000000000000000000000000000000000..99490ee32311a2d8475daacc7f15915d35163bf4 --- /dev/null +++ b/container/add-software/run @@ -0,0 +1,48 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
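+# Overview: snapshot the base rootfs given as $1 into a timestamped copy
+# (rsync inside a container), then yum-install the requested packages into
+# that copy via --installroot, logging to install-<os>-<arch>-<version>.log.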
+
+[ -L "$1/vmlinuz" -a -n "$2" ] || {
+    cat << 'EOF'
+    Example usage:
+        1> ./run /os/centos/aarch64/76 pack1 pack2 ...
+        2> ./run /os/centos/aarch64/76 $(show-depends-packages centos)
+           centos is an adaptation file containing package mappings from debian to centos;
+           its full path is '$LKP_SRC/distro/adaptation/centos'.
+EOF
+    exit
+}
+
+basedir=$(dirname $1)
+basever=$(basename $1)
+newver=$basever-$(date +'%Y%m%d%H%M%S')
+rootdir=$basedir/$newver
+rsync_cmds=(
+    docker run
+    --rm
+    -v "$basedir":/mnt
+    addsoftware:auto
+    bash -c "
+    umask 0022
+    rsync -ap /mnt/$basever/* /mnt/$newver
+    "
+)
+
+packages="${@:2}"
+osinfo=(${rootdir//\// })
+os=${osinfo[1]}
+arch=${osinfo[2]}
+version=${osinfo[3]}
+install_cmds=(
+    docker run
+    --rm
+    -v "$rootdir":/mnt/root
+    addsoftware:auto
+    yum install -y --skip-broken --installroot=/mnt/root $packages
+)
+
+echo "rsync rootfs from $basever to $newver..."
+"${rsync_cmds[@]}"
+
+echo "install packages under $rootdir..." | tee install-$os-$arch-$version.log
+"${install_cmds[@]}" | tee -a install-$os-$arch-$version.log
diff --git a/container/alpine/Dockerfile b/container/alpine/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4bc03d6224050f1adee030668ef6abd9c34b24e3
--- /dev/null
+++ b/container/alpine/Dockerfile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+# 3.12 cannot install crystal
+FROM alpine:3.11
+
+MAINTAINER Wu Fenguang
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN apk update
+RUN apk add shadow bash zsh git openssh openrc rsync crystal shards make gcc g++ tzdata sudo coreutils util-linux libffi-dev openssl-dev libtool yaml-dev vim && shards init
+
+EXPOSE 2200
+CMD ["/usr/sbin/sshd", "-D", "-p", "2200"]
diff --git a/container/alpine/build b/container/alpine/build
new file mode 100755
index 0000000000000000000000000000000000000000..bd277c3639906f54976f86a4cee75fc560a45f07
--- /dev/null
+++ b/container/alpine/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t alpine:testbed .
diff --git a/container/alpine/first-run b/container/alpine/first-run
new file mode 100755
index 0000000000000000000000000000000000000000..5bfa259526a90dbda1998735317f7e0b5c06aa41
--- /dev/null
+++ b/container/alpine/first-run
@@ -0,0 +1,6 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+export OS=alpine
+../first-run.sh
diff --git a/container/alpine/start b/container/alpine/start
new file mode 100755
index 0000000000000000000000000000000000000000..a59ad9b236f3d984c129c713066182db45e18650
--- /dev/null
+++ b/container/alpine/start
@@ -0,0 +1,26 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm alpine
+
+cmd=(
+    docker run
+    --restart=always
+    --name alpine
+    -d
+    -v alpine-home:/home
+    -v /etc/localtime:/etc/localtime:ro
+    -v alpine-root:/root
+    -v /c:/c
+    -v /srv/os:/srv/os
+    -p 2200:2200
+    --hostname alpine
+    --security-opt seccomp=unconfined
+    alpine:testbed
+    /usr/sbin/sshd -D -p 2200
+)
+
+"${cmd[@]}"
diff --git a/container/archlinux/Dockerfile b/container/archlinux/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..c0f05d369d9b50cd9f297c18173d497615730c5f
--- /dev/null
+++ b/container/archlinux/Dockerfile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM lopsided/archlinux
+
+MAINTAINER Wu Fenguang
+
+COPY root /
+RUN chmod 755 /etc
+RUN pacman --needed --noprogressbar --noconfirm -Syu && \
+    pacman --needed --noprogressbar --noconfirm -S bash zsh git openssh rsync make gcc tzdata sudo coreutils util-linux vim gawk
diff --git a/container/archlinux/build b/container/archlinux/build
new file mode 100755
index 0000000000000000000000000000000000000000..81feda2a02f8b32041b77e00ef9a886c15fb810a
--- /dev/null
+++ b/container/archlinux/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t archlinux:testbed .
diff --git a/container/archlinux/first-run b/container/archlinux/first-run
new file mode 100755
index 0000000000000000000000000000000000000000..c3d9fc92c12b851e400b337e942e6eaec467ab24
--- /dev/null
+++ b/container/archlinux/first-run
@@ -0,0 +1,6 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+export OS=archlinux
+../first-run.sh
diff --git a/container/archlinux/root/etc/pacman.d/mirrorlist b/container/archlinux/root/etc/pacman.d/mirrorlist
new file mode 100644
index 0000000000000000000000000000000000000000..37e14425d934586e37ff4f84efc204d5008161b6
--- /dev/null
+++ b/container/archlinux/root/etc/pacman.d/mirrorlist
@@ -0,0 +1,3 @@
+Server = http://mirrors.tuna.tsinghua.edu.cn/archlinuxarm/$arch/$repo
+Server = http://mirrors.163.com/archlinuxarm/$arch/$repo
+Server = http://mirror.archlinuxarm.org/$arch/$repo
diff --git a/container/archlinux/start b/container/archlinux/start
new file mode 100755
index 0000000000000000000000000000000000000000..be274a547bab43ee5735688a3181758d10da03f8
--- /dev/null
+++ b/container/archlinux/start
@@ -0,0 +1,26 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm archlinux
+
+cmd=(
+    docker run
+    --restart=always
+    --name archlinux
+    -v archlinux-home:/home
+    -v /etc/localtime:/etc/localtime:ro
+    -v archlinux-root:/root
+    -v /c:/c
+    -v /srv/os:/srv/os
+    -p 2202:2202
+    --hostname archlinux
+    --security-opt seccomp=unconfined
+    -d
+    archlinux:testbed
+    /usr/sbin/sshd -D -p 2202
+)
+
+"${cmd[@]}"
diff --git a/container/assign-account/Dockerfile b/container/assign-account/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..f84b5e6b9e9360ed9cb88d4778eb5bcc75924ab0
--- /dev/null
+++ b/container/assign-account/Dockerfile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
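+# Sinatra web service that hands out pre-created jumper accounts over HTTP;
+# see assign-account.rb and get_account_info.rb below for the two halves.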
+
+FROM debian
+MAINTAINER luanshd0525@163.com
+ENV DEBIAN_FRONTEND noninteractive
+
+COPY conf/sources.list* /etc/apt/
+
+RUN apt-get update && \
+    apt-get install -y ruby-sinatra ruby-json curl && \
+    ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
+    echo 'Asia/Shanghai' > /etc/timezone
+COPY assign-account.rb /usr/local/bin/
+COPY get_account_info.rb /usr/local/bin/
+
+CMD ["/usr/local/bin/assign-account.rb"]
diff --git a/container/assign-account/answerback-email.rb b/container/assign-account/answerback-email.rb
new file mode 100755
index 0000000000000000000000000000000000000000..b5283ba72c6c15f6085c023cc4cf7f38a675a21f
--- /dev/null
+++ b/container/assign-account/answerback-email.rb
@@ -0,0 +1,145 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+=begin
+
+repo_list:
+  list of all repo urls from upstream-repos.git
+
+API:
+call graph:
+read_mail_content
+send_account
+  email_addr
+    get email address
+  email_message_id
+    get email message_id
+  check_email_available
+    check email available
+  pub_key_value
+    get pub key
+  account_info
+    send an account request and return the account info
+  build_message
+    build email message
+  send_mail
+    send the mail with the built message
+
+the returned data for account_info looks like:
+{
+  "account" => "guest",
+  "passwd" => "Use pub_key to login",
+  "jumper_ip" => "10.10.10.10",
+  "jumper_port" => "10000"
+}
+=end
+
+require 'json'
+require 'mail'
+require 'set'
+require_relative '../defconfig.rb'
+
+names = Set.new %w[
+  JUMPER_IP
+  JUMPER_PORT
+  CRYSTAL_INTRANET
+  SEND_MAIL_PORT
+]
+
+defaults = relevant_defaults(names)
+
+JUMPER_IP = defaults['JUMPER_IP']
+JUMPER_PORT = defaults['JUMPER_PORT']
+CRYSTAL_INTRANET = defaults['CRYSTAL_INTRANET']
+SEND_MAIL_PORT = defaults['SEND_MAIL_PORT']
+
+def build_message(email, message_id, infos)
+  message = <<~EMAIL_MESSAGE
+    To: #{email}
+    Message-ID: #{message_id}
+    Subject: jumper account is ready
+
+    Dear #{email},
+
+    Thank you for joining us.
+    You can use the following command to log in to the jumper server:
+
+    login command:
+      ssh -p #{infos['jumper_port']} #{infos['account']}@#{infos['jumper_ip']}
+
+    account passwd:
+      account_password: #{infos['passwd']}
+
+    regards
+    compass-ci
+  EMAIL_MESSAGE
+
+  return message
+end
+
+def email_addr(mail_content)
+  msg = 'not an account application email'
+
+  raise msg unless mail_content.subject =~ /apply ssh account/i
+
+  email = mail_content.from.join(',')
+
+  return email
+end
+
+# def check_email_available(mail_content, email)
+#   oos_list = File.read('/c/upstream-repos/repo_list').split(/\n/)
+#   url = mail_content.body.decoded.split(/\n/).find { |line| line =~ /https?:\/\// }
+#   base_url = url.split('/')[0,5].join('/')
+#   message = 'The url is not in upstream repo_list'
+#
+#   raise message unless oos_list.include? base_url
+#
+#   url_fdback = %x(curl #{url})
+#   email_index = url_fdback.index email
+#
+#   message = 'No commit info found from the url for the email'
+#   raise message unless email_index
+# end
+
+def email_message_id(mail_content)
+  message_id = mail_content.message_id
+  return message_id
+end
+
+def pub_key_value(mail_content)
+  pub_key = mail_content.body.decoded.split(/\n/).find { |line| line =~ /ssh-rsa/ }
+  return pub_key
+end
+
+def account_info(pub_key)
+  account_info_str = %x(curl -XGET '#{JUMPER_IP}:#{JUMPER_PORT}/assign_account' -d "pub_key: #{pub_key}")
+  account_info = JSON.parse account_info_str
+
+  return account_info
+end
+
+def send_account(mail_content)
+  email = email_addr(mail_content)
+  message_id = email_message_id(mail_content)
+  # check_email_available(mail_content, email)
+
+  pub_key = pub_key_value(mail_content)
+  acct_info = account_info(pub_key)
+
+  message = build_message(email, message_id, acct_info)
+
+  %x(curl -XPOST '#{CRYSTAL_INTRANET}:#{SEND_MAIL_PORT}/send_mail_text' -d "#{message}")
+end
+
+def read_mail_content(mail_file)
+  mail_content = Mail.read(mail_file)
+
+  return mail_content
+end
+
+mail_file = ARGV[0]
+mail_content = read_mail_content(mail_file)
+send_account(mail_content)
diff --git a/container/assign-account/assign-account.rb b/container/assign-account/assign-account.rb
new file mode 100755
index 0000000000000000000000000000000000000000..e356c1867243f3089932c8af9a70f1d11dbd5569
--- /dev/null
+++ b/container/assign-account/assign-account.rb
@@ -0,0 +1,26 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'sinatra'
+require 'open3'
+require 'json'
+require 'yaml'
+require_relative 'get_account_info.rb'
+
+set :bind, '0.0.0.0'
+set :port, 29999
+
+get '/assign_account' do
+  begin
+    data = YAML.safe_load request.body.read
+  rescue StandardError => e
+    puts e.message
+  end
+
+  ref_account_info = AccountStorage.new(data)
+  account_info = ref_account_info.setup_jumper_account_info
+
+  return account_info.to_json
+end
diff --git a/container/assign-account/build b/container/assign-account/build
new file mode 100755
index 0000000000000000000000000000000000000000..39843418a00a87ce9ce44b10c377e386ee486f61
--- /dev/null
+++ b/container/assign-account/build
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t assign-account:latest .
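For reference, the request that answerback-email.rb sends to this service can be replayed by hand. A minimal sketch (the key, host and response values are illustrative; the port and the YAML request body follow assign-account.rb above):

```
# ask the assign-account service for a fresh jumper account
curl -XGET 'http://localhost:29999/assign_account' \
     -d "pub_key: ssh-rsa AAAAB3Nza... user@example"

# expected JSON response shape (values illustrative):
# {"account":"user11","passwd":"Use pub_key to login",
#  "jumper_ip":"10.10.10.10","jumper_port":"10000"}
```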
diff --git a/container/assign-account/conf/sources.list b/container/assign-account/conf/sources.list
new file mode 100644
index 0000000000000000000000000000000000000000..8179e01cadd258c86414c028f9eb913fa44e58d1
--- /dev/null
+++ b/container/assign-account/conf/sources.list
@@ -0,0 +1,9 @@
+deb [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
+
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
diff --git a/container/assign-account/get_account_info.rb b/container/assign-account/get_account_info.rb
new file mode 100755
index 0000000000000000000000000000000000000000..6c211a436f285a944e1a58c2a2330451cf6c693b
--- /dev/null
+++ b/container/assign-account/get_account_info.rb
@@ -0,0 +1,129 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+=begin
+
+ACCOUNT_DIR dir layout:
+tree
+├── assigned-users
+│   ├── user1
+│   ├── user2
+│   ├── user3
+│   ├── ...
+├── available-users
+│   ├── user11
+│   ├── user12
+│   ├── user13
+│   ├── ...
+└── jumper-info
+
+assigned-users: store assigned user files
+available-users: store available user files
+  the user file name is the username
+  the file content is the user's password
+jumper-info: store the jumper server ip:port for the service
+
+API:
+
+call graph:
+setup_jumper_account_info
+  read_account_info
+    build_account_name
+  read_jumper_info
+  setup_authorized_key
+
+the returned data for setup_jumper_account_info looks like:
+{
+  "account" => "guest",
+  "passwd" => "Use pub_key to login",
+  "jumper_ip" => "10.10.10.10",
+  "jumper_port" => "10000"
+}
+
+=end
+
+require 'fileutils'
+
+# get jumper and account info
+class AccountStorage
+  ACCOUNT_DIR = '/opt/account_data/'
+
+  def initialize(data)
+    @account_dir = ACCOUNT_DIR
+    @data = data
+  end
+
+  def read_account_info
+    available_dir = File.join(@account_dir, 'available-users')
+    files = Dir.open(available_dir).to_a
+    files -= ['.', '..']
+
+    message = 'no more available users'
+    raise message if files.empty?
+
+    account_info = build_account_name(available_dir, files)
+
+    return account_info
+  end
+
+  def build_account_name(available_dir, files)
+    files.sort!
+    account_info = []
+    account_info.push files[0]
+    source_file = File.join(available_dir, files[0])
+    account_info.push File.readlines(source_file)[0].chomp
+
+    dest_dir = File.join(@account_dir, 'assigned-users')
+    FileUtils.mv(source_file, dest_dir)
+
+    return account_info
+  end
+
+  def read_jumper_info
+    jumper_file = File.join(@account_dir, 'jumper-info')
+
+    raise "#{jumper_file} does not exist" unless File.exist? jumper_file
+    raise "#{jumper_file} is empty" if File.empty? jumper_file
+
+    jumper_info = File.read(jumper_file).split(/\n/)[0].split(':')
+
+    return jumper_info
+  end
+
+  def setup_jumper_account_info
+    account_info = read_account_info
+    jumper_info = read_jumper_info
+    pub_key = @data['pub_key']
+
+    jumper_ip = jumper_info[0].chomp
+    jumper_port = jumper_info[1].chomp
+    account = account_info[0]
+    passwd = if pub_key
+               'Use pub_key to login'
+             else
+               account_info[1]
+             end
+    jumper_account_info = {
+      'account' => account,
+      'passwd' => passwd,
+      'jumper_ip' => jumper_ip,
+      'jumper_port' => jumper_port
+    }
+
+    setup_authorized_key(account, pub_key)
+    return jumper_account_info
+  end
+
+  def setup_authorized_key(account, pub_key)
+    ssh_dir = File.join('/home/', account, '.ssh')
+    Dir.mkdir ssh_dir, 0o700
+    Dir.chdir ssh_dir
+    f = File.new('authorized_keys', 'w')
+    f.puts pub_key
+    f.close
+    File.chmod 0o600, 'authorized_keys'
+    %x(chown -R #{account}:#{account} #{ssh_dir})
+  end
+end
diff --git a/container/assign-account/start b/container/assign-account/start
new file mode 100755
index 0000000000000000000000000000000000000000..22909820ce09e97a2753ca21ba7173c9e01376d5
--- /dev/null
+++ b/container/assign-account/start
@@ -0,0 +1,33 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm assign-account
+
+cmd=(
+    docker run
+    --restart=always
+    --name=assign-account
+    -d
+    -v /etc/passwd:/etc/passwd:ro
+    -v /etc/group:/etc/group:ro
+    -v /srv/account-info:/opt/account_data/:rw
+    -v /home:/home:rw
+    -p 29999:29999
+    assign-account
+)
+
+"${cmd[@]}"
+
+cat << EOF
+port: 29999
+---
+Client:
+./answerback-email.rb email_file
+
+email subject: "apply ssh account"
+
+Refer to compass-ci/doc/apply-ssh-account.md for more information.
+EOF
diff --git a/container/compile b/container/compile
new file mode 100755
index 0000000000000000000000000000000000000000..3b97924f43fc7d32c0f8cb30cb5c5facc9ded131
--- /dev/null
+++ b/container/compile
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+DEFAULT_CCI=/c/cci
+DEFAULT_LKP=/c/lkp-tests
+
+[[ $CCI_SRC ]] || CCI_SRC=$DEFAULT_CCI
+[[ $LKP_SRC ]] || LKP_SRC=$DEFAULT_LKP
+
+if [ ! $1 ]; then
+    echo "Usage: $0 [scheduler | taskqueue ...]"
+    exit 1
+fi
+
+DIR=$(realpath "$1")
+if [ ! -d "$DIR" ]; then
+    echo "Service dir $DIR does not exist"
+    exit 1
+fi
+service=${DIR##*/}
+
+cmd=(
+    docker run
+    --rm
+    -e LKP_SRC=$DEFAULT_LKP
+    -e CRYSTAL_PATH="lib:/usr/share/crystal/app/lib:/usr/lib/crystal/shards:/usr/lib/crystal/core:${DEFAULT_LKP}/lib:${DEFAULT_CCI}/src"
+    -u $UID
+    -v $LKP_SRC:$DEFAULT_LKP
+    -v ${CCI_SRC}/src:${DEFAULT_CCI}/src
+    -v $DIR:${DEFAULT_CCI}/compile
+    -w ${DEFAULT_CCI}/compile
+    alpine:scheduler-dev
+    sh -c "crystal build ${DEFAULT_CCI}/src/${service}.cr"
+)
+
+"${cmd[@]}"
diff --git a/container/conserver/Dockerfile b/container/conserver/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..28d1b22512143513c88c770d1c6be4b04f0067db
--- /dev/null
+++ b/container/conserver/Dockerfile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
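+# conserver is a serial/IPMI console server; this image builds release 8.2.4
+# from source and layers in the ipmi-sol helper and conserver.cf alongside.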
+ +FROM alpine:3.11 + +MAINTAINER Bai Jing <799286817@qq.com> + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +RUN apk update +RUN apk add gcc make g++ bash ipmitool +RUN wget https://github.com/bstansell/conserver/releases/download/v8.2.4/conserver-8.2.4.tar.gz && \ + tar -xf conserver-8.2.4.tar.gz && \ + cd conserver-8.2.4 && \ + ./configure && make && make install && mkdir -p /var/consoles/ + +RUN echo "console 782/tcp conserver" >> /etc/services +COPY ipmi-sol /usr/local/bin/ +COPY conserver /usr/local/bin/ +COPY conserver.cf /etc/conserver.cf +ENTRYPOINT ["/usr/local/bin/conserver"] diff --git a/container/conserver/build b/container/conserver/build new file mode 100755 index 0000000000000000000000000000000000000000..25c785ccda06cd1fb246ab72803d87b806bc95bb --- /dev/null +++ b/container/conserver/build @@ -0,0 +1,7 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +sh generate_conserver || exit 1 + +docker build -t conserver_server . diff --git a/container/conserver/ipmi-sol b/container/conserver/ipmi-sol new file mode 100755 index 0000000000000000000000000000000000000000..8b6d06d453481f3c035bbebc304c37eb141bd8eb --- /dev/null +++ b/container/conserver/ipmi-sol @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +ipmitool -N 5 -I lanplus -H $1 -U "$USER" -E sol activate diff --git a/container/crystal-base/Dockerfile b/container/crystal-base/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..03fe5e6d615507aec769768a4df8ce623a4d3456 --- /dev/null +++ b/container/crystal-base/Dockerfile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM alpine:3.11 + +MAINTAINER chief + +ONBUILD RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +ONBUILD RUN apk update +ONBUILD RUN apk add bash zsh git rsync curl crystal shards tzdata sudo \ + yaml-dev openssl-dev ncurses-static g++ make cmake \ + zlib-dev zlib-static openssl-libs-static llvm5 llvm5-dev \ + llvm5-libs llvm5-static musl-dev xz-dev libxml2-dev \ + vim ruby-dev + diff --git a/container/crystal-base/build b/container/crystal-base/build new file mode 100755 index 0000000000000000000000000000000000000000..4df65a57a913d5e1178fdf9e121b0d8649eda0e9 --- /dev/null +++ b/container/crystal-base/build @@ -0,0 +1,11 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +if [ -n "$SQUID_HOST" ] && [ -n "$SQUID_PORT" ]; then + proxy="http://$SQUID_HOST:$SQUID_PORT/" +else + proxy= +fi + +docker build --build-arg HTTP_PROXY=$proxy -t alpine:crystal-base . diff --git a/container/crystal-compiler/Dockerfile b/container/crystal-compiler/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7cefdbcde412fc56b5b3cd5a0d3bdef8b773387f --- /dev/null +++ b/container/crystal-compiler/Dockerfile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+ +FROM alpine:3.11 + +MAINTAINER Cao Xueliang + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +RUN apk add gcc g++ make libffi-dev openssl-dev libtool yaml-dev crystal shards && shards init + + diff --git a/container/crystal-compiler/build b/container/crystal-compiler/build new file mode 100755 index 0000000000000000000000000000000000000000..cee43ebf27fe99e830a3cf8802e16448aed970e0 --- /dev/null +++ b/container/crystal-compiler/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t alpine:crystal-compiler . diff --git a/container/crystal-compiler/install b/container/crystal-compiler/install new file mode 100755 index 0000000000000000000000000000000000000000..0fa194344cccd3b1b41f187cdf784ba56f17539f --- /dev/null +++ b/container/crystal-compiler/install @@ -0,0 +1,6 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +DIR=$(dirname $(realpath $0)) +sudo ln -s $DIR/run /usr/local/bin/crystal diff --git a/container/crystal-compiler/run b/container/crystal-compiler/run new file mode 100755 index 0000000000000000000000000000000000000000..200722b462884144365c45b7087b750f458d47a7 --- /dev/null +++ b/container/crystal-compiler/run @@ -0,0 +1,25 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +pj_dir=$(git rev-parse --show-toplevel 2>/dev/null) +if [[ $pj_dir ]]; then + v_dir=$pj_dir +else + v_dir=$PWD +fi + +opt_build=build +[[ "$1" = build ]] && opt_build= + +cmd=( + docker run + -u $UID + --rm + -v $v_dir:$v_dir + -w $PWD + alpine:crystal-compiler + crystal $opt_build --static "$@" +) + +"${cmd[@]}" diff --git a/container/crystal-shards/Dockerfile b/container/crystal-shards/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..0c5713ceb0178c8a864e47b60c550c350d0fd9e0 --- /dev/null +++ b/container/crystal-shards/Dockerfile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
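+# Note: alpine:crystal-base declares its package installation as ONBUILD
+# instructions, so those apk/shards steps run here, when the FROM line below
+# is processed during this image's build, not when crystal-base is built.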
+
+FROM alpine:crystal-base
+
+MAINTAINER chief
+
+ARG GITCACHE_HOST
+ARG GITCACHE_PORT
+RUN if [ -n "$GITCACHE_HOST" ]; then echo -e "[url \"http://$GITCACHE_HOST:${GITCACHE_PORT:-5000}/\"]\n\tinsteadOf = https://" >> /etc/gitconfig; fi
+
+COPY shard.yml /usr/share/crystal/app/shard.yml
+
+WORKDIR /usr/share/crystal/app
+RUN shards
+RUN sed -i 's:data):data, headers\: HTTP\:\:Headers{"Content-Type" => "application/json"}):' /usr/share/crystal/app/lib/elasticsearch-crystal/src/elasticsearch/api/namespace/common.cr; \
+sed -i '99s/arguments\[:id]/arguments\[:id]?/' /usr/share/crystal/app/lib/elasticsearch-crystal/src/elasticsearch/api/actions/index.cr; \
+sed -i 's/, Utils.__listify(arguments\[:q].as(String))/ /' /usr/share/crystal/app/lib/elasticsearch-crystal/src/elasticsearch/api/actions/search.cr;\
+sed -i '205a \ params.clear' /usr/share/crystal/app/lib/elasticsearch-crystal/src/elasticsearch/api/actions/search.cr;
+
+COPY shard-amqp.yml /usr/share/crystal/app/shard.yml
+RUN shards
+
+CMD ["bash"]
+
diff --git a/container/crystal-shards/build b/container/crystal-shards/build
new file mode 100755
index 0000000000000000000000000000000000000000..784f5719e094c19129b3fa4889f d35c1fb10a60f
--- /dev/null
+++ b/container/crystal-shards/build
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+# GITCACHE_HOST and GITCACHE_PORT come from gitcache_host and gitcache_port in the job.yaml file you submitted.
+# If job.yaml does not set these two parameters, git falls back to the local proxy cache:
+# GITCACHE_HOST defaults to the local IP, and GITCACHE_PORT defaults to port 5000.
+. ../defconfig.sh
+load_cci_defaults
+
+docker build --build-arg GITCACHE_HOST=$GITCACHE_HOST --build-arg GITCACHE_PORT=$GITCACHE_PORT -t alpine:crystal-shards .
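load_cci_defaults comes from ../defconfig.sh (shown later in this patch): it turns each key in the compass-ci defaults yaml files into a shell variable, which is how GITCACHE_HOST and GITCACHE_PORT reach the docker build line above. A hypothetical per-user defaults file, assuming the uppercase key names these scripts read:

```
# write a defaults file, then rebuild; the key names are assumptions
# based on the variables the build script above consumes
mkdir -p ~/.config/compass-ci/defaults
cat > ~/.config/compass-ci/defaults/gitcache.yaml <<'EOF'
GITCACHE_HOST: 172.17.0.1
GITCACHE_PORT: 5000
EOF
./build    # picks the values up via load_cci_defaults
```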
diff --git a/container/crystal-shards/build-depends b/container/crystal-shards/build-depends new file mode 100755 index 0000000000000000000000000000000000000000..6627b6f8e3dce7d83156bf339fff455aaa0a9b10 --- /dev/null +++ b/container/crystal-shards/build-depends @@ -0,0 +1 @@ +crystal-base diff --git a/container/crystal-shards/shard-amqp.yml b/container/crystal-shards/shard-amqp.yml new file mode 100644 index 0000000000000000000000000000000000000000..aea87ccd56ced77b2f4e07613a2279cd4d483d7c --- /dev/null +++ b/container/crystal-shards/shard-amqp.yml @@ -0,0 +1,7 @@ +name: monitor +version: 0.1.0 + +dependencies: + amqp-client: + github: cloudamqp/amqp-client.cr + version: 0.5.14 diff --git a/container/crystal-shards/shard.yml b/container/crystal-shards/shard.yml new file mode 100644 index 0000000000000000000000000000000000000000..f0ca0b0ead826329bed0fff9b86cd59cad1f9beb --- /dev/null +++ b/container/crystal-shards/shard.yml @@ -0,0 +1,32 @@ +name: scheduler +version: 0.1.0 + +authors: + - tongqunfeng + +targets: + scheduler: + main: src/scheduler.cr + +crystal: 0.33.0 + +license: MIT + +dependencies: + kemal: + github: kemalcr/kemal + redis: + github: stefanwille/crystal-redis + version: ~> 2.5.3 + elasticsearch-crystal: + github: paktek123/elasticsearch-crystal + version: ~> 0.14 + any_merge: + github: icyleaf/any_merge + branch: master + deep-merge: + gitlab: peterhoeg/deep-merge.cr + json_on_steroids: + github: anykeyh/json_on_steroids + rate_limiter: + github: z64/rate_limiter diff --git a/container/debian/Dockerfile b/container/debian/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..1d310dacf456334339754f366414994fc11757bb --- /dev/null +++ b/container/debian/Dockerfile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM debian + +MAINTAINER Wu Fenguang + +ENV DEBIAN_FRONTEND noninteractive + +COPY root / + +RUN sed -i "s/^exit [0-9]*/exit 0/" /usr/sbin/policy-rc.d +RUN apt-get update && \ + apt-get install -y --no-install-recommends apt-utils > /dev/null 2>&1 && \ + apt-get install -y runit openssh-server zsh vim rsync git make gcc g++ tzdata sudo && \ + mkdir -p /run/sshd + diff --git a/container/debian/build b/container/debian/build new file mode 100755 index 0000000000000000000000000000000000000000..6bff5792170afe4a16fcc6bc5d6309fb0186fc8a --- /dev/null +++ b/container/debian/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t debian:testbed . diff --git a/container/debian/first-run.sh b/container/debian/first-run.sh new file mode 100755 index 0000000000000000000000000000000000000000..0b1468c2bc90874bcc607779cc1d115e3e473ab5 --- /dev/null +++ b/container/debian/first-run.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+ +export OS=debian +../first-run.sh diff --git a/container/debian/root/etc/apt/sources.list b/container/debian/root/etc/apt/sources.list new file mode 100644 index 0000000000000000000000000000000000000000..87b68bc46b0e2eb5be255b4d2338225e04f6ce50 --- /dev/null +++ b/container/debian/root/etc/apt/sources.list @@ -0,0 +1,7 @@ +deb http://mirrors.163.com/debian/ stable main non-free contrib + +deb http://mirrors.163.com/debian/ testing main non-free contrib +deb-src http://mirrors.163.com/debian/ testing main non-free contrib + +deb http://mirrors.163.com/debian/ sid main non-free contrib +deb-src http://mirrors.163.com/debian/ sid main non-free contrib diff --git a/container/debian/start b/container/debian/start new file mode 100755 index 0000000000000000000000000000000000000000..760a0a92e8c9c385155e56f704bb7b6219182b9a --- /dev/null +++ b/container/debian/start @@ -0,0 +1,27 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. $CCI_SRC/container/defconfig.sh + +docker_rm debian + +cmd=( + docker run + --restart=always + --name debian + -v debian-home:/home + -v /etc/localtime:/etc/localtime:ro + -v debian-root:/root + -v /c:/c + -v /srv/os:/srv/os + -p 2201:2201 + -p 1301:1301 + --hostname debian + --security-opt seccomp=unconfined + -d + debian:testbed + /usr/sbin/sshd -D -p 2201 +) + +"${cmd[@]}" diff --git a/container/defconfig.rb b/container/defconfig.rb new file mode 100755 index 0000000000000000000000000000000000000000..b500f85a512caae5e972bd67ce1d07afbdc7ed1c --- /dev/null +++ b/container/defconfig.rb @@ -0,0 +1,30 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'yaml' + +def cci_defaults + hash = {} + Dir.glob(['/etc/compass-ci/defaults/*.yaml', + "#{ENV['HOME']}/.config/compass-ci/defaults/*.yaml"]).each do |file| + hash.update YAML.load_file(file) + end + hash +end + +def relevant_defaults(names) + cci_defaults.select { |k, _| names.include? k } +end + +def docker_env(hash) + hash.map { |k, v| ['-e', "#{k}=#{v}"] }.flatten +end + +def docker_rm(container) + res = %x(docker ps -aqf name="^#{container}$") + return if res.empty? + + system "docker stop #{container} && docker rm -f #{container}" +end diff --git a/container/defconfig.sh b/container/defconfig.sh new file mode 100755 index 0000000000000000000000000000000000000000..8a5d08c70d6a6a6f0012fda219301727dacf54ba --- /dev/null +++ b/container/defconfig.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. $LKP_SRC/lib/yaml.sh + +load_cci_defaults() +{ + shopt -s nullglob + + for i in /etc/compass-ci/defaults/*.yaml $HOME/.config/compass-ci/defaults/*.yaml + do + create_yaml_variables "$i" + done +} + +docker_rm() +{ + container=$1 + [ -n "$(docker ps -aqf name="^${container}$")" ] || return 0 + docker stop $container + docker rm -f $container +} diff --git a/container/delimiter/Dockerfile b/container/delimiter/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..4163ceab7046219dac9ae942a15b6cde2e91704c --- /dev/null +++ b/container/delimiter/Dockerfile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+
+FROM alpine:3.11
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories && \
+    apk update && \
+    apk add --no-cache 'ruby-dev' \
+    'g++' 'gcc' 'pcre' 'libevent' 'make' 'git' 'bash' 'grep' 'coreutils'
+
+RUN umask 002 && \
+    echo ':sources: ["http://rubygems.org"]' >> ~/.gemrc && \
+    gem install rest-client activesupport git json yaml threadpool elasticsearch faye-websocket
diff --git a/container/delimiter/build b/container/delimiter/build
new file mode 100755
index 0000000000000000000000000000000000000000..fc1af95f1fdcdbcff135a6d08e7ce00fb4b09dbd
--- /dev/null
+++ b/container/delimiter/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t delimiter .
diff --git a/container/delimiter/start b/container/delimiter/start
new file mode 100755
index 0000000000000000000000000000000000000000..7227ff5b164fb7a1b09ca25688cad86b97aa3da4
--- /dev/null
+++ b/container/delimiter/start
@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'set'
+require_relative '../defconfig.rb'
+
+names = Set.new %w[
+  ES_HOST
+  ES_PORT
+  MAIL_HOST
+  MAIL_PORT
+  MONITOR_HOST
+  MONITOR_PORT
+  TASKQUEUE_HOST
+  TASKQUEUE_PORT
+  GIT_MIRROR_HOST
+]
+
+defaults = relevant_defaults(names)
+env = docker_env(defaults)
+
+DEFAULT_LKP = '/c/lkp-tests'
+DEFAULT_CCI = '/c/compass-ci'
+docker_rm 'delimiter'
+
+cmd = %w[
+  docker run
+  --name delimiter
+  --restart=always
+  -d
+] + env + %W[
+  -e LKP_SRC=#{DEFAULT_LKP}
+  -e CCI_SRC=#{DEFAULT_CCI}
+  -v #{ENV['LKP_SRC']}:#{DEFAULT_LKP}
+  -v /etc/localtime:/etc/localtime:ro
+  -v #{ENV['CCI_SRC']}:#{DEFAULT_CCI}
+  -w #{DEFAULT_CCI}/src
+  delimiter
+]
+
+cmd += ['sh', '-c', 'umask 002 && ruby ./delimiter.rb']
+
+system(*cmd)
diff --git a/container/delimiter/start-depends b/container/delimiter/start-depends
new file mode 100755
index 0000000000000000000000000000000000000000..95df1a16a877d2da334e0b2ad97fef6864bfc42a
--- /dev/null
+++ b/container/delimiter/start-depends
@@ -0,0 +1,2 @@
+taskqueue
+send-mail
diff --git a/container/dnsmasq/Dockerfile b/container/dnsmasq/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..edc06da5e1324bfbcba0c40c6f8c68914bea1b6f
--- /dev/null
+++ b/container/dnsmasq/Dockerfile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM alpine:3.11
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN apk add dnsmasq
+RUN mkdir -p /var/log/dnsmasq
+
+EXPOSE 67/udp 69/udp
+
+CMD ["dnsmasq", "-k"]
diff --git a/container/dnsmasq/FAQ.md b/container/dnsmasq/FAQ.md
new file mode 100644
index 0000000000000000000000000000000000000000..6146fd9d537005a05e78788b956b5d5c3d115166
--- /dev/null
+++ b/container/dnsmasq/FAQ.md
@@ -0,0 +1,20 @@
+## Find the IP address according to the MAC
+```
+1. find your pxe server container (usually named dnsmasq)
+2. docker exec -it dnsmasq sh
+3. grep [testbox_MAC] /var/lib/misc/dnsmasq.leases
+4. read your testbox IP from the matching lease line
+```
+
+## Enable dnsmasq service logs
+```
+1. add log config in dnsmasq.d/dnsmasq.conf
+     log-queries
+     log-facility=/var/log/dnsmasq/dnsmasq.log
+2. rerun the start script: ./start
+3. docker exec -it dnsmasq sh
+4. tail -f /var/log/dnsmasq/dnsmasq.log
+5. check the output on your terminal
+```
+
+## TODO
diff --git a/container/dnsmasq/build b/container/dnsmasq/build
new file mode 100755
index 0000000000000000000000000000000000000000..8a22575412b3e31b02c1d81a137559d35fdb0c6d
--- /dev/null
+++ b/container/dnsmasq/build
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+[[ -e /tftpboot/boot.ipxe ]] || {
+    cp tftpboot/boot.ipxe /tftpboot/boot.ipxe
+}
+
+docker build -t dnsmasq:alpine .
diff --git a/container/dnsmasq/dnsmasq.d/dnsmasq.conf b/container/dnsmasq/dnsmasq.d/dnsmasq.conf
new file mode 100644
index 0000000000000000000000000000000000000000..a51b43e9e58843086a560c501062a6c3f2c32133
--- /dev/null
+++ b/container/dnsmasq/dnsmasq.d/dnsmasq.conf
@@ -0,0 +1,30 @@
+port=0
+interface=br0
+
+enable-tftp
+tftp-root=/tftpboot
+
+dhcp-authoritative
+
+# dhcp-lease-max:
+# - limits dnsmasq to the specified maximum number of DHCP leases.
+# - the default is 1000.
+dhcp-lease-max=100000
+
+# The lease time is in seconds, or minutes (eg 45m) or hours (eg 1h) or
+# "infinite". If not given, the default lease time is one hour for IPv4
+# and one day for IPv6.
+# http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html
+dhcp-range=172.18.10.10,172.18.255.250,168h # br0
+
+dhcp-match=set:ipxe,175
+dhcp-match=set:pxeclient,60,PXEClient*
+
+dhcp-boot=tag:!ipxe,/tftpboot/ipxe/bin-arm64-efi/snp.efi
+dhcp-boot=tag:ipxe,boot.ipxe
+
+log-queries
+log-facility=/var/log/dnsmasq/dnsmasq.log
+# more info:
+# https://wiki.archlinux.org/index.php/Dnsmasq / pxe-service
+# man dnsmasq / --pxe-service section
diff --git a/container/dnsmasq/start b/container/dnsmasq/start
new file mode 100755
index 0000000000000000000000000000000000000000..3cad801ec81c642ce950582628abd5038989f024
--- /dev/null
+++ b/container/dnsmasq/start
@@ -0,0 +1,27 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm dnsmasq
+
+kill $(ps -ef| grep dnsmasq| grep libvirt| awk '{print $2}') 2> /dev/null
+
+cmd=(
+    docker run
+    --restart=always
+    --cap-add NET_ADMIN
+    --net=host
+    --publish 67:67/udp
+    --publish 69:69/udp
+    -v $PWD/dnsmasq.d:/etc/dnsmasq.d
+    -v /etc/localtime:/etc/localtime:ro
+    -v /tftpboot:/tftpboot:ro
+    --detach
+    --name dnsmasq
+    dnsmasq:alpine
+    dnsmasq -k
+)
+
+"${cmd[@]}"
diff --git a/container/dnsmasq/tftpboot/boot.ipxe b/container/dnsmasq/tftpboot/boot.ipxe
new file mode 100644
index 0000000000000000000000000000000000000000..be05eb547bb8bc1fd3356054c1c4db1682a8a95a
--- /dev/null
+++ b/container/dnsmasq/tftpboot/boot.ipxe
@@ -0,0 +1,8 @@
+#!ipxe
+
+set scheduler 172.17.0.1
+set port 3000
+
+chain http://${scheduler}:${port}/boot.ipxe/mac/${mac:hexhyp}
+
+exit
diff --git a/container/dracut-initrd/Dockerfile b/container/dracut-initrd/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4208b98f59a5a0f7a708cefc8635ed9fc3fa7140
--- /dev/null
+++ b/container/dracut-initrd/Dockerfile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
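+# Debian-based image with dracut plus the custom cifs/overlay modules below;
+# the ./run script uses it to regenerate an initramfs for a given kernel.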
+
+FROM debian
+
+MAINTAINER Wu Fenguang
+
+ADD bin /usr/local/bin
+COPY conf/sources.list* /etc/apt/
+COPY conf/add-cifs.conf /etc/dracut.conf.d/
+WORKDIR /usr/local/bin
+
+RUN setup-dracut.sh
diff --git a/container/dracut-initrd/bin/cifs-lib.sh b/container/dracut-initrd/bin/cifs-lib.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6475dc7c691e2377c4eb98b204b3642bdbf3b37a
--- /dev/null
+++ b/container/dracut-initrd/bin/cifs-lib.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+# From: https://github.com/dracutdevs/dracut/blob/master/modules.d/95cifs/cifs-lib.sh
+# SPDX-License-Identifier: GPL-2.0
+
+# cifs_to_var CIFSROOT
+# use CIFSROOT to set $server, $path, and $options.
+# CIFSROOT is something like: cifs://[<username>[:<password>]@]<server>/<path>
+# NETIF is used to get information from DHCP options, if needed.
+
+type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
+
+cifs_to_var() {
+    local cifsuser; local cifspass
+    # Check required arguments
+    # $1 example: "cifs://172.168.x.x/os/xx,vers=1.0,xx,xx"
+    server=${1##cifs://}
+    cifsuser=${server%@*}
+    cifspass=${cifsuser#*:}
+
+    # store cifs custom mount opts
+    initial_options=${server#*,}
+
+    if [ "$cifspass" != "$cifsuser" ]; then
+        cifsuser=${cifsuser%:*}
+    else
+        cifspass=$(getarg cifspass)
+    fi
+    if [ "$cifsuser" != "$server" ]; then
+        server="${server#*@}"
+    else
+        cifsuser=$(getarg cifsuser)
+    fi
+
+    path=${server#*/}
+
+    # remove cifs custom mount opts from ${path}
+    path=${path%%,*}
+
+    server=${server%%/*}
+
+    # append cifs custom mount opts to ${options}
+    # allow guest mount type
+    if [ ! "$cifsuser" ]; then
+        options="${initial_options}"
+    else
+        if [ ! "$cifspass" ]; then
+            options="username=$cifsuser,${initial_options}"
+        else
+            options="username=$cifsuser,password=$cifspass,${initial_options}"
+        fi
+    fi
+}
diff --git a/container/dracut-initrd/bin/overlay-lkp.sh b/container/dracut-initrd/bin/overlay-lkp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1f5819ba34b09acb0a1bc973ae916fa310e83f11
--- /dev/null
+++ b/container/dracut-initrd/bin/overlay-lkp.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# transfer LKP dirs
+[ -d /lkp ] || return 0
+
+cp -a /lkp "$NEWROOT"/
+
+for i in /lkp/lkp/src/rootfs/addon/* /usr/local/* /opt/*
+do
+    dir=$(basename "$i")
+
+    [ "$i" != "${i%/\*}" ] && continue # skip: i='dir/*'
+    [ -d "$NEWROOT/$dir" ] ||
+    mkdir -p "$NEWROOT/$dir"
+
+    for j in "$i"/* "$i"/.??*
+    do
+        [ "$j" != "${j%/\*}" ] && continue # skip: j='dir/*'
+
+        [ -f "$j" ] && {
+            cp -a "$j" "$NEWROOT/$dir"/
+            continue
+        }
+
+        subdir=$(basename "$j")
+
+        [ -d "$NEWROOT/$dir/$subdir" ] ||
+        mkdir -p "$NEWROOT/$dir/$subdir"
+
+        for k in "$j"/*
+        do
+            [ "$k" != "${k%/\*}" ] && continue # skip: k='dir/*'
+
+            cp -a "$j"/* "$NEWROOT/$dir/$subdir"/
+        done
+    done
+done
+
+[ -d /usr/src ] &&
+    cp -a /usr/src "$NEWROOT"/usr/
+
+kmdir=/lib/modules/$(uname -r)
+if test -d "$kmdir" && ! test -d "$NEWROOT/$kmdir"; then
+    cp -an "$kmdir" "$NEWROOT"/lib/modules/
+    cp -an /lib/firmware "$NEWROOT"/lib/
+fi
diff --git a/container/dracut-initrd/bin/setup-dracut.sh b/container/dracut-initrd/bin/setup-dracut.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2e676deb4ffdc11204ed6971453ea67c4e757124
--- /dev/null
+++ b/container/dracut-initrd/bin/setup-dracut.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+ +export DEBIAN_FRONTEND=noninteractive + +apt-get update +apt-get install -y --no-install-recommends apt-utils >/dev/null 2>&1 +apt-get install -y nfs-common netbase cifs-utils kmod +apt-get install -y dracut dracut-network dracut-config-generic + +apt-get clean +rm -rf /var/lib/apt/lists/* + +# Replace the runtime shell script with a custom shell script +cp -a /usr/local/bin/cifs-lib.sh /usr/lib/dracut/modules.d/95cifs/ + +cat overlay-lkp.sh >> /usr/lib/dracut/modules.d/90overlay-root/overlay-mount.sh diff --git a/container/dracut-initrd/build b/container/dracut-initrd/build new file mode 100755 index 0000000000000000000000000000000000000000..181b8967595b518ef52c534c5b18e6c15159f07f --- /dev/null +++ b/container/dracut-initrd/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t debian:dracut . diff --git a/container/dracut-initrd/conf/add-cifs.conf b/container/dracut-initrd/conf/add-cifs.conf new file mode 100644 index 0000000000000000000000000000000000000000..b8048a113abe294ce3b3856a3e267a7e126b73a4 --- /dev/null +++ b/container/dracut-initrd/conf/add-cifs.conf @@ -0,0 +1 @@ +add_dracutmodules+=" cifs " diff --git a/container/dracut-initrd/conf/sources.list b/container/dracut-initrd/conf/sources.list new file mode 100644 index 0000000000000000000000000000000000000000..8179e01cadd258c86414c028f9eb913fa44e58d1 --- /dev/null +++ b/container/dracut-initrd/conf/sources.list @@ -0,0 +1,9 @@ +deb [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib +deb [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib +deb [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib +deb [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib + +deb-src [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib +deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib +deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib +deb-src [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib diff --git a/container/dracut-initrd/run b/container/dracut-initrd/run new file mode 100755 index 0000000000000000000000000000000000000000..b0c9b3052faae3bea72889f504cf964ea339b1ea --- /dev/null +++ b/container/dracut-initrd/run @@ -0,0 +1,53 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# create initramfs from specified os lib modules +# input: +# specified os lib modules directory +# output: +# initramfs image file of the specified os version + +[ -d "$1" ] || { + echo "Example usage: + ./run /srv/os/debian/aarch64/sid/lib/modules/5.4.0-4-arm64 + " + exit +} + +modules_dir=$1 + +[ -d "${modules_dir}/kernel" ] || { + echo "[INFO] cannot find kernel dir under ${modules_dir} ." + + OS_PATH=${OS_PATH-"/os"} + modules_dir="${OS_PATH%/}/$1" + + echo "[INFO] finding kernel dir under ${modules_dir} ..." + + [ -d "${modules_dir}/kernel" ] || { + echo "[ERROR] cannot find kernel dir under ${modules_dir} !!!" 
+ exit + } +} + +kver=$(basename "$modules_dir") +root=${modules_dir%/lib/modules/*} + +kernel_modules=/lib/modules/$kver +initrd_output=/boot/initramfs.lkp-${kver}.img + +cmd=( + docker run + --rm + -v $root/boot:/boot + -v $root/lib/modules:/lib/modules + debian:dracut + bash -c + "dracut --force --kver $kver -k $kernel_modules $initrd_output && + chmod 644 $initrd_output" + + # example: + # dracut --kver 5.4.0-4-arm64 -k /os/debian/aarch64/sid/lib/modules/5.4.0-4-arm64 /os/debian/aarch64/sid/boot/initramfs.lkp-5.4.0-4-arm64.img +) + +"${cmd[@]}" diff --git a/container/es/Dockerfile b/container/es/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..e626dae971f03f12c3afa2b012101d3c711f24aa --- /dev/null +++ b/container/es/Dockerfile @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM alpine:3.11 + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +RUN apk add --no-cache elasticsearch curl + +RUN rm -rf /etc/init.d/elasticsearch \ + && rm -rf /usr/share/java/elasticsearch/plugins \ + && mv /usr/share/java/elasticsearch /usr/share/es \ + && echo "===> Creating Elasticsearch Paths..." \ + && for path in \ + /srv/es \ + /usr/share/es/logs \ + /usr/share/es/config \ + /usr/share/es/config/scripts \ + /usr/share/es/tmp \ + /usr/share/es/plugins \ + ; do \ + mkdir -p "$path"; \ + done \ + && cp /etc/elasticsearch/*.* /usr/share/es/config \ + && chown -R 1090:1090 /usr/share/es \ + && chown -R 1090:1090 /srv/es; + +RUN sed -i 's:#path.data\: /path/to/data:path.data\: /srv/es:' /usr/share/es/config/elasticsearch.yml; +RUN sed -i 's:#network.host\: _site_:network.host\: 0.0.0.0:' /usr/share/es/config/elasticsearch.yml; +RUN sed -i "s/-Xms256m/-Xms20g/g" /usr/share/es/config/jvm.options +RUN sed -i "s/-Xmx256m/-Xmx20g/g" /usr/share/es/config/jvm.options + +WORKDIR /usr/share/es + +ENV PATH /usr/share/es/bin:$PATH +ENV ES_TMPDIR /usr/share/es/tmp + +VOLUME ["/srv/es"] + +EXPOSE 9200 9300 + +USER 1090 +CMD ["elasticsearch"] + diff --git a/container/es/build b/container/es/build new file mode 100755 index 0000000000000000000000000000000000000000..d5607e0741fc26e16282c0fbd07671cda2bdab80 --- /dev/null +++ b/container/es/build @@ -0,0 +1,6 @@ +#!/bin/sh +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t es643b:alpine311 . --network=host + diff --git a/container/es/start b/container/es/start new file mode 100755 index 0000000000000000000000000000000000000000..c268a34224a9646020a2e4411dc0c14a1e844188 --- /dev/null +++ b/container/es/start @@ -0,0 +1,21 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. $CCI_SRC/container/defconfig.sh + +docker_rm es-server01 + +cmd=( + docker run + --restart=always + -d + -p 9200:9200 + -p 9300:9300 + -v /srv/es:/srv/es + -v /etc/localtime:/etc/localtime:ro + --name es-server01 + es643b:alpine311 +) + +"${cmd[@]}" diff --git a/container/extract-stats/Dockerfile b/container/extract-stats/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..31f4ef0cabe41a75c184f6f2ca3de50213e81df5 --- /dev/null +++ b/container/extract-stats/Dockerfile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
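+# Runtime image for the extract-stats service; the extract-stats executable
+# is compiled beforehand by ./build (via ../compile) and copied in below.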
+ +FROM alpine:3.11 + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories && \ + apk update && \ + apk add --no-cache 'ruby-dev' \ + 'g++' 'gcc' 'pcre' 'libevent' 'make' 'git' 'cpio' 'bash' 'grep' + +RUN umask 002 && \ + echo ':sources: ["http://rubygems.org"]' >> ~/.gemrc && \ + gem install rest-client activesupport git json yaml elasticsearch + +ENV RUNTIME_DIR /c/cci/extract + +RUN mkdir -p $RUNTIME_DIR && \ + chown -R 1090:1090 /c + +WORKDIR $RUNTIME_DIR + +COPY --chown=1090:1090 extract-stats . diff --git a/container/extract-stats/build b/container/extract-stats/build new file mode 100755 index 0000000000000000000000000000000000000000..1aa998269c19070e6f7013f6152c81be5db682b6 --- /dev/null +++ b/container/extract-stats/build @@ -0,0 +1,10 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +CCI_SRC=$CCI_SRC +bash $CCI_SRC/container/compile . + +docker build -t extract-stats . + +rm extract-stats diff --git a/container/extract-stats/build-depends b/container/extract-stats/build-depends new file mode 100755 index 0000000000000000000000000000000000000000..5dd403e166afc1ed9b15b32328320e82db6f5155 --- /dev/null +++ b/container/extract-stats/build-depends @@ -0,0 +1 @@ +scheduler-dev diff --git a/container/extract-stats/start b/container/extract-stats/start new file mode 100755 index 0000000000000000000000000000000000000000..25bdd2568a714deab6832b1ff620774308f40e88 --- /dev/null +++ b/container/extract-stats/start @@ -0,0 +1,51 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'set' +require_relative '../defconfig.rb' + +names = Set.new %w[ + REDIS_HOST + REDIS_PORT + ES_HOST + ES_PORT + FLUENTD_HOST + FLUENTD_PORT + MAIL_HOST + MAIL_PORT +] + +defaults = relevant_defaults(names) +env = docker_env(defaults) + +DEFAULT_LKP = '/c/lkp-tests' +DEFAULT_CCI = '/c/compass-ci' +FLUENTD_HOST = defaults['FLUENTD_HOST'] || '172.17.0.1' +FLUENTD_PORT = defaults['FLUENTD_PORT'] || '24224' +docker_rm "extract-stats" + +cmd = %w[ + docker run + --name extract-stats + --restart=always + -d +] + env + %W[ + -e LKP_SRC=#{DEFAULT_LKP} + -e CCI_SRC=#{DEFAULT_CCI} + -v #{ENV['LKP_SRC']}:#{DEFAULT_LKP} + -v /etc/localtime:/etc/localtime:ro + -v #{ENV['CCI_SRC']}:#{DEFAULT_CCI} + -v /srv/result:/result + --log-driver=fluentd + --log-opt fluentd-address=#{FLUENTD_HOST}:#{FLUENTD_PORT} + --log-opt mode=non-blocking + --log-opt max-buffer-size=4m + --log-opt tag=extract-stats + extract-stats +] + +cmd += ['sh', '-c', 'umask 002 && ./extract-stats'] + +system(*cmd) diff --git a/container/extract-stats/start-depends b/container/extract-stats/start-depends new file mode 100755 index 0000000000000000000000000000000000000000..4a016e6cf33a210dedec9ea06212c74b8d4d9087 --- /dev/null +++ b/container/extract-stats/start-depends @@ -0,0 +1,2 @@ +taskqueue +fluentd diff --git a/container/first-run.sh b/container/first-run.sh new file mode 100755 index 0000000000000000000000000000000000000000..2a7fd12badc1c5d8703e5d32978bbb7536efb5aa --- /dev/null +++ b/container/first-run.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+
+if [ -f /etc/ssh/team.pub ]; then
+    ssh_keys="$(
diff --git a/container/fluentd-base/Dockerfile b/container/fluentd-base/Dockerfile
new file mode 100644
--- /dev/null
+++ b/container/fluentd-base/Dockerfile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM alpine:3.11
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN apk update
+
+RUN apk add --no-cache 'ruby-dev' \
+    'g++' 'gcc' 'pcre' 'libevent' 'make' 'git' 'cpio' 'bash'
+
+RUN umask 002 && \
+    echo ':sources: ["http://rubygems.org"]' >> ~/.gemrc && \
+    gem install fluentd && \
+    gem install fluent-plugin-rabbitmq && \
+    gem install fluent-plugin-elasticsearch && \
+    gem install fluent-plugin-tail-ex && \
+    gem install fluent-plugin-tail-multiline && \
+    gem install json && \
+    gem install async && \
+    gem install webrick && \
+    gem install io-console && \
+    gem install etc
+
+EXPOSE 24224 24224/udp
diff --git a/container/fluentd-base/build b/container/fluentd-base/build
new file mode 100755
index 0000000000000000000000000000000000000000..32e0a38e1e3d5a2ba0b848e11530e0c65665a4c4
--- /dev/null
+++ b/container/fluentd-base/build
@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+system 'docker build -t fluentd-base:alpine .'
diff --git a/container/fluentd/Dockerfile b/container/fluentd/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..192ece3ebb15322c1cda461d5a982549e16cc175
--- /dev/null
+++ b/container/fluentd/Dockerfile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM fluentd-base:alpine
+
+COPY --chown=1090:1090 docker-fluentd.conf /fluentd/etc/docker-fluentd.conf
diff --git a/container/fluentd/build b/container/fluentd/build
new file mode 100755
index 0000000000000000000000000000000000000000..3696a5ff3a64d0d6c58b2f5e32bd778b64f22144
--- /dev/null
+++ b/container/fluentd/build
@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+system 'docker build -t fluentd:alpine .'
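+
+# rough usage sketch (assumption: the sibling ./start script is the normal entry
+# point; it mounts the serial log dirs and runs fluentd with docker-fluentd.conf):
+#   ./build && ./start
+# the shipped config can also be syntax-checked without starting the daemon:
+#   docker run --rm fluentd:alpine fluentd --dry-run -c /fluentd/etc/docker-fluentd.conf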
diff --git a/container/fluentd/build-depends b/container/fluentd/build-depends
new file mode 100755
index 0000000000000000000000000000000000000000..29ee9d39ead81d5869915ecbba7dcca939fe79fc
--- /dev/null
+++ b/container/fluentd/build-depends
@@ -0,0 +1 @@
+fluentd-base
diff --git a/container/fluentd/docker-fluentd.conf b/container/fluentd/docker-fluentd.conf
new file mode 100644
index 0000000000000000000000000000000000000000..8e76441bcaec3a40c3fe81e3e576243668e73301
--- /dev/null
+++ b/container/fluentd/docker-fluentd.conf
@@ -0,0 +1,84 @@
+<source>
+  @type forward
+  bind 0.0.0.0
+</source>
+
+<source>
+  @type tail
+  path /srv/cci/serial/logs/*
+  pos_file /srv/cci/serial/fluentd-pos/serial.log.pos
+  tag serial.*
+  path_key serial_path
+  refresh_interval 1s
+  <parse>
+    @type none
+  </parse>
+</source>
+
+<filter serial.**>
+  @type record_transformer
+  enable_ruby
+  <record>
+    time ${time.strftime('%Y-%m-%d %H:%M:%S%z')}
+  </record>
+</filter>
+
+<match serial.**>
+  @type copy
+  <store>
+    @type rabbitmq
+    host 172.17.0.1
+    exchange logging-test
+    exchange_type fanout
+    exchange_durable false
+    heartbeat 10
+    <format>
+      @type json
+    </format>
+  </store>
+</match>
+
+<filter **>
+  @type parser
+  format json
+  emit_invalid_record_to_error false
+  key_name log
+  reserve_data true
+</filter>
+
+<match **>
+  @type copy
+
+  <store>
+    @type stdout
+    <format>
+      @type stdout
+      output_type single_value
+      message_key log
+      add_newline true
+    </format>
+  </store>
+
+  <store>
+    @type elasticsearch
+    host 172.17.0.1
+    port 9202
+    suppress_type_name true
+    flush_interval 1s
+    index_name ${tag}
+    ssl_verify false
+    num_threads 2
+  </store>
+
+  <store>
+    @type rabbitmq
+    host 172.17.0.1
+    exchange logging-test
+    exchange_type fanout
+    exchange_durable false
+    heartbeat 10
+    <format>
+      @type json
+    </format>
+  </store>
+</match>
diff --git a/container/fluentd/start b/container/fluentd/start
new file mode 100755
index 0000000000000000000000000000000000000000..8ad7c1ea85542d97b0d035039d3eda07ad8d0b8f
--- /dev/null
+++ b/container/fluentd/start
@@ -0,0 +1,28 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require_relative '../defconfig.rb'
+
+docker_rm "fluentd"
+
+cmd = %w[
+  docker run
+  --restart=always
+  --name fluentd
+  -v /etc/localtime:/etc/localtime:ro
+  -v /srv/cci/serial/logs:/srv/cci/serial/logs:ro
+  -v /srv/cci/serial/fluentd-pos:/srv/cci/serial/fluentd-pos
+  -d
+  -u 1090:1090
+  -p 24224:24224/tcp
+  -p 24224:24224/udp
+  -e FLUENTD_CONF=docker-fluentd.conf
+  --log-driver json-file
+  --log-opt max-size=1g
+  fluentd:alpine
+]
+cmd += ['sh', '-c', 'umask 002 && fluentd -c /fluentd/etc/docker-fluentd.conf']
+
+system(*cmd)
diff --git a/container/git-daemon/Dockerfile b/container/git-daemon/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8f71581edb67fe1474b5a6e7ec6b4073b3deee51
--- /dev/null
+++ b/container/git-daemon/Dockerfile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM alpine:3.11
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN apk update && \
+    apk upgrade && \
+    apk add --no-cache git-daemon && \
+    mkdir /git
+
+
+VOLUME /git/
+
+EXPOSE 9418
+
+CMD ["git", "daemon", "--verbose", "--export-all", "--base-path=/git/", "--reuseaddr", "/git/"]
diff --git a/container/git-daemon/build b/container/git-daemon/build
new file mode 100755
index 0000000000000000000000000000000000000000..d81e4b9fc34b7ff60937a83937eeb31484e15209
--- /dev/null
+++ b/container/git-daemon/build
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t alpine311:git-daemon .
diff --git a/container/git-daemon/start b/container/git-daemon/start
new file mode 100755
index 0000000000000000000000000000000000000000..5b4c90b9aad8c7b64611ab44c0a155f5712e3eb7
--- /dev/null
+++ b/container/git-daemon/start
@@ -0,0 +1,15 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm git-daemon
+
+docker run --restart=always -d -p 9418:9418 -v /srv/git:/git -v /etc/localtime:/etc/localtime:ro --name git-daemon alpine311:git-daemon
+
+
+# test
+
+echo "you can clone a served repo with: git clone git://127.0.0.1/\$project_name"
+
diff --git a/container/git-mirror/Dockerfile b/container/git-mirror/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..9c582f7dc9aa8c3c39c00ab09b52f4c943f775b7
--- /dev/null
+++ b/container/git-mirror/Dockerfile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM alpine:3.8
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN apk update && \
+    echo ':sources: ["http://rubygems.org"]' >> ~/.gemrc && \
+    apk upgrade && \
+    apk add --no-cache git && \
+    apk add ruby-dev make gcc g++
+
+RUN umask 002 && \
+    gem install bunny json PriorityQueue --no-rdoc --no-ri
+
diff --git a/container/git-mirror/build b/container/git-mirror/build
new file mode 100755
index 0000000000000000000000000000000000000000..66967e41dfe5e031b7b0a38c7aa62ba3744a4f3f
--- /dev/null
+++ b/container/git-mirror/build
@@ -0,0 +1,6 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t alpine38:git-mirror .
+
diff --git a/container/git-mirror/start b/container/git-mirror/start
new file mode 100755
index 0000000000000000000000000000000000000000..dd4fd6313f45ec4354210edf407f21403b95ed30
--- /dev/null
+++ b/container/git-mirror/start
@@ -0,0 +1,29 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true + +require_relative '../defconfig.rb' + +DOCKER_CCI = '/c/compass-ci' +DOCKER_REPO = '/c/upstream-repos' +docker_rm "git-mirror" + +cmd = %W[ + docker run + --restart=always + --name git-mirror + -u 1090:1999 + -d + -e CCI_SRC=#{DOCKER_CCI} + -e REPO_SRC=#{DOCKER_REPO} + -v #{ENV['CCI_SRC']}:#{DOCKER_CCI} + -v /etc/localtime:/etc/localtime:ro + -v #{ENV['REPO_SRC']}:#{DOCKER_REPO} + -v /srv/git:/srv/git + -w /c/compass-ci/sbin + alpine38:git-mirror +] + +cmd += ['sh', '-c', 'umask 002 && ./git-mirror.rb'] +system(*cmd) diff --git a/container/git-mirror/start-depends b/container/git-mirror/start-depends new file mode 100755 index 0000000000000000000000000000000000000000..5232abf91d439ecb85809c7a3396e48e35a7d362 --- /dev/null +++ b/container/git-mirror/start-depends @@ -0,0 +1 @@ +rabbitmq diff --git a/container/gitcache/Dockerfile b/container/gitcache/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..d92a1412dae4a0001b940003b7d356c7e089427c --- /dev/null +++ b/container/gitcache/Dockerfile @@ -0,0 +1,15 @@ +# Origin: https://github.com/git-cloner/gitcache +# Copyright (C) 2019-2020 Eric D.little51 +# SPDX-License-Identifier: GPL-3.0 + +FROM golang:alpine + +ENV GO111MODULE on +ENV GOPROXY https://goproxy.cn + + +RUN apk add --no-cache git && \ + git clone https://github.com/git-cloner/gitcache /gitcache && \ + cd /gitcache && go build + +ENTRYPOINT ["/gitcache/gitcache"] diff --git a/container/gitcache/build b/container/gitcache/build new file mode 100755 index 0000000000000000000000000000000000000000..5135f7c836ad08074b2fd2557d0bcec29589919e --- /dev/null +++ b/container/gitcache/build @@ -0,0 +1,6 @@ +#!/bin/bash +# Origin: https://github.com/git-cloner/gitcache +# Copyright (C) 2019-2020 Eric D.little51 +# SPDX-License-Identifier: GPL-3.0 + +docker build -t gitcache . diff --git a/container/gitcache/start b/container/gitcache/start new file mode 100755 index 0000000000000000000000000000000000000000..0e1de1bf86e4f895c5244a3c94e9ec2db5993381 --- /dev/null +++ b/container/gitcache/start @@ -0,0 +1,20 @@ +#!/bin/bash +# Origin: https://github.com/git-cloner/gitcache +# Copyright (C) 2019-2020 Eric D.little51 +# SPDX-License-Identifier: GPL-3.0 + +. $CCI_SRC/container/defconfig.sh + +docker_rm gitcache + +cmd=( + docker run -dt + --name gitcache + --restart=always + -p 5000:5000 + -v /srv/cache/gitcache/:/var/gitcache/ + -v /etc/localtime:/etc/localtime:ro + gitcache +) + +"${cmd[@]}" diff --git a/container/initrd-http/Dockerfile b/container/initrd-http/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..8a3bdcb6c8f000bbe874797e75c11fc8f755a5b3 --- /dev/null +++ b/container/initrd-http/Dockerfile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM nginx:alpine +ADD root / diff --git a/container/initrd-http/build b/container/initrd-http/build new file mode 100755 index 0000000000000000000000000000000000000000..1b039dcfb901183967b9f2658ab962b590a18266 --- /dev/null +++ b/container/initrd-http/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t initrd-http . 
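+
+# quick smoke-test sketch (assumes the defaults used by the sibling ./start
+# script: host port ${INITRD_HTTP_PORT:-8800}, /srv/initrd served with
+# autoindex at /initrd):
+#   ./build && ./start
+#   curl -sS http://localhost:8800/initrd/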
diff --git a/container/initrd-http/root/etc/nginx/conf.d/default.conf b/container/initrd-http/root/etc/nginx/conf.d/default.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f2834c7d1eec09787bc5a3c13c002de7208a646d
--- /dev/null
+++ b/container/initrd-http/root/etc/nginx/conf.d/default.conf
@@ -0,0 +1,16 @@
+server {
+    listen 80;
+    server_name "initrd-http";
+    server_tokens off;
+
+    root /usr/share/nginx/html;
+    index index.html;
+
+    location /favicon.ico {
+        log_not_found off;
+    }
+
+    location /initrd {
+        autoindex on;
+    }
+}
diff --git a/container/initrd-http/start b/container/initrd-http/start
new file mode 100755
index 0000000000000000000000000000000000000000..d326372b98d2c3d1c8465891a95bc2a9cb13dbe0
--- /dev/null
+++ b/container/initrd-http/start
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. ../defconfig.sh
+
+load_cci_defaults
+
+docker_rm initrd-http
+
+cmd=(
+    docker run
+    --restart=always
+    --name initrd-http
+    -p ${INITRD_HTTP_PORT:-8800}:80
+    -v /srv/initrd:/usr/share/nginx/html/initrd:ro
+    -v /etc/localtime:/etc/localtime:ro
+    -d
+    initrd-http
+)
+
+"${cmd[@]}"
diff --git a/container/kibana/Dockerfile b/container/kibana/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..35802fe7485c57c27139af19dea6f98d6acea1fd
--- /dev/null
+++ b/container/kibana/Dockerfile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM gagara/kibana-oss-arm64:7.6.2
+
+# docker image borrowed from hub.docker.com/r/gagara/kibana-oss-arm64
+
+MAINTAINER Wu Zhende
+
+RUN sed -i 's/server.host: "0"/server.host: "0.0.0.0"/' config/kibana.yml
diff --git a/container/kibana/build b/container/kibana/build
new file mode 100755
index 0000000000000000000000000000000000000000..a7e47172b648ec428dbf4d3131e446e8e74cb7ea
--- /dev/null
+++ b/container/kibana/build
@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+system 'docker build -t kibana:7.6.2 .'
diff --git a/container/kibana/start b/container/kibana/start
new file mode 100755
index 0000000000000000000000000000000000000000..6a504fb88e4f87b18e9c5142dae82543d959220a
--- /dev/null
+++ b/container/kibana/start
@@ -0,0 +1,21 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require_relative '../defconfig.rb'
+
+docker_rm "kibana"
+
+cmd = %w[
+  docker run
+  --restart=always
+  --name kibana
+  -v /etc/localtime:/etc/localtime:ro
+  -d
+  --link logging-es:elasticsearch
+  -p 11309:5601
+  kibana:7.6.2
+]
+
+system(*cmd)
diff --git a/container/kibana/start-depends b/container/kibana/start-depends
new file mode 100755
index 0000000000000000000000000000000000000000..66c19963181d5bdb2ffbed63e5ada52083aa800c
--- /dev/null
+++ b/container/kibana/start-depends
@@ -0,0 +1 @@
+logging-es
diff --git a/container/lkp-initrd/Dockerfile b/container/lkp-initrd/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..ea9fe5b4d67266c4034e081a72053a43dc50f30c
--- /dev/null
+++ b/container/lkp-initrd/Dockerfile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+ +FROM alpine:3.11 + +MAINTAINER Wu Fenguang + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +COPY sbin /usr/local/sbin +RUN /usr/local/sbin/setup.sh + +WORKDIR /root diff --git a/container/lkp-initrd/bin/pack-lkp.sh b/container/lkp-initrd/bin/pack-lkp.sh new file mode 100755 index 0000000000000000000000000000000000000000..92eaa98f78d869224242c2c4272bb7bbfa4e2d8c --- /dev/null +++ b/container/lkp-initrd/bin/pack-lkp.sh @@ -0,0 +1,10 @@ +#!/bin/bash -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +export OWNER=root.root +export LKP_USER=lkp +export USER=lkp + +umask 002 +$LKP_SRC/sbin/pack -f -a $ARCH lkp-src diff --git a/container/lkp-initrd/build b/container/lkp-initrd/build new file mode 100755 index 0000000000000000000000000000000000000000..2383323e01d558b97996a3319d38f3f12e0b368a --- /dev/null +++ b/container/lkp-initrd/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t alpine:lkp . diff --git a/container/lkp-initrd/run b/container/lkp-initrd/run new file mode 100755 index 0000000000000000000000000000000000000000..522f5098ddc2ed81066ac44d027296b98471c7c2 --- /dev/null +++ b/container/lkp-initrd/run @@ -0,0 +1,26 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +DIR=$(dirname $(realpath $0)) +. $(dirname $DIR)/defconfig.sh + +load_cci_defaults + +[[ $ARCH ]] || ARCH=$(uname -m) +[[ $LKP_SRC ]] || LKP_SRC=/c/lkp-tests + +cmd=( + docker run + --rm + -e ARCH=$ARCH + -e LKP_SRC=$LKP_SRC + -v $LKP_SRC:$LKP_SRC + -v $DIR/bin:/root/bin + -v /srv/initrd/lkp/${lkp_initrd_user:-latest}:/osimage/user/lkp + alpine:lkp + /root/bin/pack-lkp.sh +) + +"${cmd[@]}" +echo "result: /srv/initrd/lkp/${lkp_initrd_user:-latest}/lkp-${ARCH}.cgz" diff --git a/container/lkp-initrd/sbin/setup.sh b/container/lkp-initrd/sbin/setup.sh new file mode 100755 index 0000000000000000000000000000000000000000..98dae9a33b9ed8e7a0ba61ff55fa4b37338fb082 --- /dev/null +++ b/container/lkp-initrd/sbin/setup.sh @@ -0,0 +1,6 @@ +#!/bin/sh -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +apk add bash gcc make libc-dev findutils cpio gzip +adduser -Du 1090 lkp # lkp group also created diff --git a/container/logging-es/Dockerfile b/container/logging-es/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7f91f65bad94b9b7007b35bc4a1e8a43c285ec25 --- /dev/null +++ b/container/logging-es/Dockerfile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+
+FROM gagara/elasticsearch-oss-arm64:7.6.2
+
+# docker image borrowed from hub.docker.com/r/gagara/elasticsearch-oss-arm64
+
+MAINTAINER Wu Zhende
+
+RUN sed -i 's:#network.host\: _site_:network.host\: 0.0.0.0:' /usr/share/elasticsearch/config/elasticsearch.yml;
+RUN sed -i '$a path.data: /srv/es/logging-es' /usr/share/elasticsearch/config/elasticsearch.yml;
+RUN sed -i '$a node.name: node-1' /usr/share/elasticsearch/config/elasticsearch.yml;
+RUN sed -i '$a cluster.initial_master_nodes: ["node-1"]' /usr/share/elasticsearch/config/elasticsearch.yml;
+
+RUN mkdir /usr/share/elasticsearch/tmp && \
+    chown -R 1090:1090 /usr/share/elasticsearch
+
+WORKDIR /usr/share/elasticsearch
+
+ENV PATH /usr/share/elasticsearch/bin:$PATH
+ENV ES_TMPDIR /usr/share/elasticsearch/tmp
+
+EXPOSE 9200 9300
+
+USER 1090
+
+CMD ["elasticsearch"]
diff --git a/container/logging-es/build b/container/logging-es/build
new file mode 100755
index 0000000000000000000000000000000000000000..0ddecbf04e49cdee8686b7b697d0c4aa3575e1f9
--- /dev/null
+++ b/container/logging-es/build
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t logging-es:7.6.2 .
diff --git a/container/logging-es/start b/container/logging-es/start
new file mode 100755
index 0000000000000000000000000000000000000000..9be7a5bff81f3dfa98abc8f94b6c606103c4524b
--- /dev/null
+++ b/container/logging-es/start
@@ -0,0 +1,21 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm logging-es
+
+cmd=(
+    docker run
+    --restart=always
+    -v /etc/localtime:/etc/localtime:ro
+    -d
+    -p 9202:9200
+    -p 9302:9300
+    -v /srv/es/logging-es:/srv/es/logging-es
+    --name logging-es
+    logging-es:7.6.2
+)
+
+"${cmd[@]}"
diff --git a/container/manjaro/Dockerfile b/container/manjaro/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..f6cec4868f5e7b41507542fe543c317bd5270ae2
--- /dev/null
+++ b/container/manjaro/Dockerfile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM manjaroarm/manjaro-aarch64-base
+
+MAINTAINER Wu Fenguang
+
+COPY root /
+RUN chmod 755 /etc /etc/pacman.d
+RUN pacman --needed --noprogressbar --noconfirm -Sy && \
+    pacman --needed --noprogressbar --noconfirm -S bash zsh git openssh rsync make gcc tzdata sudo coreutils util-linux vim
+RUN ssh-keygen -t rsa -P '' -f /etc/ssh/ssh_host_rsa_key
+
+CMD ["sh"]
diff --git a/container/manjaro/build b/container/manjaro/build
new file mode 100755
index 0000000000000000000000000000000000000000..3ce3049fa99f742b4947259d8a533a3a80e0c126
--- /dev/null
+++ b/container/manjaro/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t manjaro:testbed .
diff --git a/container/manjaro/first-run b/container/manjaro/first-run
new file mode 100755
index 0000000000000000000000000000000000000000..5c85a1c9b051f757726c236c86fd4554541cf7b1
--- /dev/null
+++ b/container/manjaro/first-run
@@ -0,0 +1,6 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+ +export OS=manjaro +../first-run.sh diff --git a/container/manjaro/root/etc/pacman.d/mirrorlist b/container/manjaro/root/etc/pacman.d/mirrorlist new file mode 100644 index 0000000000000000000000000000000000000000..4ff4847755ce2cd1f8d4b40cce9a7b5a206a81b5 --- /dev/null +++ b/container/manjaro/root/etc/pacman.d/mirrorlist @@ -0,0 +1,2 @@ +Server = http://mirrors.huaweicloud.com/manjaro/arm-stable/$repo/$arch +Server = http://mirrors.ustc.edu.cn/manjaro-arm/stable/$repo/$arch/ diff --git a/container/manjaro/root/etc/pacman.d/mirrors/China b/container/manjaro/root/etc/pacman.d/mirrors/China new file mode 100644 index 0000000000000000000000000000000000000000..61c94addcfdcf26f6357a4d5cb1777405b8ab427 --- /dev/null +++ b/container/manjaro/root/etc/pacman.d/mirrors/China @@ -0,0 +1 @@ +Server = http://mirrors.ustc.edu.cn/manjaro-arm/stable/$repo/$arch/ diff --git a/container/manjaro/start b/container/manjaro/start new file mode 100755 index 0000000000000000000000000000000000000000..d92b15cd36b448a9a212dc51825f0c59ac69fd2b --- /dev/null +++ b/container/manjaro/start @@ -0,0 +1,25 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. ../defconfig.sh + +docker_rm manjaro +cmd=( + docker run + --restart=always + -v manjaro-home:/home + -v /etc/localtime:/etc/localtime:ro + -v manjaro-root:/root + -v /c:/c + -v /srv/os:/srv/os + -p 2203:2203 + --name manjaro + --hostname manjaro + --security-opt seccomp=unconfined + -d + manjaro:testbed + /usr/sbin/sshd -D -p 2203 +) + +"${cmd[@]}" diff --git a/container/monitoring/Dockerfile b/container/monitoring/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..5ceaf4f0d43ebec198cc93f1f818da9cb26ce879 --- /dev/null +++ b/container/monitoring/Dockerfile @@ -0,0 +1,26 @@ +FROM alpine:3.11 + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +RUN apk update + +RUN apk add --no-cache 'ruby-dev' \ + 'g++' 'gcc' 'pcre' 'libevent' 'make' 'git' 'cpio' 'bash' + +RUN umask 002 && \ + echo ':sources: ["http://rubygems.org"]' >> ~/.gemrc && \ + gem install rest-client && \ + gem install activesupport && \ + gem install git && \ + gem install json && \ + gem install yaml + + +ENV MONITOR_RUNTIME_DIR /c/cci/monitor + +RUN mkdir -p $MONITOR_RUNTIME_DIR && \ + chown -R 1090:1090 /c + +WORKDIR $MONITOR_RUNTIME_DIR + +COPY --chown=1090:1090 monitoring . diff --git a/container/monitoring/build b/container/monitoring/build new file mode 100755 index 0000000000000000000000000000000000000000..bcb0c37c6411a18fae1067498e441bb1410262a3 --- /dev/null +++ b/container/monitoring/build @@ -0,0 +1,18 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'set' +require_relative '../defconfig.rb' + +names = Set.new ['MONITOR_PORT'] +defaults = relevant_defaults(names) + +MONITOR_PORT = (defaults['MONITOR_PORT'] || '11310') + +CCI_SRC = ENV['CCI_SRC'] +system "bash #{CCI_SRC}/container/compile ." +system "docker build -t monitoring-#{MONITOR_PORT} ." 
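+
+# the "monitoring" binary compiled by the container/compile step above (same
+# pattern as extract-stats/build) is only needed while "docker build" runs,
+# since the Dockerfile COPYs it into the image; it is removed again right below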
+
+system 'rm monitoring'
diff --git a/container/monitoring/build-depends b/container/monitoring/build-depends
new file mode 100755
index 0000000000000000000000000000000000000000..5dd403e166afc1ed9b15b32328320e82db6f5155
--- /dev/null
+++ b/container/monitoring/build-depends
@@ -0,0 +1 @@
+scheduler-dev
diff --git a/container/monitoring/start b/container/monitoring/start
new file mode 100755
index 0000000000000000000000000000000000000000..199f091371f802a82582aa1e18e7d78ed84ece65
--- /dev/null
+++ b/container/monitoring/start
@@ -0,0 +1,38 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'set'
+require_relative '../defconfig.rb'
+
+names = Set.new %w[
+  MONITOR_PORT
+  MQ_PORT
+  MQ_HOST
+]
+
+defaults = relevant_defaults(names)
+env = docker_env(defaults)
+
+DEFAULT_CCI = '/c/cci'
+MONITOR_PORT = defaults['MONITOR_PORT'] || '11310'
+docker_rm "monitoring-#{MONITOR_PORT}"
+
+cmd = %W[
+  docker run
+  --restart=always
+  --name monitoring-#{MONITOR_PORT}
+  -d
+  -u 1090:1090
+  -p #{MONITOR_PORT}:#{MONITOR_PORT}
+] + env + %W[
+  -e CCI_SRC=#{DEFAULT_CCI}
+  -v /etc/localtime:/etc/localtime:ro
+  -v /srv/result:/srv/result
+  monitoring-#{MONITOR_PORT}
+]
+
+cmd += ['sh', '-c', 'umask 002 && ./monitoring']
+
+system(*cmd)
diff --git a/container/monitoring/start-depends b/container/monitoring/start-depends
new file mode 100755
index 0000000000000000000000000000000000000000..5232abf91d439ecb85809c7a3396e48e35a7d362
--- /dev/null
+++ b/container/monitoring/start-depends
@@ -0,0 +1 @@
+rabbitmq
diff --git a/container/netdata/Dockerfile b/container/netdata/Dockerfile
new file mode 100755
index 0000000000000000000000000000000000000000..65a2338538b69f1fa2531f47c475b429ae10ab6e
--- /dev/null
+++ b/container/netdata/Dockerfile
@@ -0,0 +1,8 @@
+# Origin: https://hub.docker.com/r/netdata/netdata
+# SPDX-License-Identifier: GPL-3.0+
+
+FROM netdata/netdata
+
+RUN usermod -u 1090 netdata && \
+    groupmod -g 1090 netdata
+
diff --git a/container/netdata/build b/container/netdata/build
new file mode 100755
index 0000000000000000000000000000000000000000..809b4458d95730f633c880c06590ab3521c98415
--- /dev/null
+++ b/container/netdata/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# Origin: https://hub.docker.com/r/netdata/netdata
+# SPDX-License-Identifier: GPL-3.0+
+
+docker build -t netdata/netdatalkp .
diff --git a/container/netdata/start b/container/netdata/start
new file mode 100755
index 0000000000000000000000000000000000000000..a3009a98ae1a24cc1236b56ce906a30519cdaf19
--- /dev/null
+++ b/container/netdata/start
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Origin: https://hub.docker.com/r/netdata/netdata
+# SPDX-License-Identifier: GPL-3.0+
+
+. 
$CCI_SRC/container/defconfig.sh
+
+docker_rm netdata
+
+cmd=(
+    docker run -d
+    --restart=always
+    --name netdata
+    -p 19999:19999
+    -v /proc:/host/proc:ro
+    -v /sys:/host/sys:ro
+    -v /etc/localtime:/etc/localtime:ro
+    -v /var/run/docker.sock:/var/run/docker.sock:ro
+    -v /srv/cache/netdata_cache:/var/cache/netdata
+    -v /srv/cache/netdata_lib:/var/lib/netdata
+    --cap-add SYS_PTRACE
+    --security-opt apparmor=unconfined
+    netdata/netdatalkp
+)
+"${cmd[@]}"
diff --git a/container/os-cifs/Dockerfile b/container/os-cifs/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..66f978df489a6cb48947bc7a1150ac8760cf37fc
--- /dev/null
+++ b/container/os-cifs/Dockerfile
@@ -0,0 +1,22 @@
+# Origin: https://github.com/Stanback/alpine-samba
+# Copyright (C) 2016-2020 Eric D. Stanback
+# SPDX-License-Identifier: GPL-3.0
+
+FROM alpine:edge
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN adduser -u 1090 -D lkp
+
+RUN apk add --update \
+    samba-common-tools \
+    samba-client \
+    samba-server \
+    bash \
+    && rm -rf /var/cache/apk/*
+
+COPY ./smb.conf /etc/samba/
+
+EXPOSE 445/tcp
+
+ENTRYPOINT ["smbd", "--foreground", "--no-process-group", "--log-stdout"]
diff --git a/container/os-cifs/build b/container/os-cifs/build
new file mode 100755
index 0000000000000000000000000000000000000000..56ebbed65364d54e94a2b28a941278a246ac0280
--- /dev/null
+++ b/container/os-cifs/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t alpine/samba .
diff --git a/container/os-cifs/smb.conf b/container/os-cifs/smb.conf
new file mode 100644
index 0000000000000000000000000000000000000000..5ff68413cdd9e1a6cbab47484fdb4641092b13d2
--- /dev/null
+++ b/container/os-cifs/smb.conf
@@ -0,0 +1,62 @@
+# refer to https://lkml.org/lkml/2019/7/16/716 and https://lkml.org/lkml/2019/9/19/586
+[global]
+    workgroup = MYGROUP
+    server string = Samba Server
+    map to guest = Bad User
+    load printers = no
+    printing = bsd
+    printcap name = /dev/null
+    disable spoolss = yes
+    disable netbios = yes
+    server role = standalone
+    server services = -dns, -nbt
+    smb ports = 445
+    create mode = 0777
+    directory mode = 0777
+    guest only = yes
+    guest ok = yes
+    server min protocol = NT1
+    unix extensions = yes
+    mangled names = no
+[os]
+    path = /srv/os/
+    comment = os
+    browseable = yes
+    read only = yes
+    public = yes
+    # Added these two params because there are files in /srv/os
+    # that can only be accessed by root.
+    force user = root
+    force group = root
+[os-rw]
+    path = /srv/os-rw/
+    comment = os-rw
+    browseable = yes
+    writable = yes
+    public = yes
+    force user = root
+    force group = root
+[initrd]
+    path = /srv/initrd/
+    comment = initrd
+    browseable = yes
+    writable = yes
+    public = yes
+    force user = lkp
+    force group = lkp
+[osimage]
+    path = /srv/initrd/
+    comment = osimage
+    browseable = yes
+    writable = yes
+    public = yes
+    force user = lkp
+    force group = lkp
+[result]
+    path = /srv/result/
+    comment = result
+    browseable = yes
+    writable = yes
+    public = yes
+    force user = lkp
+    force group = lkp
diff --git a/container/os-cifs/start b/container/os-cifs/start
new file mode 100755
index 0000000000000000000000000000000000000000..6688e271ec3c3be4bd33955f5deebe07bf47f200
--- /dev/null
+++ b/container/os-cifs/start
@@ -0,0 +1,26 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. 
All rights reserved. + +. $CCI_SRC/container/defconfig.sh + +lsmod | grep -q "^cifs\s" || { + sudo modprobe cifs +} + +docker_rm samba + +cmd=( + docker run -dt + -p 445:445 + -v /srv/os:/srv/os:ro + -v /etc/localtime:/etc/localtime:ro + -v /srv/os:/srv/os-rw + -v /srv/initrd:/srv/initrd + -v /srv/result:/srv/result + --name samba + --restart=always + alpine/samba +) + +"${cmd[@]}" diff --git a/container/os-http/Dockerfile b/container/os-http/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..8a3bdcb6c8f000bbe874797e75c11fc8f755a5b3 --- /dev/null +++ b/container/os-http/Dockerfile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM nginx:alpine +ADD root / diff --git a/container/os-http/build b/container/os-http/build new file mode 100755 index 0000000000000000000000000000000000000000..c1cb0086f804395980945b3128bd99cfa64b3db0 --- /dev/null +++ b/container/os-http/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t os-http . diff --git a/container/os-http/root/etc/nginx/conf.d/default.conf b/container/os-http/root/etc/nginx/conf.d/default.conf new file mode 100644 index 0000000000000000000000000000000000000000..06c12e3d30382ad60c945d04fb6b2612aae1bbb6 --- /dev/null +++ b/container/os-http/root/etc/nginx/conf.d/default.conf @@ -0,0 +1,16 @@ +server { + listen 80; + server_name "os-http"; + server_tokens off; + + root /usr/share/nginx/html; + index index.html; + + location /favicon.ico { + log_not_found off; + } + + location /os { + autoindex on; + } +} diff --git a/container/os-http/start b/container/os-http/start new file mode 100755 index 0000000000000000000000000000000000000000..ded7f9bb50b00d7de9a9e8802ec8192c88982326 --- /dev/null +++ b/container/os-http/start @@ -0,0 +1,21 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +. ../defconfig.sh + +load_cci_defaults + +docker_rm os-http + +cmd=( + docker run + --restart=always + --name os-http + -p ${OS_HTTP_PORT:-8000}:80 + -v /srv/os:/usr/share/nginx/html/os:ro + -v /etc/localtime:/etc/localtime:ro + -d + os-http +) + +"${cmd[@]}" diff --git a/container/os-nfs/Dockerfile b/container/os-nfs/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..78fa5c3bc1ef9411f17626b52cb8eb13ef3a1f7f --- /dev/null +++ b/container/os-nfs/Dockerfile @@ -0,0 +1,29 @@ +# https://github.com/ehough/docker-nfs-server +# Copyright (C) 2017-2020 Eric D. 
Hough
+# SPDX-License-Identifier: GPL-3.0
+
+ARG BUILD_FROM=alpine:3.11
+
+FROM $BUILD_FROM
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN apk --update --no-cache add bash nfs-utils && \
+    \
+    # remove the default config files
+    rm -v /etc/idmapd.conf /etc/exports
+
+# wiki.linux-nfs.org/wiki/index.php/Nfsv4_configuration
+RUN mkdir -p /var/lib/nfs/rpc_pipefs && \
+    mkdir -p /var/lib/nfs/v4recovery && \
+    echo "rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs defaults 0 0" >> /etc/fstab && \
+    echo "nfsd /proc/fs/nfsd nfsd defaults 0 0" >> /etc/fstab
+
+EXPOSE 2049
+
+# Need a volume to fix error "exportfs: /exports does not support NFS export"
+VOLUME /exports
+
+COPY ./entrypoint.sh /usr/local/bin
+ADD root /
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
diff --git a/container/os-nfs/build b/container/os-nfs/build
new file mode 100755
index 0000000000000000000000000000000000000000..eea5a7d19728aaaa7a56c5ac433751ccd70bc17f
--- /dev/null
+++ b/container/os-nfs/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t os-nfs .
diff --git a/container/os-nfs/entrypoint.sh b/container/os-nfs/entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e6a2ec2b3706d03c6a9fce2402c5cbf3fda59783
--- /dev/null
+++ b/container/os-nfs/entrypoint.sh
@@ -0,0 +1,881 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0
+#
+# ehough/docker-nfs-server: A lightweight, robust, flexible, and containerized NFS server.
+#
+# https://hub.docker.com/r/erichough/nfs-server
+# https://github.com/ehough/docker-nfs-server
+#
+# Copyright (C) 2017-2020 Eric D. Hough
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
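+#
+# usage sketch (assumptions: image tag "os-nfs" from the sibling ./build script,
+# exports baked in via root/etc/exports; see ./start for the real invocation):
+#   docker run --cap-add SYS_ADMIN --net=host \
+#     -v /srv/os:/exports/os:ro \
+#     -e NFS_VERSION=4.2 -e NFS_LOG_LEVEL=DEBUG os-nfs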
+ +###################################################################################### +### constants +###################################################################################### + +readonly ENV_VAR_NFS_DISABLE_VERSION_3='NFS_DISABLE_VERSION_3' +readonly ENV_VAR_NFS_SERVER_THREAD_COUNT='NFS_SERVER_THREAD_COUNT' +readonly ENV_VAR_NFS_ENABLE_KERBEROS='NFS_ENABLE_KERBEROS' +readonly ENV_VAR_NFS_PORT_MOUNTD='NFS_PORT_MOUNTD' +readonly ENV_VAR_NFS_PORT='NFS_PORT' +readonly ENV_VAR_NFS_PORT_STATD_IN='NFS_PORT_STATD_IN' +readonly ENV_VAR_NFS_PORT_STATD_OUT='NFS_PORT_STATD_OUT' +readonly ENV_VAR_NFS_VERSION='NFS_VERSION' +readonly ENV_VAR_NFS_LOG_LEVEL='NFS_LOG_LEVEL' + +readonly DEFAULT_NFS_PORT=2049 +readonly DEFAULT_NFS_PORT_MOUNTD=32767 +readonly DEFAULT_NFS_PORT_STATD_IN=32765 +readonly DEFAULT_NFS_PORT_STATD_OUT=32766 +readonly DEFAULT_NFS_VERSION='4.2' + +readonly PATH_BIN_EXPORTFS='/usr/sbin/exportfs' +readonly PATH_BIN_IDMAPD='/usr/sbin/rpc.idmapd' +readonly PATH_BIN_MOUNTD='/usr/sbin/rpc.mountd' +readonly PATH_BIN_NFSD='/usr/sbin/rpc.nfsd' +readonly PATH_BIN_RPCBIND='/sbin/rpcbind' +readonly PATH_BIN_RPC_SVCGSSD='/usr/sbin/rpc.svcgssd' +readonly PATH_BIN_STATD='/sbin/rpc.statd' + +readonly PATH_FILE_ETC_EXPORTS='/etc/exports' +readonly PATH_FILE_ETC_IDMAPD_CONF='/etc/idmapd.conf' +readonly PATH_FILE_ETC_KRB5_CONF='/etc/krb5.conf' +readonly PATH_FILE_ETC_KRB5_KEYTAB='/etc/krb5.keytab' + +readonly MOUNT_PATH_NFSD='/proc/fs/nfsd' +readonly MOUNT_PATH_RPC_PIPEFS='/var/lib/nfs/rpc_pipefs' + +readonly REGEX_EXPORTS_LINES_TO_SKIP='^\s*#|^\s*$' + +readonly LOG_LEVEL_INFO='INFO' +readonly LOG_LEVEL_DEBUG='DEBUG' + +readonly STATE_LOG_LEVEL='log_level' +readonly STATE_IS_LOGGING_DEBUG='is_logging_debug' +readonly STATE_IS_LOGGING_INFO='is_logging_info' +readonly STATE_NFSD_THREAD_COUNT='nfsd_thread_count' +readonly STATE_NFSD_PORT='nfsd_port' +readonly STATE_MOUNTD_PORT='mountd_port' +readonly STATE_STATD_PORT_IN='statd_port_in' +readonly STATE_STATD_PORT_OUT='statd_port_out' +readonly STATE_NFS_VERSION='nfs_version' + +# "state" is our only global variable, which is an associative array of normalized data +declare -A state + + +###################################################################################### +### logging +###################################################################################### + +log() { + + echo "----> $1" +} + +log_warning() { + + log "WARNING: $1" +} + +log_error() { + + log '' + log "ERROR: $1" + log '' +} + +log_header() { + + echo " +================================================================== + ${1^^} +==================================================================" +} + + +###################################################################################### +### error handling +###################################################################################### + +bail() { + + log_error "$1" + exit 1 +} + +on_failure() { + + # shellcheck disable=SC2181 + if [[ $? 
-eq 0 ]]; then + return + fi + + case "$1" in + warn) + log_warning "$2" + ;; + stop) + log_error "$2" + stop + ;; + *) + bail "$2" + ;; + esac +} + + +###################################################################################### +### process control +###################################################################################### + +term_process() { + + local -r base=$(basename "$1") + local -r pid=$(pidof "$base") + + if [[ -n $pid ]]; then + log "terminating $base" + kill "$pid" + on_failure warn "unable to terminate $base" + else + log "$base was not running" + fi +} + + +###################################################################################### +### teardown +###################################################################################### + +stop_mount() { + + local -r path=$1 + local -r type=$(basename "$path") + + if mount | grep -Eq ^"$type on $path\\s+"; then + + local args=() + if is_logging_debug; then + args+=('-v') + log "un-mounting $type filesystem from $path" + fi + args+=("$path") + + umount "${args[@]}" + on_failure warn "unable to un-mount $type filesystem from $path" + + else + log "no active mount at $path" + fi +} + +stop_nfsd() { + + log 'terminating nfsd' + $PATH_BIN_NFSD 0 + on_failure warn 'unable to terminate nfsd. if it had started already, check Docker host for lingering [nfsd] processes' +} + +stop_exportfs() { + + local args=('-ua') + if is_logging_debug; then + args+=('-v') + fi + + log 'un-exporting filesystem(s)' + $PATH_BIN_EXPORTFS "${args[@]}" + on_failure warn 'unable to un-export filesystem(s)' +} + +stop() { + + log_header 'terminating ...' + + if is_kerberos_requested; then + term_process "$PATH_BIN_RPC_SVCGSSD" + fi + + stop_nfsd + + if is_idmapd_requested; then + term_process "$PATH_BIN_IDMAPD" + fi + + if is_nfs3_enabled; then + term_process "$PATH_BIN_STATD" + fi + + term_process "$PATH_BIN_MOUNTD" + stop_exportfs + term_process "$PATH_BIN_RPCBIND" + stop_mount "$MOUNT_PATH_NFSD" + stop_mount "$MOUNT_PATH_RPC_PIPEFS" + + log_header 'terminated' + + exit 0 +} + + +###################################################################################### +### runtime environment detection +###################################################################################### + +is_kerberos_requested() { + + [[ -n "${!ENV_VAR_NFS_ENABLE_KERBEROS}" ]] && return 0 || return 1 +} + +is_nfs3_enabled() { + + [[ -z "${!ENV_VAR_NFS_DISABLE_VERSION_3}" ]] && return 0 || return 1 +} + +is_idmapd_requested() { + + [[ -f "$PATH_FILE_ETC_IDMAPD_CONF" ]] && return 0 || return 1 +} + +is_logging_debug() { + + [[ -n ${state[$STATE_IS_LOGGING_DEBUG]} ]] && return 0 || return 1 +} + +is_kernel_module_loaded() { + + local -r module=$1 + + if lsmod | grep -Eq "^$module\\s+" || [[ -d "/sys/module/$module" ]]; then + + if is_logging_debug; then + log "kernel module $module is loaded" + fi + return 0 + fi + + log "kernel module $module is missing" + return 1 +} + +is_granted_linux_capability() { + + if capsh --print | grep -Eq "^Current: = .*,?${1}(,|$)"; then + return 0 + fi + + return 1 +} + + +###################################################################################### +### runtime configuration assertions +###################################################################################### + +assert_file_provided() { + + if [[ ! -f "$1" ]]; then + bail "please provide $1 to the container" + fi +} + +assert_kernel_mod() { + + local -r module=$1 + + if is_kernel_module_loaded "$module"; then + return + fi + + if [[ ! 
-d /lib/modules ]] || ! is_granted_linux_capability 'sys_module'; then + bail "$module module is not loaded in the Docker host's kernel (try: modprobe $module)" + fi + + log "attempting to load kernel module $module" + modprobe -v "$module" + on_failure bail "unable to dynamically load kernel module $module. try modprobe $module on the Docker host" + + if ! is_kernel_module_loaded "$module"; then + bail "modprobe claims that it loaded kernel module $module, but it still appears to be missing" + fi +} + +assert_port() { + + local -r variable_name=$1 + local -r value=${!variable_name} + + if [[ -n "$value" && ( "$value" -lt 1 || "$value" -gt 65535 ) ]]; then + bail "please set $variable_name to an integer between 1 and 65535 inclusive" + fi +} + + +###################################################################################### +### initialization +###################################################################################### + +init_state_logging() { + + # if the user didn't request a specific log level, the default is INFO + local incoming_log_level="${!ENV_VAR_NFS_LOG_LEVEL:-$LOG_LEVEL_INFO}" + local -r normalized_log_level="${incoming_log_level^^}" + + if ! echo "$normalized_log_level" | grep -Eq 'DEBUG|INFO'; then + bail "the only acceptable values for $ENV_VAR_NFS_LOG_LEVEL are: DEBUG, INFO" + fi + + state[$STATE_LOG_LEVEL]=$normalized_log_level; + state[$STATE_IS_LOGGING_INFO]=1 + + if [[ $normalized_log_level = "$LOG_LEVEL_DEBUG" ]]; then + state[$STATE_IS_LOGGING_DEBUG]=1 + log "log level set to $LOG_LEVEL_DEBUG" + fi +} + +init_state_nfsd_thread_count() { + + local count + + if [[ -n "${!ENV_VAR_NFS_SERVER_THREAD_COUNT}" ]]; then + + count="${!ENV_VAR_NFS_SERVER_THREAD_COUNT}" + + if [[ $count -lt 1 ]]; then + bail "please set $ENV_VAR_NFS_SERVER_THREAD_COUNT to a positive integer" + fi + + if is_logging_debug; then + log "will use requested rpc.nfsd thread count of $count" + fi + + else + + count="$(grep -Ec ^processor /proc/cpuinfo)" + on_failure bail "unable to detect CPU count. set $ENV_VAR_NFS_SERVER_THREAD_COUNT environment variable" + + if is_logging_debug; then + log "will use $count rpc.nfsd server thread(s) (1 thread per CPU)" + fi + + fi + + state[$STATE_NFSD_THREAD_COUNT]=$count +} + +init_state_ports() { + + assert_port "$ENV_VAR_NFS_PORT" + assert_port "$ENV_VAR_NFS_PORT_MOUNTD" + assert_port "$ENV_VAR_NFS_PORT_STATD_IN" + assert_port "$ENV_VAR_NFS_PORT_STATD_OUT" + + state[$STATE_NFSD_PORT]=${!ENV_VAR_NFS_PORT:-$DEFAULT_NFS_PORT} + state[$STATE_MOUNTD_PORT]=${!ENV_VAR_NFS_PORT_MOUNTD:-$DEFAULT_NFS_PORT_MOUNTD} + state[$STATE_STATD_PORT_IN]=${!ENV_VAR_NFS_PORT_STATD_IN:-$DEFAULT_NFS_PORT_STATD_IN} + state[$STATE_STATD_PORT_OUT]=${!ENV_VAR_NFS_PORT_STATD_OUT:-$DEFAULT_NFS_PORT_STATD_OUT} +} + +init_state_nfs_version() { + + local -r requested_version="${!ENV_VAR_NFS_VERSION:-$DEFAULT_NFS_VERSION}" + + echo "$requested_version" | grep -Eq '^3$|^4(\.[1-2])?$' + on_failure bail "please set $ENV_VAR_NFS_VERSION to one of: 4.2, 4.1, 4, 3" + + if ! 
is_nfs3_enabled && [[ "$requested_version" = '3' ]]; then + bail 'you cannot simultaneously enable and disable NFS version 3' + fi + + state[$STATE_NFS_VERSION]=$requested_version +} + +init_trap() { + + trap stop SIGTERM SIGINT +} + +init_exports() { + + # first, see if it's bind-mounted + if mount | grep -Eq "^[^ ]+ on $PATH_FILE_ETC_EXPORTS type "; then + + if is_logging_debug; then + log "$PATH_FILE_ETC_EXPORTS is bind-mounted" + fi + + # maybe it's baked-in to the image + elif [[ -f $PATH_FILE_ETC_EXPORTS && -r $PATH_FILE_ETC_EXPORTS && -s $PATH_FILE_ETC_EXPORTS ]]; then + + if is_logging_debug; then + log "$PATH_FILE_ETC_EXPORTS is baked into the image" + fi + + # fall back to environment variables + else + + local count_valid_exports=0 + local exports='' + local candidate_export_vars + local candidate_export_var + + # collect all candidate environment variable names + candidate_export_vars=$(compgen -A variable | grep -E 'NFS_EXPORT_[0-9]+' | sort) + on_failure bail 'failed to detect NFS_EXPORT_* variables' + + if [[ -z "$candidate_export_vars" ]]; then + bail "please provide $PATH_FILE_ETC_EXPORTS to the container or set at least one NFS_EXPORT_* environment variable" + fi + + log "building $PATH_FILE_ETC_EXPORTS from environment variables" + + for candidate_export_var in $candidate_export_vars; do + + local line="${!candidate_export_var}" + + # skip comments and empty lines + if [[ "$line" =~ $REGEX_EXPORTS_LINES_TO_SKIP ]]; then + log_warning "skipping $candidate_export_var environment variable since it contains only whitespace or a comment" + continue; + fi + + local line_as_array + read -r -a line_as_array <<< "$line" + local dir="${line_as_array[0]}" + + if [[ ! -d "$dir" ]]; then + log_warning "skipping $candidate_export_var environment variable since $dir is not a container directory" + continue + fi + + if [[ $count_valid_exports -gt 0 ]]; then + exports=$exports$'\n' + fi + + exports=$exports$line + + (( count_valid_exports++ )) + + done + + log "collected $count_valid_exports valid export(s) from NFS_EXPORT_* environment variables" + + if [[ $count_valid_exports -eq 0 ]]; then + bail 'no valid exports' + fi + + echo "$exports" > $PATH_FILE_ETC_EXPORTS + on_failure bail "unable to write to $PATH_FILE_ETC_EXPORTS" + fi + + # make sure we have at least one export + grep -Evq "$REGEX_EXPORTS_LINES_TO_SKIP" $PATH_FILE_ETC_EXPORTS + on_failure bail "$PATH_FILE_ETC_EXPORTS has no exports" +} + +init_runtime_assertions() { + + if ! is_granted_linux_capability 'cap_sys_admin'; then + bail 'missing CAP_SYS_ADMIN. 
be sure to run this image with --cap-add SYS_ADMIN or --privileged' + fi + + # check kernel modules + assert_kernel_mod nfs + assert_kernel_mod nfsd + + # perform Kerberos assertions + if is_kerberos_requested; then + + assert_file_provided "$PATH_FILE_ETC_KRB5_KEYTAB" + assert_file_provided "$PATH_FILE_ETC_KRB5_CONF" + + assert_kernel_mod rpcsec_gss_krb5 + fi +} + + +###################################################################################### +### boot helpers +###################################################################################### + +boot_helper_mount() { + + local -r path=$1 + local -r type=$(basename "$path") + local args=('-t' "$type" "$path") + + if is_logging_debug; then + args+=('-vvv') + log "mounting $type filesystem onto $path" + fi + + mount "${args[@]}" + on_failure stop "unable to mount $type filesystem onto $path" +} + +boot_helper_get_version_flags() { + + local -r requested_version="${state[$STATE_NFS_VERSION]}" + local flags=('--nfs-version' "$requested_version" '--no-nfs-version' 2) + + if ! is_nfs3_enabled; then + flags+=('--no-nfs-version' 3) + fi + + if [[ "$requested_version" = '3' ]]; then + flags+=('--no-nfs-version' 4) + fi + + echo "${flags[@]}" +} + +boot_helper_start_daemon() { + + local -r msg="$1" + local -r daemon="$2" + shift 2 + local -r daemon_args=("$@") + + log "$msg" + "$daemon" "${daemon_args[@]}" + on_failure stop "$daemon failed" +} + +boot_helper_start_non_daemon() { + + local -r msg="$1" + local -r process="$2" + shift 2 + local -r process_args=("$@") + + log "$msg" + "$process" "${process_args[@]}" & + + local -r bg_pid=$! + + # somewhat arbitrary assumption that if the process isn't dead already, it will die within a millisecond. for our + # purposes this works just fine, but if someone has a better solution please open a PR. + sleep .001 + kill -0 $bg_pid 2> /dev/null + on_failure stop "$process failed" +} + +###################################################################################### +### primary boot +###################################################################################### + +boot_main_mounts() { + + # http://wiki.linux-nfs.org/wiki/index.php/Nfsv4_configuration + boot_helper_mount "$MOUNT_PATH_RPC_PIPEFS" + boot_helper_mount "$MOUNT_PATH_NFSD" +} + +boot_main_exportfs() { + + local args=('-ar') + if is_logging_debug; then + args+=('-v') + fi + + boot_helper_start_daemon 'starting exportfs' $PATH_BIN_EXPORTFS "${args[@]}" +} + +boot_main_mountd() { + + # https://linux.die.net/man/8/rpc.mountd + # + # --debug turn on debugging. Valid kinds are: all, auth, call, general and parse. + # --port specifies the port number used for RPC listener sockets + + local version_flags + read -r -a version_flags <<< "$(boot_helper_get_version_flags)" + local -r port="${state[$STATE_MOUNTD_PORT]}" + local args=('--port' "$port" "${version_flags[@]}") + if is_logging_debug; then + args+=('--debug' 'all') + fi + + # yes, rpc.mountd is required even for NFS v4: https://forums.gentoo.org/viewtopic-p-7724856.html#7724856 + boot_helper_start_daemon "starting rpc.mountd on port $port" $PATH_BIN_MOUNTD "${args[@]}" +} + +boot_main_rpcbind() { + + # https://linux.die.net/man/8/rpcbind + # + # -d run in debug mode. in this mode, rpcbind will not fork when it starts, will print additional information during + # operation, and will abort on certain errors if -a is also specified. 
with this option, the name-to-address
+  #    translation consistency checks are shown in detail
+  # -s cause rpcbind to change to the user daemon as soon as possible. this causes rpcbind to use non-privileged ports
+  #    for outgoing connections, preventing non-privileged clients from using rpcbind to connect to services from a
+  #    privileged port
+
+  local args=('-s')
+  if is_logging_debug; then
+    args+=('-d')
+  fi
+  boot_helper_start_daemon 'starting rpcbind' $PATH_BIN_RPCBIND "${args[@]}"
+}
+
+boot_main_idmapd() {
+
+  if ! is_idmapd_requested; then
+    return
+  fi
+
+  # https://linux.die.net/man/8/rpc.idmapd
+  #
+  # -S Server-only: perform no idmapping for any NFS client, even if one is detected
+  # -v increases the verbosity level (can be specified multiple times)
+  # -f runs rpc.idmapd in the foreground and prints all output to the terminal
+
+  local args=('-S')
+  local func=boot_helper_start_daemon
+  if is_logging_debug; then
+    args+=('-vvv' '-f')
+    func=boot_helper_start_non_daemon
+  fi
+
+  $func 'starting rpc.idmapd' $PATH_BIN_IDMAPD "${args[@]}"
+}
+
+boot_main_statd() {
+
+  if ! is_nfs3_enabled; then
+    return
+  fi
+
+  # https://linux.die.net/man/8/rpc.statd
+  #
+  # --no-syslog causes rpc.statd to write log messages on stderr instead of to the system log, if the -F option was
+  #    also specified
+  # --foreground keeps rpc.statd attached to its controlling terminal so that NSM operation can be monitored
+  #    directly or run under a debugger. if this option is not specified, rpc.statd backgrounds itself
+  #    soon after it starts
+  # --no-notify prevents rpc.statd from running the sm-notify command when it starts up, preserving the existing
+  #    NSM state number and monitor list
+  # --outgoing-port specifies the source port number the sm-notify command should use when sending reboot notifications
+  # --port specifies the port number used for RPC listener sockets
+
+  local -r port_in="${state[$STATE_STATD_PORT_IN]}"
+  local -r port_out="${state[$STATE_STATD_PORT_OUT]}"
+  local args=('--no-notify' '--port' "$port_in" '--outgoing-port' "$port_out")
+  local func=boot_helper_start_daemon
+
+  if is_logging_debug; then
+    args+=('--no-syslog' '--foreground')
+    func=boot_helper_start_non_daemon
+  fi
+
+  $func "starting rpc.statd on port $port_in (outgoing from port $port_out)" $PATH_BIN_STATD "${args[@]}"
+}
+
+boot_main_nfsd() {
+
+  # https://linux.die.net/man/8/rpc.nfsd
+  #
+  # --debug enable logging of debugging messages
+  # --port specify a different port to listen on for NFS requests. by default, rpc.nfsd will listen on port 2049
+  # --tcp explicitly enable TCP connections from clients
+  # --udp explicitly enable UDP connections from clients
+  # nproc specify the number of NFS server threads. by default, just one thread is started. however, for optimum
+  #    performance several threads should be used. the actual figure depends on the number of and the work load
+  #    created by the NFS clients, but a useful starting point is 8 threads. 
effects of modifying that number can + # be checked using the nfsstat(8) program + + local version_flags + read -r -a version_flags <<< "$(boot_helper_get_version_flags)" + local -r threads="${state[$STATE_NFSD_THREAD_COUNT]}" + local -r port="${state[$STATE_NFSD_PORT]}" + local args=('--tcp' '--udp' '--port' "$port" "${version_flags[@]}" "$threads") + + if is_logging_debug; then + args+=('--debug') + fi + + boot_helper_start_daemon "starting rpc.nfsd on port $port with $threads server thread(s)" $PATH_BIN_NFSD "${args[@]}" + + # rpcbind isn't required for NFSv4, but if it's not running then nfsd takes over 5 minutes to start up. + # it's a bug in either nfs-utils or the kernel, and the code of both is over my head. + # so as a workaround we start rpcbind always and (in v4-only scenarios) kill it after nfsd starts up + if ! is_nfs3_enabled; then + term_process "$PATH_BIN_RPCBIND" + fi +} + +boot_main_svcgssd() { + + if ! is_kerberos_requested; then + return + fi + + # https://linux.die.net/man/8/rpc.svcgssd + # + # -f runs rpc.svcgssd in the foreground and sends output to stderr (as opposed to syslogd) + # -v increases the verbosity of the output (can be specified multiple times) + # -r if the rpcsec_gss library supports setting debug level, increases the verbosity of the output (can be specified + # multiple times) + # -i if the nfsidmap library supports setting debug level, increases the verbosity of the output (can be specified + # multiple times) + + local args=('-f') + if is_logging_debug; then + args+=('-vvv' '-rrr' '-iii') + fi + + boot_helper_start_non_daemon 'starting rpc.svcgssd' $PATH_BIN_RPC_SVCGSSD "${args[@]}" +} + + +###################################################################################### +### boot summary +###################################################################################### + +summarize_nfs_versions() { + + local -r reqd_version="${state[$STATE_NFS_VERSION]}" + local versions='' + + case "$reqd_version" in + 4\.2) + versions='4.2, 4.1, 4' + ;; + 4\.1) + versions='4.1, 4' + ;; + 4) + versions='4' + ;; + *) + versions='3' + ;; + esac + + if is_nfs3_enabled && [[ "$reqd_version" =~ ^4 ]]; then + versions="$versions, 3" + fi + + log "list of enabled NFS protocol versions: $versions" +} + +summarize_exports() { + + log 'list of container exports:' + + # if debug is enabled, read /var/lib/nfs/etab as it contains the "real" export data. but it also contains more + # information that most people will usually need to see + local file_to_read="$PATH_FILE_ETC_EXPORTS" + if is_logging_debug; then + file_to_read='/var/lib/nfs/etab' + fi + + while read -r export; do + + # skip comments and empty lines + if [[ "$export" =~ $REGEX_EXPORTS_LINES_TO_SKIP ]]; then + continue; + fi + + # log it w/out leading and trailing whitespace + log " $(echo -e "$export" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + + done < "$file_to_read" +} + +summarize_ports() { + + local -r port_nfsd="${state[$STATE_NFSD_PORT]}" + local -r port_mountd="${state[$STATE_MOUNTD_PORT]}" + local -r port_statd_in="${state[$STATE_STATD_PORT_IN]}" + + if ! 
is_nfs3_enabled; then + log "list of container ports that should be exposed: $port_nfsd (TCP)" + else + log 'list of container ports that should be exposed:' + log ' 111 (TCP and UDP)' + log " $port_nfsd (TCP and UDP)" + log " $port_statd_in (TCP and UDP)" + log " $port_mountd (TCP and UDP)" + fi +} + + +###################################################################################### +### main routines +###################################################################################### + +init() { + + log_header 'setting up ...' + + init_state_logging + init_state_nfsd_thread_count + init_state_ports + init_state_nfs_version + init_exports + init_runtime_assertions + init_trap + + log 'setup complete' +} + +boot() { + + log_header 'starting services ...' + + boot_main_mounts + boot_main_rpcbind + boot_main_exportfs + boot_main_mountd + boot_main_statd + boot_main_idmapd + boot_main_nfsd + boot_main_svcgssd + + log 'all services started normally' +} + +summarize() { + + log_header 'server startup complete' + + summarize_nfs_versions + summarize_exports + summarize_ports +} + +hangout() { + + log_header 'ready and waiting for NFS client connections' + + # wait forever or until we get SIGTERM or SIGINT + # https://stackoverflow.com/a/41655546/229920 + # https://stackoverflow.com/a/27694965/229920 + while :; do sleep 2073600 & wait; done +} + +main() { + + init + boot + summarize + hangout +} + +main diff --git a/container/os-nfs/root/etc/exports b/container/os-nfs/root/etc/exports new file mode 100644 index 0000000000000000000000000000000000000000..b2481cd84f24006a39228d0c94abfaecd2531247 --- /dev/null +++ b/container/os-nfs/root/etc/exports @@ -0,0 +1,7 @@ +/exports *(ro,async,fsid=0,crossmnt,no_subtree_check,no_root_squash) +/exports/os *(ro,async,fsid=1,no_subtree_check,no_root_squash) +/exports/os-rw *(rw,async,fsid=2,no_subtree_check,no_root_squash) +/exports/osimage *(rw,async,fsid=3,no_subtree_check,insecure,all_squash,anonuid=1090,anongid=1090) +/exports/initrd *(rw,async,fsid=4,no_subtree_check,insecure,all_squash,anonuid=1090,anongid=1090) +/exports/result *(rw,async,fsid=5,no_subtree_check,insecure,all_squash,anonuid=1090,anongid=1090) +/exports/data *(rw,async,fsid=6,no_subtree_check,insecure,all_squash,anonuid=1090,anongid=1090) diff --git a/container/os-nfs/start b/container/os-nfs/start new file mode 100755 index 0000000000000000000000000000000000000000..8b8ff5f844b334ceb9428a4e12e99b60983a4c1c --- /dev/null +++ b/container/os-nfs/start @@ -0,0 +1,39 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. $CCI_SRC/container/defconfig.sh + +lsmod | grep -q "^nfs\s" || { + sudo modprobe nfs +} +lsmod | grep -q "^nfsd\s" || { + sudo modprobe nfsd +} + +# Several nfs servers running will cause conflict, so rm an old before running a new nfs server docker. +docker_rm os-nfs + +cmd=( + docker run + --restart=always +# -it + --name os-nfs # avoid multiple containers + -e NFS_DISABLE_VERSION_3=1 + --mount type=tmpfs,destination=/exports + -v /srv/os:/exports/os:ro + -v /etc/localtime:/etc/localtime:ro + -v /srv/os:/exports/os-rw # for install/setup + -v /srv/result:/exports/result + -v /srv/data:/exports/data + -v /srv/initrd:/exports/initrd + -v /srv/initrd:/exports/osimage # for lkp compatibility + --cap-add SYS_ADMIN +# --privileged + --net=host # w/o it, docker-proxy only listens on tcp6.. 
+#   -p 2049:2049
+    -d  # comment out to see debug output
+    os-nfs
+)
+
+"${cmd[@]}"
diff --git a/container/qcow2rootfs/Dockerfile b/container/qcow2rootfs/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a0f8b042520bb9c1206786dce5f7fd8c1df21409
--- /dev/null
+++ b/container/qcow2rootfs/Dockerfile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM centos:8
+
+MAINTAINER Yu Chuan <13186087857@163.com>, Shi Zhichao
+
+RUN rpm --import /etc/pki/rpm-gpg/RPM*
+
+RUN yum makecache && \
+    yum install -y --skip-broken libvirt libguestfs-tools xz expect openssl && \
+    yum clean all
+
+ENV EXTRACT_USER=extract
+ENV EXTRACT_ROOT=/root/extract
+
+RUN useradd $EXTRACT_USER && \
+    usermod -aG kvm,qemu,root $EXTRACT_USER
diff --git a/container/qcow2rootfs/bin/common b/container/qcow2rootfs/bin/common
new file mode 100755
index 0000000000000000000000000000000000000000..0d109e39c3fc4fd9594e4475752d8183be4b229b
--- /dev/null
+++ b/container/qcow2rootfs/bin/common
@@ -0,0 +1,245 @@
+#! /bin/bash -eu
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+# This file is sourced by the `run` script on the host;
+# it defines helper functions and sets host environment variables.
+
+check_cmd_input() {
+    ( [ $# -eq 2 ] && [ -f "$1" ] ) || {
+        usage
+        exit 1
+    }
+
+    check_qcow2_file "$1"
+    init_rootfs_dir "$2"
+
+    export EXTRACT_ROOT='/root/extract'
+    export RUN_DIR=$(dirname $(realpath "$0"))
+    export QCOW2_FILE=$(realpath "$1")
+    export QCOW2_DIR=$(dirname "$QCOW2_FILE")
+    export QCOW2_NAME=$(basename "$QCOW2_FILE")
+    export ROOTFS_DIR=$(realpath "$2")
+}
+
+set_env_vars() {
+    # assumption: bin/main reads these variables via --env-file, so the path
+    # values here point at the container-side mounts set up by ./run
+    cat > "$RUN_DIR/bin/env.list" <<EOF
+QCOW2_NAME=$QCOW2_NAME
+QCOW2_DIR=$EXTRACT_ROOT/qcow2-dir
+QCOW2_FILE=$EXTRACT_ROOT/qcow2-dir/$QCOW2_NAME
+ROOTFS_DIR=$EXTRACT_ROOT/rootfs-dir
+ROOT_NEW_PASSWD=$ROOT_NEW_PASSWD
+EOF
+}
+
+usage() {
+    echo "
+    Usage:
+        ./run <src_qcow2_file_abspath> <dst_rootfs_new_abspath>
+
+    src_qcow2_file_abspath: source .qcow2 file absolute path with suffix: [qcow2, qcow2.xz].
+    dst_rootfs_new_abspath: destination absolute path to create for rootfs
+
+    Example:
+        ./run /tmp/openEuler-qcow2/lts.qcow2 /tmp/openeuler-rootfs/
+    "
+}
+
+check_qcow2_file() {
+    local allow_qcow2_suffix_list qcow2_name qcow2_suffix
+    allow_qcow2_suffix_list=('qcow2' 'qcow2.xz')
+    qcow2_name=$(basename "$1")
+    qcow2_suffix=${qcow2_name##*.}
+    [ "$qcow2_suffix" == 'qcow2' ] || {
+        qcow2_suffix=$(echo "$qcow2_name" |awk -F '.' '{print $(NF-1)"."$NF}')
+        echo "${allow_qcow2_suffix_list[@]}" |grep -wq "$qcow2_suffix" || {
+            echo "[ERROR] Only .qcow2 and .qcow2.xz files are supported!"
+            exit 2
+        }
+    }
+}
+
+init_rootfs_dir() {
+    [ -d "$1" ] && return
+
+    local limit_prompt_times current_prompt_time
+    limit_prompt_times=3
+    current_prompt_time=0
+    while true
+    do
+        read -r -p "[WARNING] Do you want to create \"$1\"? [y|n]> " if_create
+
+        [ "$if_create" == 'y' ] && break
+        [ "$if_create" == 'n' ] && echo "[ERROR] User cancelled running." && exit
+
+        current_prompt_time=$((current_prompt_time + 1))
+        [ $current_prompt_time -ge $limit_prompt_times ] && {
+            echo "[ERROR] Exiting: prompt limit reached."
+            exit 3
+        }
+    done
+}
+
+check_passwd_file() {
+    export ROOT_NEW_PASSWD=
+    [ -f "$1" ] || {
+        echo "[INFO] No password file specified; root password kept."
+        return
+    }
+
+    export ROOT_NEW_PASSWD=$(cat "$1")
+}
+
+check_container_running() {
+    local container check_times check_interval cur_times
+    container=$1
+    docker ps -a |grep -q "$container" || {
+        echo "[ERROR] Container $container does not exist!"
+        exit 4
+    }
+
+    echo "Checking whether container $container is running..."
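+    # poll `docker ps` a few times below: a freshly created container can take a
+    # moment to reach the 'Up' state before it is safe to `docker exec` into it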
+    check_times=5
+    check_interval=3
+    cur_times=0
+    while [ $check_times -gt $cur_times ]
+    do
+        docker ps |grep "$container" |grep -iq 'up ' && {
+            echo "Container $container is running now"
+            break
+        }
+        cur_times=$((cur_times + 1))
+        sleep $check_interval
+    done
+
+    trap '
+        [ -f $RUN_DIR/bin/env.list ] && rm -f $RUN_DIR/bin/env.list
+        docker stop $container >/dev/null
+        echo "[INFO] Container $container stopped."
+    ' EXIT
+}
+
+get_rootfs_kernel() {
+    echo "Finding vmlinuz under $ROOTFS_DIR/boot ..."
+    cd "$ROOTFS_DIR" || {
+        echo "Failed to change into dir \"$ROOTFS_DIR\""
+        exit 5
+    }
+    local vmlinuz_file vmlinuz kernel
+    vmlinuz_file=$(find ./boot -name "vmlinuz-*" |grep -v rescue) && export ROOTFS_VMLINUZ_FILE=$vmlinuz_file
+    vmlinuz=$(basename "$vmlinuz_file") && export ROOTFS_VMLINUZ=$vmlinuz
+    kernel=${vmlinuz:8} && export ROOTFS_KERNEL=$kernel
+
+    echo
+    echo "vmlinuz: $ROOTFS_VMLINUZ_FILE"
+    echo "kernel: $ROOTFS_KERNEL"
+}
+
+create_get_initrd() {
+    echo "Creating initrd.lkp via container/dracut-initrd..."
+    cd "$CCI_SRC/container/dracut-initrd" || {
+        echo "Failed to change into $CCI_SRC/container/dracut-initrd"
+        exit 6
+    }
+    ./run "$ROOTFS_DIR/lib/modules/$ROOTFS_KERNEL"
+
+    echo "Finding initrd.lkp under $ROOTFS_DIR/boot ..."
+    cd "$ROOTFS_DIR" || {
+        echo "Failed to change into dir \"$ROOTFS_DIR\""
+        exit 7
+    }
+    local initrd_lkp
+    initrd_lkp=$(find ./boot -name "initramfs.lkp*") && export ROOTFS_INITRD_LKP=$initrd_lkp
+
+    [ -f "$ROOTFS_INITRD_LKP" ] || {
+        echo "Failed to generate \"$ROOTFS_INITRD_LKP\""
+        exit 8
+    }
+
+    echo
+    echo "initrd_lkp: $ROOTFS_INITRD_LKP"
+}
+
+create_links_vmlinuz_initrd() {
+
+    get_rootfs_kernel
+    create_get_initrd
+
+    echo "Creating links to initrd.lkp and vmlinuz..."
+    local cmds
+    cmds=(
+        docker exec
+        "$container"
+        bash -c "
+            cd $EXTRACT_ROOT/rootfs-dir
+            chmod a+r $ROOTFS_INITRD_LKP
+            ln -sf $ROOTFS_VMLINUZ_FILE vmlinuz
+            ln -sf $ROOTFS_INITRD_LKP initrd.lkp
+        "
+    )
+    if "${cmds[@]}" ; then
+        echo "[INFO] Creating links to initrd.lkp and vmlinuz succeeded!"
+    else
+        echo "[WARNING] Creating links to initrd.lkp and vmlinuz failed!"
+    fi
+}
+
+get_vmlinuz_start_postion() {
+    local vmlinuz=$1
+
+    local vmlinuz_start_line
+    local vmlinuz_start_postion
+    # '1f 8b 08' - https://www.filesignatures.net/index.php?page=search&search=1F8B08&mode=SIG
+    vmlinuz_start_line=$(od -A d -t x1 ${vmlinuz} | grep "1f 8b 08")
+    vmlinuz_start_postion=${vmlinuz_start_line%% *}
+
+    [ -z "${vmlinuz_start_postion}" ] && {
+        echo "[WARNING] failed to identify vmlinuz."
+        export VMLINUZ_START_POSTION=-1
+        return
+    }
+
+    [ X${vmlinuz_start_postion} == X0000000 ] || {
+        local vmlinuz_start_postion_tmp
+        vmlinuz_start_postion_tmp=$(echo ${vmlinuz_start_line} | awk '{for(i=1;i<=NF;i++)if($i=="1f")print i-1}')
+        vmlinuz_start_postion=$((${vmlinuz_start_postion} + ${vmlinuz_start_postion_tmp} -1))
+    }
+
+    export VMLINUZ_START_POSTION=${vmlinuz_start_postion}
+}
+
+unzip_vmlinuz() {
+    get_rootfs_kernel
+
+    file ${ROOTFS_VMLINUZ_FILE} | grep -q gzip || return
+
+    get_vmlinuz_start_postion ${ROOTFS_VMLINUZ_FILE}
+    [ $VMLINUZ_START_POSTION -eq -1 ] && return
+
+    local unzip_str
+
+    if [ $VMLINUZ_START_POSTION -eq 0 ]; then
+        unzip_str="dd if=$ROOTFS_VMLINUZ bs=1 | zcat > $ROOTFS_VMLINUZ.tmp"
+    else
+        unzip_str="dd if=$ROOTFS_VMLINUZ bs=1 skip=$VMLINUZ_START_POSTION | zcat > $ROOTFS_VMLINUZ.tmp"
+    fi
+
+    echo "Unzipping vmlinuz..."
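+    # illustrative example of the command string built above: with a hypothetical
+    # offset of 12345, "dd if=vmlinuz bs=1 skip=12345 | zcat" drops the uncompressed
+    # boot stub in front of the gzip stream and inflates the kernel image proper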
+    local cmds
+    cmds=(
+        docker exec
+        "$container"
+        bash -c "
+            cd $EXTRACT_ROOT/rootfs-dir/boot
+            $unzip_str
+            mv $ROOTFS_VMLINUZ.tmp $ROOTFS_VMLINUZ
+            chmod o+r $ROOTFS_VMLINUZ
+        "
+    )
+    if "${cmds[@]}" ; then
+        echo "[INFO] Unzipping vmlinuz succeeded!"
+    else
+        echo "[WARNING] Unzipping vmlinuz failed!"
+    fi
+}
diff --git a/container/qcow2rootfs/bin/extract b/container/qcow2rootfs/bin/extract
new file mode 100755
index 0000000000000000000000000000000000000000..bf2155f5c230c1cba1ee3796dedbc83f9d997608
--- /dev/null
+++ b/container/qcow2rootfs/bin/extract
@@ -0,0 +1,21 @@
+#!/usr/bin/expect
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+#
+# This file is called by the main script in the container;
+# it converts a qcow2 file to tar.gz
+
+set qcow2_file [lindex $argv 0]
+set tar_out_file [lindex $argv 1]
+
+puts "[exec date +'%D_%T'] Entering guest filesystem shell and mounting qcow2 file: $qcow2_file"
+set timeout 30
+spawn /usr/bin/guestfish -i --ro -a $qcow2_file
+expect ">"
+
+puts "[exec date +'%D_%T'] Compressing file to $tar_out_file"
+send "tar-out / $tar_out_file compress:gzip numericowner:true xattrs:true selinux:true acls:true\n"
+set timeout 600
+puts "Please wait for the command to return..."
+expect ">"
+send "exit"
diff --git a/container/qcow2rootfs/bin/main b/container/qcow2rootfs/bin/main
new file mode 100755
index 0000000000000000000000000000000000000000..b70ec776664969c46029035cecf6ea6d8fac8ac8
--- /dev/null
+++ b/container/qcow2rootfs/bin/main
@@ -0,0 +1,92 @@
+#!/bin/bash -eu
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+#
+# This file is the main program running in the container
+
+tar_out_file="$ROOTFS_DIR/qcow2-out.tar.gz"
+
+# store the original user and group of qcow2 file
+qcow2_usr=$(ls -nl "$EXTRACT_ROOT" |grep -w qcow2-dir |awk '{print $3}')
+qcow2_grp=$(ls -nl "$EXTRACT_ROOT" |grep -w qcow2-dir |awk '{print $4}')
+
+trap_excepts() {
+    # trap Ctrl-C so an interrupted run does not leave the qcow2 file with the wrong owner
+    trap '
+        echo "[WARNING] Detected interrupt, restoring the owner of qcow2 file"
+        chown $qcow2_usr:$qcow2_grp $QCOW2_FILE
+        echo "[INFO] User cancelled."
+    ' SIGINT ERR
+}
+
+unzip_qcow2_xz() {
+    local qcow2_suffix="${QCOW2_NAME##*.}"
+    [ "$qcow2_suffix" == 'qcow2' ] || {
+        echo "Unzipping .qcow2.xz file to .qcow2 file..."
+        (
+            cd "$QCOW2_DIR"
+            /usr/bin/unxz -fk "$QCOW2_NAME"
+        )
+        export QCOW2_NAME="${QCOW2_NAME%.*}"
+    }
+}
+
+extract_rootfs() {
+    echo "[WARNING] Temporarily changing owner of qcow2 file"
+    chown "$EXTRACT_USER":"$EXTRACT_USER" "$QCOW2_FILE"
+    chmod 775 "$ROOTFS_DIR"
+
+    echo "[$(date +'%D %T')] Converting .qcow2 to .tar.gz, please wait several minutes..."
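+    # the conversion below runs as the unprivileged extract user; LIBGUESTFS_BACKEND=direct
+    # makes libguestfs launch its qemu appliance directly instead of going through libvirtd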
+    su - "$EXTRACT_USER" -c "
+        export LIBGUESTFS_BACKEND=direct
+        $EXTRACT_ROOT/bin/extract $QCOW2_FILE $tar_out_file
+    "
+
+    echo "[$(date +'%D %T')] Decompressing rootfs to $ROOTFS_DIR"
+    ([ -f "$tar_out_file" ] && /usr/bin/tar -xf "$tar_out_file" -C "$ROOTFS_DIR") || exit 4
+}
+
+pre_config_rootfs() {
+    [ -n "$ROOT_NEW_PASSWD" ] && {
+        echo "Changing root password"
+        passwd_md5=$(openssl passwd -1 "$ROOT_NEW_PASSWD")
+        sed -i -r "s/^root:[^:]*:(.*)/root:${passwd_md5//\//\\/}:\1/" "$ROOTFS_DIR/etc/shadow"
+        sed -i 's/[# ]PermitRootLogin.*/PermitRootLogin yes/' "$ROOTFS_DIR/etc/ssh/sshd_config"
+    }
+
+    # no mapping via rootfs
+    echo "Commenting out all lines in \$rootfs/etc/fstab"
+    sed -i -r 's/^([^#].*)/#\1/' "$ROOTFS_DIR/etc/fstab"
+
+    # private ssh key mode shall be 600
+    for key in $ROOTFS_DIR/etc/ssh/ssh*key
+    do
+        [ "$ROOTFS_DIR/etc/ssh/ssh*key" == "$key" ] && continue
+        chmod 600 $key
+    done
+
+    echo "Pre-configuring permissions of the rootfs layout"
+    chmod +w -R "$ROOTFS_DIR"
+    chmod a+r -R "$ROOTFS_DIR/boot"
+
+    # disable selinux
+    echo "Disabling selinux"
+    if [ -f ${ROOTFS_DIR}/etc/selinux/config ]; then
+        sed -i -r 's/SELINUX=enforcing/SELINUX=disabled/g' "$ROOTFS_DIR/etc/selinux/config"
+    fi
+}
+
+clean_environment() {
+    echo "[INFO] Program exiting, cleaning up environment..."
+    echo "Deleting .tar.gz file"
+    [ -f $tar_out_file ] && rm -f $tar_out_file
+
+    echo "[WARNING] Restoring the owner of qcow2 file"
+    chown "$qcow2_usr":"$qcow2_grp" "$QCOW2_FILE"
+}
+
+trap_excepts
+unzip_qcow2_xz
+extract_rootfs
+pre_config_rootfs
+clean_environment
diff --git a/container/qcow2rootfs/build b/container/qcow2rootfs/build
new file mode 100755
index 0000000000000000000000000000000000000000..565de24cb2fe58b474bf0f2f84406beba1dba703
--- /dev/null
+++ b/container/qcow2rootfs/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t qcow2rootfs:auto .
diff --git a/container/qcow2rootfs/run b/container/qcow2rootfs/run
new file mode 100755
index 0000000000000000000000000000000000000000..08b435405fcbd97cc41053f091b5c4b456bfcc4e
--- /dev/null
+++ b/container/qcow2rootfs/run
@@ -0,0 +1,45 @@
+#!/bin/bash -eu
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+source "$(dirname $(realpath $0))/bin/common"
+
+# password for rootfs under $HOME/.config/compass-ci/rootfs.passwd with mode 700
+# - password will be changed based on the specified config file
+# - password won't be changed if that file doesn't exist
+# - password based remote login will be disabled
+root_pwd_file="$HOME/.config/compass-ci/rootfs.passwd"
+
+# check and initialize host environment variables
+check_cmd_input "$@"
+check_passwd_file "$root_pwd_file"
+
+set_env_vars
+
+start=(
+    docker run
+    --rm
+    --privileged=true
+    -d
+    -v $RUN_DIR/bin:$EXTRACT_ROOT/bin
+    -v $QCOW2_DIR:$EXTRACT_ROOT/qcow2-dir
+    -v $ROOTFS_DIR:$EXTRACT_ROOT/rootfs-dir
+    --env-file $RUN_DIR/bin/env.list
+    qcow2rootfs:auto
+    /usr/sbin/init
+)
+
+echo "[INFO] Creating qcow2rootfs container..."
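+# `docker run -d` prints the new container's full id on stdout; keep a short
+# 10-character prefix, which is enough for the `docker exec` calls that follow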
+container=$("${start[@]}" |awk '{print substr($0,1,10)}') +check_container_running "$container" + +extract=( + docker exec + $container + $EXTRACT_ROOT/bin/main $QCOW2_FILE +) + +"${extract[@]}" + +create_links_vmlinuz_initrd +unzip_vmlinuz diff --git a/container/qemu-efi/Dockerfile b/container/qemu-efi/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..e1087c54c558a6d483821b1dadb12ab8de518f9b --- /dev/null +++ b/container/qemu-efi/Dockerfile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM debian + +ENV DEBIAN_FRONTEND noninteractive + +COPY conf/sources.list* /etc/apt/ +RUN apt-get update && \ + apt-get install -y --no-install-recommends apt-utils > /dev/null 2>&1 && \ + apt-get install -y qemu-efi-aarch64 diff --git a/container/qemu-efi/build b/container/qemu-efi/build new file mode 100755 index 0000000000000000000000000000000000000000..1238e6780b37b3dda033e52e5e499d805817b2a8 --- /dev/null +++ b/container/qemu-efi/build @@ -0,0 +1,5 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t debian:qemu-efi . diff --git a/container/qemu-efi/conf/sources.list b/container/qemu-efi/conf/sources.list new file mode 100644 index 0000000000000000000000000000000000000000..8179e01cadd258c86414c028f9eb913fa44e58d1 --- /dev/null +++ b/container/qemu-efi/conf/sources.list @@ -0,0 +1,9 @@ +deb [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib +deb [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib +deb [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib +deb [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib + +deb-src [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib +deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib +deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib +deb-src [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib diff --git a/container/qemu-efi/install b/container/qemu-efi/install new file mode 100755 index 0000000000000000000000000000000000000000..96be9aa5c226cf8bcb3799ec2eeca491eed230c3 --- /dev/null +++ b/container/qemu-efi/install @@ -0,0 +1,15 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. '../defconfig.sh' + +cmd=( + docker run + --name qemu-efi + debian:qemu-efi +) + +"${cmd[@]}" +docker cp qemu-efi:/usr/share/qemu-efi-aarch64 /usr/share/qemu-efi-aarch64 +docker_rm qemu-efi diff --git a/container/rabbitmq/start b/container/rabbitmq/start new file mode 100755 index 0000000000000000000000000000000000000000..7e2541736308cf4ba8ce81eca775d35fdb6c8923 --- /dev/null +++ b/container/rabbitmq/start @@ -0,0 +1,22 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. 
$CCI_SRC/container/defconfig.sh + +docker_rm rabbitmq +docker pull rabbitmq:3-management + +cmd=( + docker run + --restart=always + -d + --name rabbitmq + -p 5672:5672 + -p 15672:15672 + -v /etc/localtime:/etc/localtime:ro + rabbitmq:3-management +) + +"${cmd[@]}" + diff --git a/container/redis/Dockerfile b/container/redis/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..9dd778272059bbdf2339b84898a993271e2bacf4 --- /dev/null +++ b/container/redis/Dockerfile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM alpine:3.11 + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +RUN apk add --no-cache 'redis' + +RUN sed -i 's:dir /var/lib/redis:dir /srv/redis:' /etc/redis.conf; \ + sed -i 's:protected-mode yes:protected-mode no:' /etc/redis.conf; \ + sed -i 's:bind 127.0.0.1:#bind 127.0.0.1:' /etc/redis.conf + +VOLUME /srv/redis +WORKDIR /srv/redis + +EXPOSE 6379 +CMD ["redis-server", "/etc/redis.conf"] + diff --git a/container/redis/build b/container/redis/build new file mode 100755 index 0000000000000000000000000000000000000000..e5230b0aec98a26698fa90896d6746f793b8c374 --- /dev/null +++ b/container/redis/build @@ -0,0 +1,6 @@ +#!/bin/sh +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t redis507n:alpine311 . + diff --git a/container/redis/start b/container/redis/start new file mode 100755 index 0000000000000000000000000000000000000000..827ad0b91884c98e7d82bc4fd4a4f660b6244f0e --- /dev/null +++ b/container/redis/start @@ -0,0 +1,18 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. +# WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect. +# WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled. + +. 
$CCI_SRC/container/defconfig.sh
+
+docker_rm redis507s01
+
+docker run --restart=always -d -p 6379:6379 -v /srv/redis/alpine/server01:/srv/redis -v /etc/localtime:/etc/localtime:ro --name redis507s01 redis507n:alpine311
+
+# test
+echo "you can use the command 'redis-cli' to start a redis client"
+echo "and then use the command 'keys *'"
+
diff --git a/container/registry/build b/container/registry/build
new file mode 100644
index 0000000000000000000000000000000000000000..ca8680aa7fbaa955252354c64ea75286a74cdc08
--- /dev/null
+++ b/container/registry/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# Origin: https://docs.docker.com/registry
+# SPDX-License-Identifier: Apache-2.0
+
+docker pull registry
diff --git a/container/registry/config.yml b/container/registry/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1217593d85190ea5ca810ec8a3a075e40aec7cee
--- /dev/null
+++ b/container/registry/config.yml
@@ -0,0 +1,18 @@
+version: 0.1
+log:
+  fields:
+    service: registry
+storage:
+  cache:
+    blobdescriptor: inmemory
+  filesystem:
+    rootdirectory: /srv/cache/registry/
+http:
+  addr: :5001
+  headers:
+    X-Content-Type-Options: [nosniff]
+health:
+  storagedriver:
+    enabled: true
+    interval: 10s
+    threshold: 3
diff --git a/container/registry/start b/container/registry/start
new file mode 100644
index 0000000000000000000000000000000000000000..c97e9e26f945e9f90e5f20a6cb4256f2a112c461
--- /dev/null
+++ b/container/registry/start
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Origin: https://docs.docker.com/registry
+# SPDX-License-Identifier: Apache-2.0
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm registry
+
+cmd=(
+    docker run -dt
+    --name registry
+    -p 5001:5001
+    -v /srv/cache/registry/:/srv/cache/registry/
+    -v $PWD/config.yml:/etc/docker/registry/config.yml
+    registry
+)
+"${cmd[@]}"
diff --git a/container/remote-git/Dockerfile b/container/remote-git/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..d9ef6bda83a83fb624e2a2002287bed9c92e8c9f
--- /dev/null
+++ b/container/remote-git/Dockerfile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM debian
+
+ENV DEBIAN_FRONTEND noninteractive
+
+COPY conf/sources.list* /etc/apt/
+RUN apt-get update && \
+    apt-get install -y git ruby-json ruby-sinatra
+
+COPY remote-git.rb /usr/local/bin/
+
+CMD ["/usr/local/bin/remote-git.rb"]
diff --git a/container/remote-git/build b/container/remote-git/build
new file mode 100755
index 0000000000000000000000000000000000000000..dad1a9b71b257c382e5222a0fd5ea236bf67facb
--- /dev/null
+++ b/container/remote-git/build
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t debian:remote-git .
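A quick way to sanity-check the private registry configured above (a sketch: it assumes the registry container is up and listening on :5001 as set in config.yml, and the alpine image is only an example):

    docker pull alpine:3.11
    docker tag alpine:3.11 localhost:5001/alpine:3.11
    docker push localhost:5001/alpine:3.11
    # Docker Registry HTTP API v2: the catalog should now list "alpine"
    curl http://localhost:5001/v2/_catalog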
diff --git a/container/remote-git/conf/sources.list b/container/remote-git/conf/sources.list
new file mode 100644
index 0000000000000000000000000000000000000000..8179e01cadd258c86414c028f9eb913fa44e58d1
--- /dev/null
+++ b/container/remote-git/conf/sources.list
@@ -0,0 +1,9 @@
+deb [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
+
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
diff --git a/container/remote-git/remote-git.rb b/container/remote-git/remote-git.rb
new file mode 100755
index 0000000000000000000000000000000000000000..085f4db28399d104f0330dbd18361072d1121317
--- /dev/null
+++ b/container/remote-git/remote-git.rb
@@ -0,0 +1,70 @@
+#!/usr/bin/ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'sinatra'
+require 'json'
+require 'open3'
+
+set :bind, '0.0.0.0'
+set :port, 8100
+
+GIT = '/srv/git'
+ILLEGAL_SHELL_CHAR = %w[& $].freeze
+
+post '/git_command' do
+  request.body.rewind
+  begin
+    data = JSON.parse request.body.read
+  rescue JSON::ParserError
+    return [400, headers.update({ 'errcode' => '100', 'errmsg' => 'parse json error' }), '']
+  end
+  puts '-' * 50
+  puts 'post body:', data
+
+  begin
+    # check if the parameters are complete
+    check_params_complete(data)
+    # check whether the git_command parameter meets the requirements
+    check_git_params(data['git_command'])
+    # check if git_command contains illegal characters
+    check_illegal_char(data['git_command'])
+    # check if the git repository exists
+    repo_path = File.join(GIT, data['git_repo'])
+    raise JSON.dump({ 'errcode' => '200', 'errmsg' => 'repository does not exist' }) unless File.exist?(repo_path)
+  rescue StandardError => e
+    puts 'error message: ', e.message
+    return [400, headers.update(JSON.parse(e.message)), '']
+  end
+
+  # execute git command
+  Dir.chdir(repo_path)
+  _stdin, stdout, _stderr, wait_thr = Open3.popen3(*data['git_command'])
+  out = stdout.read.force_encoding('ISO-8859-1').encode('UTF-8')
+  # Process::Status#exitstatus is the command's exit code; #to_i would return the raw wait status
+  exit_code = wait_thr.value.exitstatus
+
+  [200, headers.update({ 'errcode' => '0', 'exit_code' => exit_code.to_s }), out]
+end
+
+def check_git_params(git_command)
+  raise JSON.dump({ 'errcode' => '104', 'errmsg' => 'git_command params type error' }) if git_command.class != Array
+  raise JSON.dump({ 'errcode' => '105', 'errmsg' => 'git_command length error' }) if git_command.length < 2
+  raise JSON.dump({ 'errcode' => '107', 'errmsg' => 'not git-* command' }) unless git_command[0].start_with? 'git-'
+
+  git_command[0] = "/usr/lib/git-core/#{git_command[0]}"
+  return nil
+end
+
+def check_params_complete(params)
+  raise JSON.dump({ 'errcode' => '101', 'errmsg' => 'no git_repo params' }) unless params.key?('git_repo')
+  raise JSON.dump({ 'errcode' => '102', 'errmsg' => 'no git_command params' }) unless params.key?('git_command')
+end
+
+def check_illegal_char(git_command)
+  detected_string = git_command.join
+  ILLEGAL_SHELL_CHAR.each do |char|
+    raise JSON.dump({ 'errcode' => '108', 'errmsg' => 'git_command params illegal' }) if detected_string.include?(char)
+  end
+  nil
+end
diff --git a/container/remote-git/start b/container/remote-git/start
new file mode 100755
index 0000000000000000000000000000000000000000..e19be8989597e3753d46d656c498aa500f7e90ca
--- /dev/null
+++ b/container/remote-git/start
@@ -0,0 +1,26 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm remote-git
+
+cmd=(
+    docker run
+    --restart=always
+    --name remote-git
+    -u nobody
+    -d
+    -p 8100:8100
+    -v /srv/git:/srv/git
+    -v /etc/localtime:/etc/localtime:ro
+    debian:remote-git
)
+
+"${cmd[@]}"
+
+cat <
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN umask 002 && \
+    echo ':sources: ["http://rubygems.org"]' >> ~/.gemrc && \
+    gem install rest-client activesupport bigdecimal open3 && \
+    gem install git json yaml && \
+    gem install cucumber
+
+WORKDIR /usr/share/crystal/app
+COPY --from=alpine:crystal-shards /usr/share/crystal/app /usr/share/crystal/app
+
+CMD ["bash"]
+
diff --git a/container/scheduler-dev/build b/container/scheduler-dev/build
new file mode 100755
index 0000000000000000000000000000000000000000..1e4c57ece2431fdc797938e62ee6cfbb641a94cf
--- /dev/null
+++ b/container/scheduler-dev/build
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t alpine:scheduler-dev .
diff --git a/container/scheduler-dev/build-depends b/container/scheduler-dev/build-depends
new file mode 100755
index 0000000000000000000000000000000000000000..f243ee9744e1c84ecd32ae61d9147055a3730533
--- /dev/null
+++ b/container/scheduler-dev/build-depends
@@ -0,0 +1,2 @@
+crystal-base
+crystal-shards
diff --git a/container/scheduler-https/build b/container/scheduler-https/build
new file mode 100755
index 0000000000000000000000000000000000000000..8442c50786b80ffa332ddc066550d9571b839e27
--- /dev/null
+++ b/container/scheduler-https/build
@@ -0,0 +1,7 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true + +# nothing to do +# just basic container struct "build file" diff --git a/container/scheduler-https/build-depends b/container/scheduler-https/build-depends new file mode 100755 index 0000000000000000000000000000000000000000..17e0b594c4814433b9db8191c429e254fa1e6938 --- /dev/null +++ b/container/scheduler-https/build-depends @@ -0,0 +1 @@ +scheduler diff --git a/container/scheduler-https/start b/container/scheduler-https/start new file mode 100755 index 0000000000000000000000000000000000000000..6b06769c5bb3ea9e7046a0efd70051725516f65b --- /dev/null +++ b/container/scheduler-https/start @@ -0,0 +1,72 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'set' +require_relative '../defconfig.rb' + +names = Set.new %w[ + SCHED_HOST + SCHED_PORT + SCHED_HTTPS_HOST + SCHED_HTTPS_PORT + REDIS_HOST + REDIS_PORT + ES_HOST + ES_PORT + OS_HTTP_HOST + OS_HTTP_PORT + INITRD_HTTP_HOST + INITRD_HTTP_PORT + FLUENTD_HOST + FLUENTD_PORT + TASKQUEUE_PORT + TASKQUEUE_HOST +] + +defaults = relevant_defaults(names) +env = docker_env(defaults) + +DEFAULT_LKP = '/c/lkp-tests' +DEFAULT_CCI = '/c/compass-ci' +SCHED_PORT = defaults['SCHED_PORT'] || '3000' +SCHED_HOST = defaults['SCHED_HOST'] || '172.17.0.1' +SCHED_HTTPS_PORT = defaults['SCHED_HTTPS_PORT'] || 11306 +SCHED_HTTPS_HOST = defaults['SCHED_HTTPS_HOST'] || 'api.compass-ci.openeuler.org' +FLUENTD_HOST = defaults['FLUENTD_HOST'] || '172.17.0.1' +FLUENTD_PORT = defaults['FLUENTD_PORT'] || '24224' + +docker_rm "s001-alpine-#{SCHED_HTTPS_PORT}" + +cmd = %W[ + docker run + --restart=always + --name s001-alpine-#{SCHED_HTTPS_PORT} + -d + -u 1090:1090 + -p #{SCHED_HTTPS_PORT}:#{SCHED_PORT} +] + env + %W[ + -e LKP_SRC=#{DEFAULT_LKP} + -e CCI_SRC=#{DEFAULT_CCI} + -e SCHED_HOST=#{SCHED_HOST} + -e SCHED_PORT=#{SCHED_PORT} + -v #{ENV['LKP_SRC']}:#{DEFAULT_LKP} + -v #{ENV['CCI_SRC']}:#{DEFAULT_CCI} + -v /srv/cci/scheduler/alpine:/srv/cci/scheduler + -v /etc/localtime:/etc/localtime:ro + -v /srv/result:/srv/result + -v /srv/initrd:/srv/initrd:ro + -v /srv/os:/srv/os:ro + -v /etc/compass-ci/ca:/ca + --log-driver=fluentd + --log-opt fluentd-address=#{FLUENTD_HOST}:#{FLUENTD_PORT} + --log-opt mode=non-blocking + --log-opt max-buffer-size=4m + --log-opt tag=scheduler-#{SCHED_HTTPS_PORT} + sch-ruby-a:v0.00d-#{SCHED_PORT} +] + +cmd += ['sh', '-c', 'umask 002 && ./scheduler --ssl --ssl-key-file /ca/server.key --ssl-cert-file /ca/server.crt'] + +system(*cmd) diff --git a/container/scheduler/Dockerfile b/container/scheduler/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..edac026d0f3c74e7ec2fb2576bd139482011d6ce --- /dev/null +++ b/container/scheduler/Dockerfile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+ +FROM alpine:3.11 + +RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories + +RUN apk update + +RUN apk add --no-cache 'ruby-dev' \ + 'g++' 'gcc' 'pcre' 'libevent' 'make' 'git' 'cpio' 'bash' + +RUN umask 002 && \ + echo ':sources: ["http://rubygems.org"]' >> ~/.gemrc && \ + gem install rest-client && \ + gem install activesupport && \ + gem install git && \ + gem install json && \ + gem install yaml + + +ENV SCHED_RUNTIME_DIR /c/cci/scheduler + +RUN mkdir -p $SCHED_RUNTIME_DIR && \ + chown -R 1090:1090 /c + +WORKDIR $SCHED_RUNTIME_DIR + +COPY --chown=1090:1090 scheduler . +COPY --chown=1090:1090 create-job-cpio.sh . diff --git a/container/scheduler/build b/container/scheduler/build new file mode 100755 index 0000000000000000000000000000000000000000..367aa4e2512d773b02df3302da785ca88f964c1c --- /dev/null +++ b/container/scheduler/build @@ -0,0 +1,21 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'set' +require_relative '../defconfig.rb' + +names = Set.new ['SCHED_PORT'] +defaults = relevant_defaults(names) + +SCHED_PORT = (defaults['SCHED_PORT'] || '3000') + +CCI_SRC = ENV['CCI_SRC'] +system "bash #{CCI_SRC}/container/compile ." +system "cp #{CCI_SRC}/sbin/create-job-cpio.sh ." + +system "docker build -t sch-ruby-a:v0.00d-#{SCHED_PORT} ." + +system 'rm scheduler' +system 'rm create-job-cpio.sh' diff --git a/container/scheduler/build-depends b/container/scheduler/build-depends new file mode 100755 index 0000000000000000000000000000000000000000..5dd403e166afc1ed9b15b32328320e82db6f5155 --- /dev/null +++ b/container/scheduler/build-depends @@ -0,0 +1 @@ +scheduler-dev diff --git a/container/scheduler/start b/container/scheduler/start new file mode 100755 index 0000000000000000000000000000000000000000..326b65be76e96922954dbed8df818313af43b1c0 --- /dev/null +++ b/container/scheduler/start @@ -0,0 +1,71 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+# frozen_string_literal: true + +require 'set' +require_relative '../defconfig.rb' + +names = Set.new %w[ + SCHED_HOST + SCHED_PORT + REDIS_HOST + REDIS_PORT + ES_HOST + ES_PORT + OS_HTTP_HOST + OS_HTTP_PORT + INITRD_HTTP_HOST + INITRD_HTTP_PORT + FLUENTD_HOST + FLUENTD_PORT + TASKQUEUE_PORT + TASKQUEUE_HOST + lab +] + +defaults = relevant_defaults(names) +env = docker_env(defaults) + +CCI_REPOS = ENV['CCI_REPOS'] || '/c' +DEFAULT_LKP = "#{CCI_REPOS}/lkp-tests" +DEFAULT_CCI = "#{CCI_REPOS}/compass-ci" +LAB = defaults['lab'] || 'z9' +SCHED_PORT = defaults['SCHED_PORT'] || '3000' +SCHED_HOST = defaults['SCHED_HOST'] || '172.17.0.1' +FLUENTD_HOST = defaults['FLUENTD_HOST'] || '172.17.0.1' +FLUENTD_PORT = defaults['FLUENTD_PORT'] || '24224' +docker_rm "s001-alpine-#{SCHED_PORT}" + +cmd = %W[ + docker run + --restart=always + --name s001-alpine-#{SCHED_PORT} + -d + -u 1090:1090 + -p #{SCHED_PORT}:#{SCHED_PORT} +] + env + %W[ + -e CCI_REPOS=#{CCI_REPOS} + -e LKP_SRC=#{DEFAULT_LKP} + -e CCI_SRC=#{DEFAULT_CCI} + -e SCHED_HOST=#{SCHED_HOST} + -e SCHED_PORT=#{SCHED_PORT} + -v #{ENV['LKP_SRC']}:#{DEFAULT_LKP} + -v #{ENV['CCI_SRC']}:#{DEFAULT_CCI} + -v #{CCI_REPOS}/lab-#{LAB}:#{CCI_REPOS}/lab-#{LAB} + -v /srv/cci/scheduler/alpine:/srv/cci/scheduler + -v /etc/localtime:/etc/localtime:ro + -v /srv/result:/srv/result + -v /srv/initrd:/srv/initrd:ro + -v /srv/os:/srv/os:ro + --log-driver=fluentd + --log-opt fluentd-address=#{FLUENTD_HOST}:#{FLUENTD_PORT} + --log-opt mode=non-blocking + --log-opt max-buffer-size=4m + --log-opt tag=scheduler-#{SCHED_PORT} + sch-ruby-a:v0.00d-#{SCHED_PORT} +] + +cmd += ['sh', '-c', 'umask 002 && ./scheduler'] + +system(*cmd) diff --git a/container/scheduler/start-depends b/container/scheduler/start-depends new file mode 100755 index 0000000000000000000000000000000000000000..3e28dc1875614356b4999ec8a17e2d15cd5859aa --- /dev/null +++ b/container/scheduler/start-depends @@ -0,0 +1,4 @@ +redis +fluentd +es +taskqueue diff --git a/container/send-mail/Dockerfile b/container/send-mail/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..36b145c48ee965034a4677c8dc047f7aa6e06d51 --- /dev/null +++ b/container/send-mail/Dockerfile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +FROM debian +MAINTAINER luanshd0525@163.com +ENV DEBIAN_FRONTEND noninteractive + +COPY conf/sources.list* /etc/apt/ +RUN apt-get update && \ + apt-get install -y ruby-mail ruby-json ruby-sinatra && \ + ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ + echo 'Asia/Shanghai' > /etc/timezone + +EXPOSE 25 + +COPY mail-post.rb /usr/local/bin/ +COPY send-mail.rb /usr/local/bin/ + +CMD ["/usr/local/bin/mail-post.rb"] diff --git a/container/send-mail/build b/container/send-mail/build new file mode 100755 index 0000000000000000000000000000000000000000..9159641a8509c4d7c248798a482c643ab5d6d058 --- /dev/null +++ b/container/send-mail/build @@ -0,0 +1,5 @@ +#!/bin/sh +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker build -t send-mail:latest . 
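The mail-post.rb service added below listens on port 11311 (published by its start script) and accepts a YAML mail description on /send_mail_yaml; a minimal smoke test could look like this (a sketch: recipient and body are illustrative):

    curl -X POST http://localhost:11311/send_mail_yaml --data-binary '
    subject: test mail
    to: someone@example.com
    body: hello from send-mail
    '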
diff --git a/container/send-mail/conf/sources.list b/container/send-mail/conf/sources.list
new file mode 100644
index 0000000000000000000000000000000000000000..8179e01cadd258c86414c028f9eb913fa44e58d1
--- /dev/null
+++ b/container/send-mail/conf/sources.list
@@ -0,0 +1,9 @@
+deb [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
+
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
diff --git a/container/send-mail/mail-post.rb b/container/send-mail/mail-post.rb
new file mode 100755
index 0000000000000000000000000000000000000000..4eecf6ec62b2827b550d916773bd54f6c62c64b8
--- /dev/null
+++ b/container/send-mail/mail-post.rb
@@ -0,0 +1,48 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'sinatra'
+require 'json'
+require 'yaml'
+require 'open3'
+require_relative 'send-mail.rb'
+
+set :bind, '0.0.0.0'
+set :port, 11311
+
+post '/send_mail_yaml' do
+  data = YAML.safe_load request.body.read
+  raise TypeError, 'request data type error' unless data.is_a?(Hash)
+
+  mail_info = {
+    'references' => data['references'] || '',
+    'from' => data['from'] || 'team@crystal.ci',
+    'subject' => data['subject'],
+    'to' => data['to'],
+    'body' => data['body']
+  }
+  check_send_mail(mail_info)
+end
+
+post '/send_mail_text' do
+  data = Mail.read_from_string(request.body.read)
+
+  mail_info = {
+    'references' => data.references || '',
+    'from' => data.from || 'team@crystal.ci',
+    'subject' => data.subject,
+    'to' => data.to,
+    'body' => data.body.decoded
+  }
+  check_send_mail(mail_info)
+end
+
+def check_send_mail(mail_info)
+  raise TypeError, 'empty subject.' if mail_info['subject'].nil? || mail_info['subject'].empty?
+  raise TypeError, 'empty email address.' if mail_info['to'].nil? || mail_info['to'].empty?
+  raise TypeError, 'empty email content.' if mail_info['body'].nil? || mail_info['body'].empty?
+
+  send_mail(mail_info)
+end
diff --git a/container/send-mail/send-mail.rb b/container/send-mail/send-mail.rb
new file mode 100755
index 0000000000000000000000000000000000000000..f459e787761258e784becca3f60afe2585838763
--- /dev/null
+++ b/container/send-mail/send-mail.rb
@@ -0,0 +1,28 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'mail'
+
+mail_server = `/sbin/ip route |awk '/default/ {print $3}'`.chomp
+
+# setup smtp config
+smtp = {
+  address: mail_server,
+  enable_starttls_auto: false
+}
+
+Mail.defaults { delivery_method :smtp, smtp }
+
+# send mail
+def send_mail(mail_info)
+  mail = Mail.new do
+    references mail_info['references']
+    from mail_info['from']
+    subject mail_info['subject']
+    to mail_info['to']
+    body mail_info['body']
+  end
+  mail.deliver!
+end
diff --git a/container/send-mail/start b/container/send-mail/start
new file mode 100755
index 0000000000000000000000000000000000000000..80be86ac7134e2c1346c739324857a31ece64783
--- /dev/null
+++ b/container/send-mail/start
@@ -0,0 +1,60 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+
+docker_rm send-mail
+
+cmd=(
+    docker run
+    --restart=always
+    --name=send-mail
+    -u nobody
+    -d
+    -p 11311:11311
+    -v /etc/localtime:/etc/localtime:ro
+    send-mail
+)
+
+"${cmd[@]}"
+
+cat</dev/null; then
+        # debian only created sudo group
+        # archlinux has neither sudo nor wheel group
+        groupadd --system wheel
+    else
+        # alpine already has wheel group, so won't go here
+        addgroup -S wheel
+    fi
+fi
+
+# fix warnings in archlinux: no users group
+sed -i '/GROUP=users/d' /etc/default/useradd
+# everyone can sudo in docker testbed
+echo 'GROUP=wheel' >> /etc/default/useradd
+
+# ssh authorized_keys
+[ -n "$SSH_KEYS" ] || exit 0
+
+for user in ${TEAM//,/ }
+do
+    uid=$(awk -F: "/^$user:/ { print \$3}" /opt/passwd)
+    gid=$(awk -F: "/^$user:/ { print \$4}" /opt/passwd)
+    if command -v useradd >/dev/null; then
+        # debian
+        groupadd --gid $gid $user
+        useradd --create-home --shell /bin/zsh -u $uid -g $gid $user
+        passwd --lock $user
+    else
+        # alpine busybox
+        addgroup -g $gid $user
+        adduser -D -s /bin/zsh -k /etc/skel -u $uid $user
+        adduser $user wheel
+        passwd -u $user # necessary for ssh login
+    fi
+    mkdir -p /home/$user/.ssh
+    echo "$SSH_KEYS" | grep " $user@" > /home/$user/.ssh/authorized_keys
+    chown -R $user.$user /home/$user/.ssh
+done
+
+# setup root
+cp -a /mnt/skel/.??* /root/
+passwd --lock root
+chsh -s /bin/zsh
+mkdir -p /root/.ssh
+echo "$SSH_KEYS" | grep -E " (${COMMITTERS//,/|})@" > /root/.ssh/authorized_keys
+
+echo "$SSH_KEYS" > /home/team/.ssh/authorized_keys
+
+if [ -d /etc/sudoers.d ]; then
+    echo "%wheel ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/wheel-nopasswd
+
+    # https://github.com/sudo-project/sudo/issues/42
+    sudo --version | grep -q -F 1.8 &&
+        echo "Set disable_coredump false" >> /etc/sudo.conf
+fi
+
+chmod -R go-rwxs /root/.ssh /home/*/
diff --git a/container/shellcheck/Dockerfile b/container/shellcheck/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..b78beb75686e2503c7d4fd8dd8dc87d04c81e8cc
--- /dev/null
+++ b/container/shellcheck/Dockerfile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM debian
+
+MAINTAINER 18509160991@163.com
+
+ENV DEBIAN_FRONTEND noninteractive
+
+COPY conf/sources.list* /etc/apt/
+RUN apt-get update && \
+    apt-get install -y shellcheck
+
+VOLUME /shellbox
+WORKDIR /shellbox
+
+CMD ["/bin/bash"]
diff --git a/container/shellcheck/README.md b/container/shellcheck/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..61ecac235987cb6c3b784db2dd87299e3e59daea
--- /dev/null
+++ b/container/shellcheck/README.md
@@ -0,0 +1,11 @@
+# Run shellcheck in the container
+Run "shellcheck $script" on the host machine to check the syntax of a shell script.
+1. If this is the first time, check whether a shellcheck container image exists:
+    docker images |grep shellcheck
+   If the image does not exist, run:
+    ./build
+2. After creating the image, please run:
+    ./run $script
+   or
+    ./shellcheck $script
+
diff --git a/container/shellcheck/build b/container/shellcheck/build
new file mode 100755
index 0000000000000000000000000000000000000000..854bae63413f58643513cf1e56b882eae52ae235
--- /dev/null
+++ b/container/shellcheck/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t shellcheck .
diff --git a/container/shellcheck/conf/sources.list b/container/shellcheck/conf/sources.list
new file mode 100644
index 0000000000000000000000000000000000000000..8179e01cadd258c86414c028f9eb913fa44e58d1
--- /dev/null
+++ b/container/shellcheck/conf/sources.list
@@ -0,0 +1,9 @@
+deb [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
+
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-updates main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian/ buster-backports main non-free contrib
+deb-src [trusted=yes] http://mirrors.163.com/debian-security/ buster/updates main non-free contrib
diff --git a/container/shellcheck/run b/container/shellcheck/run
new file mode 100755
index 0000000000000000000000000000000000000000..7d322b3ebdcbdb06ce1e2a329b19f12ae929e94c
--- /dev/null
+++ b/container/shellcheck/run
@@ -0,0 +1,14 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+cmd=(
+    docker run
+    --rm
+    -v $PWD:/shellbox
+    shellcheck
+    shellcheck "$@"
+)
+
+"${cmd[@]}"
+
diff --git a/container/srv-http/Dockerfile b/container/srv-http/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8a3bdcb6c8f000bbe874797e75c11fc8f755a5b3
--- /dev/null
+++ b/container/srv-http/Dockerfile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM nginx:alpine
+ADD root /
diff --git a/container/srv-http/build b/container/srv-http/build
new file mode 100755
index 0000000000000000000000000000000000000000..3a500ea0092dca7aac253395aba79520ee6c8c78
--- /dev/null
+++ b/container/srv-http/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+docker build -t srv-http .
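One note on the shellcheck wrapper above: it bind-mounts $PWD at /shellbox and runs shellcheck from there, so the script path passed in must live under the directory you invoke it from; for example (target script illustrative):

    cd $CCI_SRC
    ./container/shellcheck/run container/os-nfs/start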
diff --git a/container/srv-http/root/etc/nginx/conf.d/default.conf b/container/srv-http/root/etc/nginx/conf.d/default.conf
new file mode 100644
index 0000000000000000000000000000000000000000..cec0d70651042d30e4168ed78f58e900effa7645
--- /dev/null
+++ b/container/srv-http/root/etc/nginx/conf.d/default.conf
@@ -0,0 +1,19 @@
+server {
+    listen 11300;
+    server_name "srv-http";
+    server_tokens off;
+
+    root /usr/share/nginx/html;
+    index index.html;
+
+    location /favicon.ico {
+        log_not_found off;
+    }
+
+    location / {
+        autoindex on;
+        autoindex_localtime on;
+        allow all;
+        root /srv;
+    }
+}
diff --git a/container/srv-http/start b/container/srv-http/start
new file mode 100755
index 0000000000000000000000000000000000000000..2b5bd1dd6399744d39ffbd192bb84df4fe775d18
--- /dev/null
+++ b/container/srv-http/start
@@ -0,0 +1,20 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+. $CCI_SRC/container/defconfig.sh
+docker_rm srv-http
+
+cmd=(
+    docker run
+    --restart=always
+    --name srv-http
+    -p 11300:11300
+    -v /srv/pub:/srv/pub:ro
+    -v /etc/localtime:/etc/localtime:ro
+    -v /srv/result:/srv/result:ro
+    -d
+    srv-http
+)
+
+"${cmd[@]}"
diff --git a/container/ssh-r/Dockerfile b/container/ssh-r/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1c10e03b7afdb5648cf013bfe696faf4fdcbf294
--- /dev/null
+++ b/container/ssh-r/Dockerfile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM alpine:latest
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+RUN apk update
+
+RUN umask 002 \
+    && adduser sshr -u 1090 -g 1090 -D \
+    && mkdir -p /home/sshr/.ssh \
+    && chown -R sshr:sshr /home/sshr \
+    && chmod 700 /home/sshr/.ssh
+
+RUN apk add openssh augeas \
+    && rm -fr /var/cache/apk/*
+
+COPY known_auth_keys /home/sshr/.ssh/authorized_keys
+
+RUN chown -R sshr:sshr /home/sshr/.ssh/authorized_keys \
+    && chmod 600 /home/sshr/.ssh/authorized_keys \
+    && passwd -u sshr
+
+EXPOSE 20000-22000
+EXPOSE 22
+
+COPY entry.sh /entry.sh
+ENTRYPOINT ["/entry.sh"]
+
+CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config"]
diff --git a/container/ssh-r/build b/container/ssh-r/build
new file mode 100755
index 0000000000000000000000000000000000000000..4bec5439c0a6491e32d0cc67163263dc8b5c8fa0
--- /dev/null
+++ b/container/ssh-r/build
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ ! -f "known_auth_keys" ]; then
+    echo "-----> ERROR: no testbox rsa public key file 'known_auth_keys' in the current DIR"
+    exit 1
+fi
+docker build -t ssh-r:0.001 .
diff --git a/container/ssh-r/entry.sh b/container/ssh-r/entry.sh
new file mode 100755
index 0000000000000000000000000000000000000000..723765b72a9fd90970f4f35d2cf091eb7007cc9f
--- /dev/null
+++ b/container/ssh-r/entry.sh
@@ -0,0 +1,94 @@
+#!/usr/bin/env sh
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+# The script below is mainly borrowed from https://github.com/panubo/docker-sshd.git
+# It is under the MIT license
+
+set -e
+DAEMON=sshd
+echo "> Starting SSHD"
+
+set_hostkeys() {
+    printf '%s\n' \
+        'set /files/etc/ssh/sshd_config/HostKey[1] /etc/ssh/keys/ssh_host_rsa_key' \
+        'set /files/etc/ssh/sshd_config/HostKey[2] /etc/ssh/keys/ssh_host_dsa_key' \
+        'set /files/etc/ssh/sshd_config/HostKey[3] /etc/ssh/keys/ssh_host_ecdsa_key' \
+        'set /files/etc/ssh/sshd_config/HostKey[4] /etc/ssh/keys/ssh_host_ed25519_key' \
+    | augtool -s 1> /dev/null
+}
+
+print_fingerprints() {
+    local BASE_DIR=${1-'/etc/ssh'}
+    for item in dsa rsa ecdsa ed25519; do
+        echo ">>> Fingerprints for ${item} host key"
+        ssh-keygen -E md5 -lf ${BASE_DIR}/ssh_host_${item}_key
+        ssh-keygen -E sha256 -lf ${BASE_DIR}/ssh_host_${item}_key
+        ssh-keygen -E sha512 -lf ${BASE_DIR}/ssh_host_${item}_key
+    done
+}
+
+# Generate Host keys, if required
+if ls /etc/ssh/keys/ssh_host_* 1> /dev/null 2>&1; then
+    echo ">> Found host keys in keys directory"
+    set_hostkeys
+    print_fingerprints /etc/ssh/keys
+elif ls /etc/ssh/ssh_host_* 1> /dev/null 2>&1; then
+    echo ">> Found Host keys in default location"
+    # Don't do anything
+    print_fingerprints
+else
+    echo ">> Generating new host keys"
+    mkdir -p /etc/ssh/keys
+    ssh-keygen -A
+    mv /etc/ssh/ssh_host_* /etc/ssh/keys/
+    set_hostkeys
+    print_fingerprints /etc/ssh/keys
+fi
+
+configure_ssh_options() {
+    # Enable AllowTcpForwarding
+    if [[ "${TCP_FORWARDING}" == "true" ]]; then
+        echo 'set /files/etc/ssh/sshd_config/AllowTcpForwarding yes' | augtool -s 1> /dev/null
+    fi
+    # Enable GatewayPorts
+    if [[ "${GATEWAY_PORTS}" == "true" ]]; then
+        echo 'set /files/etc/ssh/sshd_config/GatewayPorts yes' | augtool -s 1> /dev/null
+    fi
+    # Disable SFTP
+    if [[ "${DISABLE_SFTP}" == "true" ]]; then
+        printf '%s\n' \
+            'rm /files/etc/ssh/sshd_config/Subsystem/sftp' \
+            'rm /files/etc/ssh/sshd_config/Subsystem' \
+        | augtool -s 1> /dev/null
+    fi
+}
+
+configure_ssh_options
+
+# Enable PubkeyAuthentication
+echo 'set /files/etc/ssh/sshd_config/PubkeyAuthentication yes' | augtool -s 1> /dev/null
+
+stop() {
+    echo "Received SIGINT or SIGTERM. Shutting down $DAEMON"
+    # Get PID
+    local pid=$(cat /var/run/$DAEMON/$DAEMON.pid)
+    # Set TERM
+    kill -SIGTERM "${pid}"
+    # Wait for exit
+    wait "${pid}"
+    # All done.
+    echo "Done."
+}
+
+echo "Running $@"
+if [ "$(basename $1)" == "$DAEMON" ]; then
+    trap stop SIGINT SIGTERM
+    $@ &
+    pid="$!"
+    mkdir -p /var/run/$DAEMON && echo "${pid}" > /var/run/$DAEMON/$DAEMON.pid
+    wait "${pid}"
+    exit $?
+else + exec "$@" +fi diff --git a/container/ssh-r/known_auth_keys b/container/ssh-r/known_auth_keys new file mode 100644 index 0000000000000000000000000000000000000000..c4ca49edc6351f819e3608bfdf85aae13685716b --- /dev/null +++ b/container/ssh-r/known_auth_keys @@ -0,0 +1,8 @@ +# compass-ci developer's public keys +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/r/hkWzaR8Tocyj5xnsDltBnyPCdxvq0oYZCr2nE8eqE9IHKYNwkjSUSVb+4rA2H2hABlxQLsiDOerYOa49uvgcVhK18Ib++e0/PBCfGoUUtqmhZB1oAM11lmCXhCzksKyXG1WRpbCp3xSXOi7dSab6Htnw3esD8SJC8KgNJ05iJxLxd2CCX9CaSydjq/J91D8O/lhU3u8dHmhI0cq++0V1zz8jZXW2saFbw+jvgJXp1+iJHaK781KHB3t/6qKepeX8QYZ6C2IJSkhduN2VcFOml/4ofsIkyQ5AW+KD+I4PH/NNotVmK5vY0UXjvjHCTInpNq8zwRyvuKWivfp42VlVhMYwQwrRgIqdyi4Q4klEAvlgyDqJcajsRoGiQwIoA6R8hW8VMDAZB2ZpQiajT0fG2Rgb6X4LzYtQhUWrFkoGkzAbFwCUAvhFKOGbcQOYY+2XsP7qwyheUqxCiXCwn1l8Z3MoD1ffWkh3ubStraO5VSOyTOxKQ8NO4yBbl6Yfn1WGHdX5J8nOdJ9sjkGMeBq395v8gj9HXrpcKZIp33e55Dq+tPyRXLPxq7uZ+Iqvl7Qz0EwUYUZaN5YSVpyJWpsNHv37ci+c60f+YvEM2tQePKKpnzZ784/y6rM46uW9oyyimG7tIwI5ryF4xwNBn72pwqAxKZh/I6N8/YOQByZw== chief@crystal.ci + +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxHMGr1ISS93P8law0v9Os3u0/M93RDX5jKt5UmhRLRbHr48J2SGElJaSjMrIHTrTa6vF8uVN2tkYnBuc5Bzm8FS3//eq4XTlQq66Y9lzcRkZ+jP9X3k2Y9yA1dqWCgf6KlOYGlyErKF29YxuFQ4aG+XGyu14Ca1nqfpmSGL087bGhvCNZr/CNk9lNhwdWpIr2mHnJJkUdgPy1vNve/41DEJIhd84vYrU6L8DuDIs2o++XHh4vVMx9LKkfG0obE66fE47kazR3Kk0LImJi9EutyATWamuGdH5i9ztpzBG68xMkTImpFqSwMV1LUJszLftRAbr5Zu1BB77iwdp2sODd luanshd@crystal.ci + +# compass-ci testbox public keys +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDPcr7MB7+FFE9N2w3z51O5vjs8xBr1R1rJ81EW/aKqvlYWx6yZcHuQUQxWkhuSfgVqmLslPXrVZR1Hwdg+RS5hKtiO1Nk8cawh9YHB/XDN3IgvDbJlr9+esTPoxu7tDPiBFixTYEcYVhEFktlUxc7IN42+6ecBJqU1yzxB7sWGNyd+iRfxNMvNO1xtzdqK8M6e2yLbXeUhvC01nvydDPtdSkL/p4y2UX+743Ez823jZmbLqVahDFL8590xIrXkt9NUst02W63CUh0zk7Rt5PobAebyTY5+kaYEtsCIXBiIvyP4HtCij5kMyYwZwMuRaPJl0p6NbaG6IL+1fXEGZgLoVwb1p+0akosQ3AgXfl1vc0pDe0X47TRXztHnEjdswLSvNPdgUNM4q27kJ5TV8s1Ny3PTWhIK7wRlbSrVoueH0uvVU4Pla13mlPyYY2JWkPAR9NBJWxhuePD9Gq+0wxyOaB1v+dXue6ewcXAJKnsLjCEKH9v3LA93WCNHZNZb79jAON+MQJmHHFBDGRuRbwKjCeKm7hDVbgi/EY3+7tJ+iNeRgS9gE3PgaDtkpBZFN2saW+fK22IrqO5m1S3VYDitJNhePqRwueVWnfXVSqfzB3HL0eFG1cva2yB/62dEr8J4+4N2i3xW1RAgDwSmnu+OMzVdEKHqAWXAmsS3KmWOMw== sshr + diff --git a/container/ssh-r/start b/container/ssh-r/start new file mode 100755 index 0000000000000000000000000000000000000000..dccd694e30a785143e6850ecf622b0b4d333c370 --- /dev/null +++ b/container/ssh-r/start @@ -0,0 +1,23 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +vm_name=ssh_r + +docker rm ${vm_name} + +cmd=( + docker run + --restart=always + --name ${vm_name} + -e GATEWAY_PORTS=true + -e TCP_FORWARDING=true + -d + -p 20000-22000:20000-22000 + -p 5050:22 + -v /etc/localtime:/etc/localtime:ro + -v /srv/pub/sshr/keys/:/etc/ssh/keys + ssh-r:0.001 +) + +"${cmd[@]}" diff --git a/container/sub-fluentd/Dockerfile b/container/sub-fluentd/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..192ece3ebb15322c1cda461d5a982549e16cc175 --- /dev/null +++ b/container/sub-fluentd/Dockerfile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+
+FROM fluentd-base:alpine
+
+COPY --chown=1090:1090 docker-fluentd.conf /fluentd/etc/docker-fluentd.conf
diff --git a/container/sub-fluentd/build b/container/sub-fluentd/build
new file mode 100755
index 0000000000000000000000000000000000000000..797cd0ddd325d8b850fcecf4221fef2b4960a162
--- /dev/null
+++ b/container/sub-fluentd/build
@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+system 'docker build -t sub-fluentd:alpine .'
diff --git a/container/sub-fluentd/docker-fluentd.conf b/container/sub-fluentd/docker-fluentd.conf
new file mode 100644
index 0000000000000000000000000000000000000000..24ad2e6a96075db443539255419d40a51e2433a5
--- /dev/null
+++ b/container/sub-fluentd/docker-fluentd.conf
@@ -0,0 +1,28 @@
+<source>
+  @type tail
+  path /srv/cci/serial/logs/*
+  pos_file /srv/cci/serial/fluentd-pos/serial.log.pos
+  tag serial.*
+  path_key serial_path
+  refresh_interval 1s
+  <parse>
+    @type none
+  </parse>
+</source>
+
+<match serial.**>
+  @type forward
+  compress gzip
+  keepalive true
+  <server>
+    name myserver
+    host "#{ENV['FLUENTD_SERVER_HOST']}"
+    port "#{ENV['FLUENTD_SERVER_PORT']}"
+  </server>
+  <buffer>
+    chunk_limit_size 1M
+    total_limit_size 100M
+    flush_interval 1s
+    overflow_action block
+  </buffer>
+</match>
diff --git a/container/sub-fluentd/start b/container/sub-fluentd/start
new file mode 100755
index 0000000000000000000000000000000000000000..fa64dba12f8a34642e43c8855be34bcbb177174e
--- /dev/null
+++ b/container/sub-fluentd/start
@@ -0,0 +1,40 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'set'
+require_relative '../defconfig.rb'
+
+names = Set.new %w[
+  FLUENTD_SERVER_HOST
+  FLUENTD_SERVER_PORT
+]
+
+defaults = relevant_defaults(names)
+FLUENTD_SERVER_PORT = defaults['FLUENTD_SERVER_PORT'] || '24224'
+raise 'must config FLUENTD_SERVER_HOST' unless defaults['FLUENTD_SERVER_HOST']
+
+docker_rm 'sub-fluentd'
+
+cmd = %W[
+  docker run
+  --restart=always
+  --name sub-fluentd
+  -v /etc/localtime:/etc/localtime:ro
+  -v /srv/cci/serial/logs:/srv/cci/serial/logs:ro
+  -v /srv/cci/serial/fluentd-pos:/srv/cci/serial/fluentd-pos
+  -d
+  -u 1090:1090
+  -p 24224:24224/tcp
+  -p 24224:24224/udp
+  -e FLUENTD_CONF=docker-fluentd.conf
+  -e FLUENTD_SERVER_HOST=#{defaults['FLUENTD_SERVER_HOST']}
+  -e FLUENTD_SERVER_PORT=#{FLUENTD_SERVER_PORT}
+  --log-driver json-file
+  --log-opt max-size=1g
+  sub-fluentd:alpine
+]
+cmd += ['sh', '-c', 'umask 002 && fluentd -c /fluentd/etc/docker-fluentd.conf']
+
+system(*cmd)
diff --git a/container/taskqueue/Dockerfile b/container/taskqueue/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..28ac925d16fbaca7715f4d4046aef99937202cf1
--- /dev/null
+++ b/container/taskqueue/Dockerfile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+FROM alpine:3.11
+
+RUN sed -ri.origin 's|^https?://dl-cdn.alpinelinux.org|http://mirrors.huaweicloud.com|g' /etc/apk/repositories
+
+RUN apk add --no-cache 'gcc' 'pcre' 'libevent' 'bash'
+
+ENV RUNTIME_DIR /c/cci/taskqueue
+
+RUN mkdir -p $RUNTIME_DIR && \
+    chown -R 1090:1090 /c
+
+WORKDIR $RUNTIME_DIR
+
+COPY --chown=1090:1090 taskqueue .
diff --git a/container/taskqueue/build b/container/taskqueue/build new file mode 100755 index 0000000000000000000000000000000000000000..3a5d4c9f04df39bf1bc77ab7cd7927073ad80c31 --- /dev/null +++ b/container/taskqueue/build @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +[[ $CCI_SRC ]] || CCI_SRC=/c/cci + +. $CCI_SRC/container/defconfig.sh + +load_cci_defaults + +service_port=${TASKQUEUE_PORT:-3060} + +if [[ ! -f "taskqueue" ]]; then + $CCI_SRC/container/compile . +else + echo -e "\n\033[34m- Info: using \033[31mexists\033[34m taskqueue -\033[0m\n" +fi + +docker build -t taskqueue-alpine:v0.001-${service_port} . diff --git a/container/taskqueue/build-depends b/container/taskqueue/build-depends new file mode 100755 index 0000000000000000000000000000000000000000..5dd403e166afc1ed9b15b32328320e82db6f5155 --- /dev/null +++ b/container/taskqueue/build-depends @@ -0,0 +1 @@ +scheduler-dev diff --git a/container/taskqueue/start b/container/taskqueue/start new file mode 100755 index 0000000000000000000000000000000000000000..4760b7d5aac105fdfdb2e10e22f6253e162d1759 --- /dev/null +++ b/container/taskqueue/start @@ -0,0 +1,40 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'set' +require_relative '../defconfig.rb' + +names = Set.new %w[ + TASKQUEUE_PORT + REDIS_HOST + REDIS_PORT +] + +defaults = relevant_defaults(names) +env = docker_env(defaults) + +service_port = defaults['TASKQUEUE_PORT'] || '3060' +service_name = 'taskqueue' +service_image = "#{service_name}-alpine:v0.001-#{service_port}" + +docker_rm "#{service_name}-#{service_port}" + +cmd = %W[ + docker run + --name #{service_name}-#{service_port} + --restart=always + -d + -u 1090:1090 + -p #{service_port}:#{service_port} +] +cmd += env +cmd += %W[ + -v /etc/localtime:/etc/localtime:ro + #{service_image} +] + +cmd += ['sh', '-c', "umask 002 && ./#{service_name}"] + +system(*cmd) diff --git a/container/update-os-docker/config.d/hub_dc_image_name b/container/update-os-docker/config.d/hub_dc_image_name new file mode 100644 index 0000000000000000000000000000000000000000..9ff3f58147d0ecfa177d5a64686ee73dc0a0dd5d --- /dev/null +++ b/container/update-os-docker/config.d/hub_dc_image_name @@ -0,0 +1,2 @@ +centos: centos +openeuler: kunpengcompute/openeuler diff --git a/container/update-os-docker/config.d/hub_dc_image_tag b/container/update-os-docker/config.d/hub_dc_image_tag new file mode 100644 index 0000000000000000000000000000000000000000..94075efadc89af3efabcb5ebb6443bc07b6b020d --- /dev/null +++ b/container/update-os-docker/config.d/hub_dc_image_tag @@ -0,0 +1,4 @@ +centos7.6: 7 +centos76: 7 +centos8: 8 +openeuler20: latest diff --git a/container/update-os-docker/config.d/local_dc_image_tag b/container/update-os-docker/config.d/local_dc_image_tag new file mode 100644 index 0000000000000000000000000000000000000000..b7e75bcb86c744cfcbf6ce9c4cd62753766760fa --- /dev/null +++ b/container/update-os-docker/config.d/local_dc_image_tag @@ -0,0 +1,4 @@ +centos7.6: 7.6 +centos76: 7.6 +centos8: 8 +openeuler20: 20 diff --git a/container/update-os-docker/lib/common.rb b/container/update-os-docker/lib/common.rb new file mode 100755 index 0000000000000000000000000000000000000000..f5af1aa5d3a168f39c06bf90309bd8a4951b64bf --- /dev/null +++ b/container/update-os-docker/lib/common.rb @@ -0,0 +1,47 @@ +#!/usr/bin/env ruby +# 
SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require_relative 'dc-image' + +OS_PATH ||= ENV['OS_PATH'] || '/os/' + +# Parse the parameters and make use of them +class ParseParam + attr_reader :dc_name, :local_dc_img + + def initialize(rootdir) + _, _, @os_name, _, @os_version = rootdir.split('/') + @dc_name = "dc-#{@os_name}-#{@os_version}" + end + + def prepare_env + @hub_dc_img = get_hub_dc_image(@os_name, @os_version) + @local_dc_img = get_local_dc_image(@os_name, @os_version) + prepare_dc_images(@local_dc_img, @hub_dc_img) + end +end + +def check_argv(argv) + usage(argv) + rootfs_dir = OS_PATH + argv[0] + raise 'Wrong vmlinuz path' unless File.exist?(rootfs_dir + '/vmlinuz') + + return rootfs_dir +end + +def usage(argv) + return if argv.size > 1 + + raise "Example usages: +./run centos/aarch64/7.6 package1 package2 ... +./run centos/aarch64/7.6 $(show-depends-packages centos) +centos is an example adaption file contain packages mapping from debian to centos. +The whole path is '$LKP_SRC/distro/adaptation/centos'." +end + +def get_packages(argv) + argv.shift + return argv.join(' ') +end diff --git a/container/update-os-docker/lib/dc-image.rb b/container/update-os-docker/lib/dc-image.rb new file mode 100755 index 0000000000000000000000000000000000000000..4bdf7d457d7a57b63a2ef08324b6e13930ade94a --- /dev/null +++ b/container/update-os-docker/lib/dc-image.rb @@ -0,0 +1,31 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'yaml' + +CONTAINER_PATH = File.dirname(__dir__) + +def get_hub_dc_image(os_name, os_version) + name_map = YAML.load_file("#{CONTAINER_PATH}/config.d/hub_dc_image_name") + tag_map = YAML.load_file("#{CONTAINER_PATH}/config.d/hub_dc_image_tag") + return "#{name_map[os_name]}:#{tag_map[os_name + os_version]}" +end + +def get_local_dc_image(os_name, os_version) + tag_map = YAML.load_file("#{CONTAINER_PATH}/config.d/local_dc_image_tag") + return "dc-#{os_name}:#{tag_map[os_name + os_version]}" +end + +def prepare_dc_images(local_dc_image, hub_dc_image) + puts "preparing, local_dc_image: #{local_dc_image} hub_dc_image: #{hub_dc_image}" + find_local_image = %x(docker images #{local_dc_image}) + return if find_local_image.include?(local_dc_image.split(':')[0]) + + find_hub_image = %x(docker images #{hub_dc_image}) + unless find_hub_image.include?(local_dc_image.split(':')[0]) + system "docker pull #{hub_dc_image}" + end + system "docker tag #{hub_dc_image} #{local_dc_image}" +end diff --git a/container/update-os-docker/run b/container/update-os-docker/run new file mode 100755 index 0000000000000000000000000000000000000000..5fe3d0281555be2449b53905c6df8906346d6ca1 --- /dev/null +++ b/container/update-os-docker/run @@ -0,0 +1,30 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+# frozen_string_literal: true + +require 'pathname' +require_relative 'lib/common.rb' +require_relative '../defconfig.rb' + +SBIN_DIR = Pathname.new(File.dirname(__FILE__)).realpath.to_s + '/sbin' +ROOT_DIR = check_argv(ARGV) + +params = ParseParam.new(ROOT_DIR) +params.prepare_env +packages = get_packages(ARGV) + +cmd = %W[ + docker run + -it + --name #{params.dc_name} + -v #{SBIN_DIR}:/usr/local/sbin + -v #{ROOT_DIR}/etc/yum.repos.d:/etc/yum.repos.d:ro + #{params.local_dc_img} +] + +cmd += ['bash', '-c', "umask 0002 && /usr/local/sbin/setup #{packages}"] + +system(*cmd) +system "docker commit #{params.dc_name} #{params.local_dc_img}" +docker_rm params.dc_name diff --git a/container/update-os-docker/sbin/setup b/container/update-os-docker/sbin/setup new file mode 100755 index 0000000000000000000000000000000000000000..a38e2df3d6c3e6275ec91a2a397e546f376a6041 --- /dev/null +++ b/container/update-os-docker/sbin/setup @@ -0,0 +1,6 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +yum update -y +yum install -y --skip-broken "$@" diff --git a/doc/apply-ssh-account.md b/doc/apply-ssh-account.md new file mode 100644 index 0000000000000000000000000000000000000000..69d8df6dc67f9df8f43c02655825bb181fac8ea5 --- /dev/null +++ b/doc/apply-ssh-account.md @@ -0,0 +1,37 @@ + +# apply for ssh account + +## purpose + +The ssh account is used to login our jumper VM, where you can +- submit a job +- ssh into a testbox + +## steps overview +1. send email to apply for ssh account + If approved, you'll get a response email with: + + login command: + ssh -p port account@server_ip + +2. login authentication + - use pub key + - use password (if no pub key) + +3. submit jobs + - prepare job.yaml + - run job + - command: + submit job.yaml + +## example apply email + + To: team@crystal.ci + Subject: apply ssh account + + # Show a commit URL that you contributed to an OSS project + # We'll validate whether the URL contains your email. 
+ # for example, + commit: https://github.com/torvalds/linux/commit/7be74942f184fdfba34ddd19a0d995deb34d4a03 + + # attach your ssh pub key as file (optionally but highly recommended) diff --git a/doc/develop-environment.md b/doc/develop-environment.md new file mode 100644 index 0000000000000000000000000000000000000000..2f906039bf363c9f3627e62ef5c145a3ba452a39 --- /dev/null +++ b/doc/develop-environment.md @@ -0,0 +1,120 @@ +# ssh + +## setup in your local laptop + + cat >> ~/.ssh/config <<-EOF + Host crystal + Hostname 124.90.34.227 + Port 22113 + EOF + + # password-less login + [ -f ~/.ssh/id_rsa.pub ] || ssh-keygen + ssh-copy-id crystal + +## setup in crystal server + + cat >> ~/.ssh/config <<-EOF + Host alpine + Hostname localhost + Port 2200 + User team + + Host debian + Hostname localhost + Port 2201 + User team + EOF + +# git + +## user setting + + git config --global user.name "Your Name" + git config --global user.email "youremail@yourdomain.com" + +Example: + + git config --global user.name "Wu Fengguang" + git config --global user.email "wfg@mail.ustc.edu.cn" + +## repos + +Clone the following git repos to your $HOME + + git clone file:///c/todo.git + git clone https://gitee.com/wu_fengguang/lkp-tests.git + git clone https://gitee.com/wu_fengguang/compass-ci.git + +Then read through documents + + lkp-tests/doc/INSTALL.md + compass-ci/doc/INSTALL.md + compass-ci/doc/develop-flow.md + compass-ci/doc/learning-resources.md + +# crystal compiler + +## local install + +Follow instructions here: + + https://crystal-lang.org/install/ + +## arm build environment in docker + +We created an alpine docker for running crystal compiler. +It's the only convenient way to use crystal in aarch64. +Usage: + ssh crystal # first login to our Kunpeng server + ssh team@alpine # then login to the docker + +We also provided a global wrapper script "crystal" for use +in our Kunpeng server. + +## development tips + +Ruby => Crystal code conversion +https://github.com/DocSpring/ruby_crystal_codemod + +Interactive console like Ruby's irb/pry +https://github.com/crystal-community/icr + +# vim setup + + git clone https://github.com/rhysd/vim-crystal + cd vim-crystal + cp -R autoload ftdetect ftplugin indent plugin syntax ~/.vim/ + +# vscode setup + +Install these extensions: + +- Markdown Preview Enhanced + document at https://shd101wyy.github.io/markdown-preview-enhanced/ + +- Crystal Language + + Need standalone install crystal compiler (mentioned above) and + crystal language server (below) first. + + git clone https://github.com/crystal-lang-tools/scry.git + # then follow instructions of the "Installation" section in scry/README.md + +- Ruby + +- Ruby Solargraph + + +# email notification app + +Usage: + + ssh -X crystal + cd; wmmaiload & + +Look and feel: + + http://tnemeth.free.fr/projets/dockapps.html + +The docker app will flash when there are new/unread emails. diff --git a/doc/develop-flow.md b/doc/develop-flow.md new file mode 100644 index 0000000000000000000000000000000000000000..ad5128aec91defa8527a565479779bcc081dda5d --- /dev/null +++ b/doc/develop-flow.md @@ -0,0 +1,94 @@ +document-driven development +=========================== + +For trivial ideas that take less than 1 day to implement, feel free to just +do it and submit patches. 
+
+For unsure or larger ideas, please announce and discuss them first:
+- submit an RFC idea/patch with document, comment and/or skeleton code;
+- submit a TODO which can be implemented by anyone
+
+It's also possible to embed TODO/FIXME items in comments, so that others may
+jump in and help with your current work.
+
+Sometimes test specs serve as good documentation, too.
+
+All in all, we encourage document-driven development. It's a good way to make
+sure early design ideas are valid and clear enough, and that everyone is
+aligned with the plan. Documents also serve as good resources for the
+community to understand the core ideas behind the current code, what's going
+on, and where to contribute.
+
+todo repo
+=========
+
+    git clone file:///c/todo.git
+
+Formal TODOs shall be committed to the above "todo" repo.
+Anyone can create TODO items in markdown format, for review and
+implementation by others.
+
+- the top level directory holds TODOs for the whole team
+- the dirs data/ deploy/ lkp/ scheduler/ tests/ hold TODOs for specific areas
+
+- to take a TODO for implementation, one should
+
+      git pull --rebase
+      git mv some_todo.md people/myname/todo/
+      git commit -a
+      gmail
+
+- to mark a TODO as done
+
+      git pull --rebase
+      git mv people/myname/todo/some_todo.md people/myname/done/some_todo.md
+      git commit people/myname
+      gmail
+
+To make "--rebase" the default behavior:
+
+    git config --global pull.rebase true
+
+review process
+==============
+
+The above "gmail" command is a wrapper for "git send-email".
+It submits your patch for review. The patch subject and changelog
+should be well written according to the suggestions here:
+
+    https://github.com/thoughtbot/dotfiles/blob/master/gitmessage
+    https://www.cnblogs.com/cpselvis/p/6423874.html
+
+    http://www.ozlabs.org/~akpm/stuff/tpp.txt
+        2: Subject:
+        3: Attribution
+        4: Changelog
+
+    https://www.kernel.org/doc/html/latest/translations/zh_CN/process/submitting-patches.html#cn-describe-changes
+        2) describe your changes
+        3) split up your changes
+        8) respond to review comments
+
+Everyone is encouraged to review others' patches. In particular, these 2 roles
+are obliged to give review comments:
+
+- the pair programming teammate
+- the subsystem committer
+
+A patch may undergo several rounds of revisions and reviews before being
+considered OK. It's up to the subsystem committer to apply the patch and push
+the code to the mainline.
+
+tech discussions
+================
+
+Technical discussions can happen in 2 main ways:
+- text messages
+- voice talks
+
+All text based discussions shall happen on the mailing list. Prefer the mutt
+email client over outlook. Prefer point-to-point bottom-posting. Every point
+should get a clear and informative response.
+
+Phone meeting links should be posted publicly, so that anyone interested can
+take part. It's good to publish text meeting minutes if important decisions
+are made in the call. Meetings should be effective, with written minutes and
+actions.
diff --git a/doc/lang-choice.md b/doc/lang-choice.md
new file mode 100644
index 0000000000000000000000000000000000000000..cff52a8089ecb7720bfa141db3e2a3bd98f43de9
--- /dev/null
+++ b/doc/lang-choice.md
@@ -0,0 +1,95 @@
+shell
+=====
+
+## Pro
+- widely available
+- quick & dirty
+
+## Con
+- doesn't scale to large programs
+
+## Main scenarios
+- run in test box
+- won't grow large
+- operational (install/config/wrapper), not logical/parsing/API heavy
+
+
+ruby
+====
+
+## Pro
+- human efficient
+- human friendly
+- no compile
+- gems: good coverage
+- ri: nice doc
+- pry: convenient to try out
+- google: rich experiences
+
+## Con
+- slow, however:
+  - ruby3 aims 3x faster in 2021
+  - truffleruby demos a 16-31x faster ERB benchmark
+    (only has experimental x86_64 support for now)
+    - https://github.com/oracle/truffleruby
+    - https://www.graalvm.org/docs/reference-manual/languages/ruby/
+- bloated for small devices
+
+## Main scenarios
+- extend existing code base
+- end user tools
+  - easy to distribute
+  - transparent and trusted
+- server side tools (not performance sensitive)
+
+
+crystal
+=======
+
+## Pro
+- ruby like
+- human efficient
+- machine efficient
+- suitable for large projects
+  - static type
+  - compile checked
+- easy to deploy (static linking, like golang)
+
+## Con
+- pre-release
+- slow compile
+- shards: only cover core libs
+- few aids like ri/pry/google
+
+## Main scenarios
+- micro-services, e.g.
+  - post processing
+    - wrap stats/* in classes in a long running service
+  - git service
+    - use libgit2 + git cli wrapper
+    - to deprecate ruby-git
+    - ruby-git adds complexity. Either use libgit2 for better performance, or
+      wrap plain git cli commands to reuse existing knowledge and cli git docs.
+- run in testbox (beyond shell capabilities)
+  - tests/*
+  - testbox side framework
+
+python
+======
+
+## Pro
+- popular and excellent ecosystem
+## Con
+- slow (and looks hard to improve in a generic way)
+## Main scenarios
+- library that greatly helps
+  - fbtftp
+  - data analysis and plotting?
+  - web?
+ +javascript +========== + +## Main scenarios +- web interface + - d3.js visualization diff --git a/doc/learning-resources.md b/doc/learning-resources.md new file mode 100644 index 0000000000000000000000000000000000000000..62716cf2c2f2a186369088aa8189c4c907a56136 --- /dev/null +++ b/doc/learning-resources.md @@ -0,0 +1,210 @@ +culture +======= + +How To Ask Questions The Smart Way +https://zhuanlan.zhihu.com/p/19779979 +https://www.jianshu.com/p/60dd8e9cd12f + +The Cathedral and the Bazaar +https://www.ruanyifeng.com/blog/2008/02/notes_on_the_cathedral_and_the_bazaar.html +http://www.catb.org/~esr/writings/cathedral-bazaar/cathedral-bazaar/ + +ruby +==== + +## introduction +man pry # for trying out code snippets below +https://learnxinyminutes.com/docs/ruby/ +https://github.com/ThibaultJanBeyer/cheatsheets/blob/master/Ruby-Cheatsheet.md +https://www.vikingcodeschool.com/professional-development-with-ruby/ruby-cheat-sheet +http://www.cheat-sheets.org/saved-copy/RubyCheat.pdf # skip the cryptic "Predefined Variables" and "Ruby arguments" tables +http://www.testingeducation.org/conference/wtst3_pettichord9.pdf + +https://www.ruby-lang.org/en/documentation/ruby-from-other-languages/to-ruby-from-python/ + +## API +man ri +http://overapi.com/ruby + +## coding style +https://rubystyle.guide/ +https://ruby-china.org/wiki/coding-style + +## resources +https://github.com/markets/awesome-ruby + +## debug +https://github.com/deivid-rodriguez/pry-byebug +https://github.com/JoshCheek/seeing_is_believing + + +crystal +======= + +https://getgood.at/in-a-day/crystal +https://crystal-lang.org/2018/01/08/top-5-reasons-for-ruby-ists-to-use-crystal.htmL + +/c/crystal/crystal/src/ +https://crystal-lang.org/api +https://crystal-lang.org/reference + +https://github.com/veelenga/awesome-crystal +https://github.com/DocSpring/ruby_crystal_codemod + + +shell +===== + +man bash +https://devhints.io/bash +https://github.com/denysdovhan/bash-handbook/blob/master/translations/zh-CN/README.md + +https://juejin.im/post/5e4123e3e51d45271515501f +https://juejin.im/post/5e42858de51d45270d53022e + +https://shellmagic.xyz/ +https://ngte-ac.gitbook.io/i/infrastructure/linux-command-cheatsheet +https://github.com/jlevy/the-art-of-command-line/blob/master/README-zh.md + +https://google.github.io/styleguide/shellguide.html + +## zsh keys (customized) + + ctrl-p history-beginning-search-backward + ctrl-n history-beginning-search-forwardd + alt-p history-search-backward + alt-n history-search-forward + alt-. insert-last-word + alt- vi-cmd-mode + + alt-f forward-word + alt-b backward-word + ctrl-a beginning-of-line + ctrl-e end-of-line + + bindkey show all key bindings + +python +====== + +https://learnxinyminutes.com/docs/python/ + + +Vim +=== + +https://www.jianshu.com/p/bcbe916f97e1 +https://coolshell.cn/articles/5426.html +https://devhints.io/vim + +## vim keys (customized) + + F2 toggle number/cursorline + F3 toggle spell check + F4 toggle paste/nowrap + F10/F11 prev/next color scheme + g. search in subdir + g/ search whole git repo + alt-n/p next/prev search result + ctrl-n/p next/prev file + ctrl-j/k left/right buffer + next buffer + alt-c toggle comment + +Tmux +==== + +## tmux keys (customized) + + ctrl-s c create new window + alt-j/k switch to left/right window + alt-1/2/3... switch to the 1st, 2nd, 3rd, ... 
window + ctrl-s ctrl-u page up (press ctrl-u to continue paging up, to exit) + +Mutt +==== + +http://www.ctex.org/documents/shredder/mutt_frame.html # enough to read the 1st section + +## mutt keys (customized) + +Check /etc/mutt/key.muttrc for our customized key bindings. +Type "?" in mutt will show you the complete key bindings. +The most used ones are: + + g reply to all recipients + m compose a new mail message + j/k move up/down one line + -/ move up/down one page + ctrl-u/ctrl-d move up/down half page + 9/G move to bottom + 0 move to top + next-unread + / search + l limit + i limit: toggle to me/all + + # for committer + a apply patch + p apply patch + git push + + +Regular Expression +================== + +Regular expression is powerful but cryptic. +The right way is to learn by examples: +https://www.rubyguides.com/2015/06/ruby-regex/ + +ri Regexp +https://cheatography.com/davechild/cheat-sheets/regular-expressions/ + + +Docker +====== + +https://ngte-ac.gitbook.io/i/infrastructure/docker-cheatsheet + + +Git +=== + +http://justinhileman.info/article/git-pretty/git-pretty.png +https://github.com/k88hudson/git-flight-rules/blob/master/README_zh-CN.md +https://github.com/521xueweihan/git-tips +https://github.com/arslanbilal/git-cheat-sheet/blob/master/other-sheets/git-cheat-sheet-zh.md +https://www.codementor.io/@citizen428/git-tutorial-10-common-git-problems-and-how-to-fix-them-aajv0katd +http://www.columbia.edu/~zjn2101/intermediate-git/ +https://git-scm.com/book/zh/v2 + +## resolve conflicts + +https://githowto.com/resolving_conflicts +https://easyengine.io/tutorials/git/git-resolve-merge-conflicts/ + +## edit emailed patch then apply + +in mutt: alt-e to open full email in vim + +in vim: modify the raw patch and save it + ctrl-g to show the full file name (at bottom line) + +in shell: copy & paste the full file name to command + + git am /tmp/FILE + +btw, quilt and wiggle are also good patch tools. + +Markdown +======== + +https://guides.github.com/pdfs/markdown-cheatsheet-online.pdf +https://shd101wyy.github.io/markdown-preview-enhanced/#/zh-cn/ +http://support.typora.io/Draw-Diagrams-With-Markdown/ + + +YAML +==== + +https://yaml.org/YAML_for_ruby.html +https://alexharv074.github.io/puppet/2020/03/06/why-erb-should-be-preferred-to-jinja2-for-devops-templating.html diff --git a/doc/principles.md b/doc/principles.md new file mode 100644 index 0000000000000000000000000000000000000000..77f361f8d9fa9cb1ccf3f0c758dc2e263e5b4e9c --- /dev/null +++ b/doc/principles.md @@ -0,0 +1,29 @@ +grep-friendly ID +================ + +Convert all magic numbers to some constant with grep-friendly name. +For example, all ip/host shall be replaced like this + + + JOB_REDIS_IP = "172.17.0.1" + JOB_REDIS_PORT = 6379 + + ... + + resources.redis_client(JOB_REDIS_IP, JOB_REDIS_PORT) + +If it's not convenient defined as constant, should add a comment like +this + + # some.yaml + + port: 6379 # JOB_REDIS_PORT + +So that in future whenever we want to find all related files, +we can grep such named ID easily. + +The same principle applies to more places that have IMPLICIT DEPENDENCIES: +give some named ID to all places, so that in future refactors, when +changing one place, one will be confident and know some other places +shall also be checked. It's a very important rule to follow in the +beginning of a large project. 
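+
+A minimal runnable sketch of the idea (the constant names and the redis
+client call below are illustrative only, not actual compass-ci code):
+
+    require 'redis'
+
+    # grep-friendly IDs: "grep -r JOB_REDIS" finds every place that
+    # depends on this address, so a refactor won't miss a user.
+    JOB_REDIS_IP = '172.17.0.1'
+    JOB_REDIS_PORT = 6379
+
+    # bad:  Redis.new(host: '172.17.0.1', port: 6379)  # bare magic numbers
+    # good: named constants document intent and are easy to find
+    redis = Redis.new(host: JOB_REDIS_IP, port: JOB_REDIS_PORT)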
diff --git a/doc/tutorial.md b/doc/tutorial.md
new file mode 100644
index 0000000000000000000000000000000000000000..d5576d4e0f56001ec0445d6b0a0cc491870c4041
--- /dev/null
+++ b/doc/tutorial.md
@@ -0,0 +1,698 @@
+# Crystal-CI Introduction
+
+## Brief introduction
+
+//kaitian
+
+### Concept
+
+Compass-CI is a continuous integration software platform. It provides
+developers with test services for upstream open source software (hosted on
+GitHub, Gitee, GitLab and similar platforms), login services, assisted fault
+demarcation, and analysis services based on historical data. Through
+Compass-CI, community developers can quickly bring open source software into
+the openEuler community, contribute more test cases, and jointly build a
+healthy and complete open source software ecosystem.
+
+### Background
+
+Software quality assurance in open source communities has always been a hard
+problem: quality varies a lot between projects, and current community test
+systems mostly focus on testing itself, with little thought given to the
+collaboration between community developers and the test system.
+
+Most open source software is developed and debugged on personal PCs, lacking
+a powerful, easy-to-use, diversified test cluster environment. Typical test
+systems focus on finding problems, and rarely offer developers the ability to
+debug, tune, locate, and reproduce them.
+
+openEuler opens up the Compass-CI test platform, providing open source
+software with test services based on Kunpeng clusters, one-click login
+debugging, automatic git bisect, and test result analysis, which greatly
+improves the development and debugging experience of community developers.
+
+### Features
+
+- Test service
+
+  Developers work on local machines and push code to github; Compass-CI
+  automatically fetches the code, runs tests, and feeds the results back.
+
+- Login to the debug environment
+
+  Compass-CI provides SSH login. If a problem comes up during testing,
+  developers can log in to the environment and debug as needed.
+
+- Test result comparison
+
+  Compass-CI records historical test results and offers web and command line
+  interfaces, so developers can analyze existing results and dig out the
+  factors that influence them.
+
+- Assisted bug demarcation
+
+  Compass-CI automatically recognizes error messages during testing and
+  triggers tests over the git tree to find the change that introduced the
+  problem.
+
+### Use cases
+
+- Use case 1
+
+  Aggregate developers' test cases: when a developer pushes code, test cases,
+  or test tools to a hosting platform, Compass-CI automatically fetches the
+  code for build tests, runs the automated test cases the developer shipped
+  with the package, and reports the results.
+
+- Use case 2
+
+  During testing, comprehensively monitor system runtime data
+  (CPU/MEM/IO/network, etc.), snapshot and archive the test process data, and
+  support comparative analysis of snapshots between runs, helping developers
+  analyze results and find the influencing factors.
+
+- Use case 3
+
+  When a bug is found during testing, automatically trigger the regression
+  mechanism to find the commit that first introduced the problem.
+
+- Use case 4
+
+  When a bug is found during testing, debug resources can be provided at any
+  time: log in to the environment to reproduce and debug.
+
+### Advantages of Compass-CI
+
+Compass-CI is a comprehensive platform integrating development debugging,
+test services, test result analysis, and assisted fault location, built for
+the best possible community developer experience. Compared with other
+continuous integration software, Compass-CI makes software testing simpler,
+bug debugging more convenient, and test analysis data more complete.
+
+### Next steps
+
+1. Improve the precision and efficiency of git bisect.
+2. Improve the linkage of data analysis, making it more focused.
+3. Improve data visualization, presenting comparison results in a friendlier way.
+4. Strengthen the login authentication mechanism, e.g. GPG authentication.
+5. Improve deployment efficiency.
+
+## Quick start
+
+- Prerequisites
+
+// shengde
+ 1. How to apply for an account
+    Apply by sending an email to compass-ci@openeuler.io,
+    then configure the defaults below.
+
+    Application email:
+    - subject: 'apply account'
+    - to: compass-ci@139.com
+    - pub key: attach your ssh public key as a file
+    - url: the url of a commit you made in an open source community
+      - e.g. https://github.com/torvalds/linux/commit/7be74942f184fdfba34ddd19a0d995deb34d4a03
+
+    The reply email will contain:
+
+      account_uuid: xxxxxx
+      SCHED_HOST: xxx.xxx.xxx.xxx
+      SCHED_PORT: 10000
+
+## 2. Set up the local environment (so that you can submit jobs)
+
+- Download lkp-tests, install its dependencies and configure the environment variables
+  ```bash
+  git clone http://gitee.com/wu_fengguang/lkp-tests.git
+  cd lkp-tests
+  make install
+  ```
+
+- Configure the lab
+  - open lkp-tests/include/lab/z9
+    ```yaml
+    SCHED_HOST: ip
+    SCHED_PORT: port
+    ```
+  - create $HOME/.config/compass-ci/defaults/$USER.yaml
+    ```yaml
+    lab: z9
+    ```
+
+## 3. Register your own repo
+
+If you want tests to be triggered automatically on git push, you need to add
+your public git url to the following repo:
+[upstream-repos](https://gitee.com/wu_fengguang/upstream-repos)
+
+    git clone https://gitee.com/wu_fengguang/upstream-repos.git
+    less upstream-repos/README.md
+
+# Defining and submitting jobs
+
+## 1. How to write a job yaml?
+
+### Introduction to the job yaml
+
+- The job yaml is the basic unit of test description and execution.
+- It is written in [YAML](http://yaml.org/YAML_for_ruby.html) format.
+- All job files live under **`$LKP_SRC/jobs`**.
+
+### Structure of the job yaml
+
+#### 1. The yaml header (required)
+
+- Every job file starts with some basic descriptive information, called the
+  yaml header.
+- The header mainly consists of suite and category. suite is the name of the
+  test suite; category is the test type: benchmark (performance test),
+  functional (functional test), or noise-benchmark (rarely used).
+
+  ```yaml
+  suite: netperf
+  category: benchmark
+  ```
+#### 2. Test scripts and parameters (required)
+
+- A job yaml is written as key: value pairs. If a key matches a script file
+  under one of the following paths, it is treated as an executable script:
+
+  ```
+  $LKP_SRC/setup
+  $LKP_SRC/monitors
+  $LKP_SRC/daemon
+  $LKP_SRC/tests
+  ```
+- Test scripts live in **`$LKP_SRC/tests`** and **`$LKP_SRC/daemon`**.
+- A parameter value is a string or an array of strings. Each test script
+  documents the parameters it accepts (passed in as environment variables)
+  in the comments at the top of the file.
+
+  ```yaml
+  netserver:
+  netperf:
+    runtime: 10s
+    test:
+    - TCP_STREAM
+    - UDP_STREAM
+    send_size:
+    - 1024
+    - 2048
+  ```
+#### 3. Test resource variables (required)
+
+- Resource-related variables: SUT, scheduler, deployment, etc.
+
+  ```yaml
+  testbox: vm-hi1620-2p8g
+  os: openeuler
+  os_version: 20.03
+  os_arch: aarch64
+  ```
+#### 4. System setup scripts (optional)
+
+- Setup scripts live in the **`$LKP_SRC/setup`** directory.
+- Setup scripts run before the test scripts; they mainly start/stop dependent
+  services or tools, or configure the parameters the test needs.
+
+  ```yaml
+  cgroup2: # $LKP_SRC/setup/cgroup2 executable script
+    memory.high: 90%
+    memory.low: 50%
+    memory.max: max
+    memory.swap.max:
+    io.max:
+    io.weight:
+    rdma.max:
+  ```
+#### 5. Monitor scripts (optional)
+
+- Located in **`$LKP_SRC/monitors`**.
+- Monitors capture performance statistics while the benchmark runs.
+- They are valuable for performance analysis and regression root-causing.
+
+  ```yaml
+  proc-vmstats:
+    interval: 5
+  ```
+### For extensions and details of the job yaml, see:
+
+[From job definition to execution](https://gitee.com/wu_fengguang/lkp-tests/blob/master/jobs/README.md)
+
+## 2. Submitting test jobs
+
+- Once the environment is ready, you can submit jobs to the scheduler.
+- For the results of a test job, see the section on viewing test results.
+
+#### 1. Usage of the submit command:
+
+```
+- Usage: submit [options] jobs...
+  submit test jobs to scheduler
+- options:
+  -s, --set 'KEY: VALUE'   # add YAML hash to job
+  -o, --output DIR         # save job yaml to DIR/
+  -a, --auto-define-files  # auto add define_files
+  -c, --connect            # auto connect to the host
+  -m, --monitor            # monitor job status: use -m 'KEY: VALUE' to add rule
+```
+#### 2. Take netperf as an example, submit the job file:
+
+    submit netperf.yaml
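+
+Putting the pieces above together, a complete job yaml for netperf could look
+like this (a sketch assembled purely from the snippets above; tune the values
+to your needs):
+
+```yaml
+suite: netperf
+category: benchmark
+
+testbox: vm-hi1620-2p8g
+os: openeuler
+os_version: 20.03
+os_arch: aarch64
+
+netserver:
+netperf:
+  runtime: 10s
+  test:
+  - TCP_STREAM
+  send_size:
+  - 1024
+```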
+
+## 5. View test results
+
+- How test results are generated and stored
+  1. When a test machine finishes a job, it saves the results as log files
+     and uploads them to the server, stored locally under /srv/result in the
+     directory structure $suite/$tbox_group/$date/$job_id.
+  2. The extract-stats service extracts the data from the local log files,
+     generates a json file for each log file plus the aggregated result
+     stats.json, and stores the aggregated result into the job with the
+     matching id in the jobs database (ES).
+
+- View results on the web:
+  - results in the jobs database (ES): https://compass-ci.openeuler.org/jobs
+
+  - results in files, for example:
+    http://124.90.34.227:11300/result/iperf/dc-2g--xiao/2020-09-21/crystal.83385/, where:
+
+    boot-time, diskstats.gz, interrupts.gz, ... files
+    - log files (uploaded by the test machine).
+
+    boot-time.json, diskstats.json, interrupts.json, ... stats.json
+    - json files holding the data extracted from each log file; stats.json is
+      the aggregated result (generated by the extract-stats service).
+
+## 6. Compare test results
+
+> After tests complete, use conditions to query the data, then combine the
+> data into multiple matrices. Compute each matrix's average and standard
+> deviation to see how test case performance changes along a specific
+> dimension. Finally, print the comparison in a pretty format.
+
+- methods
+  - web
+    - Compass CI/compare
+  - command
+    - `compare conditions -d dimension`
+
+- example result
+
+  ```
+  os=openeuler/os_arch=aarch64/tbox_group=vm-hi1620-2p8g
+
+          20                  1            metric
+  --------------------  ----------------  ------------------------------
+       fails:runs            change        fails:runs
+           |                    |              |
+         8:21                -38.1%           0:1    last_state.daemon.sshd.exit_code.1
+         1:21                 -4.8%           0:1    last_state.daemon.sshd.exit_code.2
+         1:21                 -4.8%           0:1    last_state.setup.disk.exit_code.1
+         1:21                 -4.8%           0:1    last_state.test.dbench.exit_code.99
+         1:21                 -4.8%           0:1    last_state.test.email.exit_code.7
+
+          20                  1            metric
+  --------------------  ----------------  ------------------------------
+        %stddev             change          %stddev
+           \                    |              \
+         0.02 ± 265%       +2259.8%           0.57   mpstat.cpu.all.soft%
+         0.48 ± 299%       +1334.9%           6.90   mpstat.cpu.all.sys%
+      2760.71 ± 164%        +292.6%       10838.00   proc-vmstat.nr_dirty_background_threshold
+      5522.10 ± 164%        +292.6%       21680.00   proc-vmstat.nr_dirty_threshold
+  ```
+
+## 7. Submit a borrow job
+
+- Overview: apply for an environment by submitting a job.
+  For the yaml of a borrow job, refer to lkp-tests/jobs/borrow-1h.yaml.
+
+### yaml configuration:
+
+- Required fields:
+
+      sshd:
+        pub_key: <%=
+          begin
+            File.read("#{ENV['HOME']}/.ssh/id_rsa.pub").chomp
+          rescue
+            nil
+          end
+        %>
+      email:
+      runtime: 1h
+      testbox: vm-hi1620-2p8g
+
+- Optional fields:
+
+      os: openeuler
+      os_arch: aarch64
+      os_version: 20.03
+
+- Field description:
+  1. sshd:
+     pub_key:
+       adds the user's public key to the job.
+       Make sure the public key file "#{ENV['HOME']}/.ssh/id_rsa.pub" exists.
+     email:
+       the user's email address (used to receive the login information of the
+       borrowed machine).
+  2. runtime:
+     how long the borrowed environment may be used.
+     The units h/d/w can be used.
+  3. testbox:
+     the specification of the requested environment, one of:
+       vm-hi1620-1p1g
+       vm-hi1620-2p1g
+       vm-hi1620-2p4g
+       vm-hi1620-2p8g
+       vm-pxe-hi1620-1p1g
+       vm-pxe-hi1620-2p1g
+       vm-pxe-hi1620-2p4g
+       vm-pxe-hi1620-2p8g
+       taishan200-2280-2s64p-256g
+  4. OS parameters of the requested environment:
+     default:
+       os: openeuler, os_arch: aarch64, os_version: 20.03
+     optional:
+       os: debian, os_arch: aarch64, os_version: sid
+       os: centos, os_arch: aarch64, os_version: 7.6.1810
+
+### Submit the job
+
+    submit -m -c borrow-1h.yaml
+
+After running this command, an ssh connection to the borrowed environment is
+established automatically.
+
+## 8. Submit a bisect job
+
+The bisect job finds the first commit that introduced an error in a git repo.
+
+Refer to $LKP_SRC/jobs/bisect.yaml.
+Required fields:
+
+    bisect:
+      job_id:
+      error_id:
+
+Field description:
+
+    job_id:
+      submit a job and get a $job_id.
+    error_id:
+      search $job_id in the compass-ci web page to get the $error_ids, then
+      select one $error_id from $error_ids for the bisect.
+
+Command:
+
+    submit bisect.yaml
+
+Result:
+
+    You will get an email if the bisect succeeds.
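+
+As an illustration, a filled-in bisect.yaml could look like this (both ids
+below are examples reusing values shown earlier in this document; use your
+own job id and error id):
+
+```yaml
+bisect:
+  job_id: crystal.83385
+  error_id: last_state.test.dbench.exit_code.99
+```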
+
+## Advanced features
+
+- Add OS support
+
+  nfs/cifs
+
+  osimage (initramfs)
+
+  Build a cgz image booted via initramfs: when the kernel boots, the rootfs
+  is extracted directly from the packed cgz image and expanded in memory.
+  Using the disk cache mechanism, the system reads and writes files directly
+  in memory, which improves I/O performance.
+
+  [openEuler is used as the example here]
+
+  1. Get the rootfs for the target os version
+
+     1) Get the rootfs via docker
+
+        a) Download the docker image tarball provided by openEuler
+           wget https://repo.openeuler.org/openEuler-20.03-LTS/docker_img/aarch64/openEuler-docker.aarch64.tar.xz
+
+        b) Load the docker image
+           docker load -i openEuler-docker.aarch64
+
+        c) Start an openEuler container
+           docker run -id openeuler-20.03-lts
+
+        d) Copy out the rootfs of the container
+           docker cp -a $(docker run -d openeuler-20.03-lts):/ openEuler-rootfs
+
+     2) Get the rootfs from a qemu image (qcow2 format) (centos works the same way)
+
+        a) Download the qcow2 image provided on the openEuler website
+           wget https://repo.openeuler.org/openEuler-20.03-LTS/virtual_machine_img/aarch64/openEuler-20.03-LTS.aarch64.qcow2.xz
+
+        b) Use {compass-ci}/container/qcow2rootfs to build the rootfs
+           cd {compass-ci}/container/qcow2rootfs
+           ./run openEuler-20.03-LTS.aarch64.qcow2.xz /tmp/openEuler-rootfs
+
+  2. Customize the rootfs
+
+     1) chroot into the rootfs (this step needs root permission)
+        chroot openEuler-rootfs
+
+     2) Install and configure services as needed
+        a) change the root password
+        b) configure the ssh service
+        c) check the system time
+        d) if the osimage is built from docker, additionally:
+           install the required kernel version:
+             download the kernel rpm package from the official centos website
+             and install it with yum
+           delete the docker environment variable file:
+             rm /.dockerenv
+
+  3. Exit the rootfs and pack it
+
+     cd $rootfs
+     find . | cpio -o -Hnewc | gzip -9 > $os_name.cgz
+
+  FAQ:
+
+  Q: What if boot fails with errors like:
+
+     ...
+     [    0.390437] List of all partitions:
+     [    0.390806] No filesystem could mount root, tried:
+     [    0.391489] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(0,0)
+     ...
+     [    0.399404] Memory Limit: none
+     [    0.399749] ---[ end Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(0,0) ]---
+
+  A: 1) Not enough boot memory; increasing the memory solves it.
+     2) Insufficient permission on the kernel file; give it 644 permission.
+
+  Q: Packing an overly large image consumes a lot of memory.
+  A: Users are advised to trim the rootfs according to their actual needs.
+
+- Add test cases
+
+  See the reference document: [add testcase](https://gitee.com/wu_fengguang/lkp-tests/blob/master/doc/add-testcase.md)
+
+// shaofei
+- PKGBUILD builds
+
+// wangyong
+- depends
+
+- Deploy a local compass-ci server node
+
+  Overview: one-click deployment of the compass-ci environment on openEuler.
+
+  Note: currently the openEuler-aarch64-20.03-LTS system environment is supported.
+  The configuration below is for reference only.
+
+  - Preparation
+
+    - hardware
+      server type: TaiShan 200-2280 (recommended)
+      architecture: aarch64
+      memory: >= 8GB
+      CPU: 64 cores (recommended)
+      disk: >= 500G
+
+    - software
+      OS: openEuler-aarch64-20.03 LTS
+      git: version 2.23.0 (recommended)
+      reserved space: >= 300G
+      network: internet access
+
+    Note: for installing the openEuler system, see
+    https://openeuler.org/zh/docs/20.03_LTS/docs/Installation/%E5%AE%89%E8%A3%85%E5%87%86%E5%A4%87.html
+
+  - Steps
+
+    1. Log in to the openEuler system
+
+    2. Create a work directory and set the file mode mask
+
+       mkdir demo && cd demo && umask 002
+
+    3. Clone the compass-ci code into the demo directory
+
+       git clone https://gitee.com/wu_fengguang/compass-ci.git
+
+    4. Run the one-click deploy script install-tiny
+
+       cd compass-ci/sparrow && ./install-tiny
+
+## todo: call for cooperation
+
+    improve git bisect
+    improve data analysis
+    improve visualization of result data
+
diff --git a/lib/commit_status.rb b/lib/commit_status.rb
new file mode 100644
index 0000000000000000000000000000000000000000..19473f4c8c959d1f37764293cdb11a3d86cea9fd
--- /dev/null
+++ b/lib/commit_status.rb
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require_relative './constants'
+require_relative './es_query'
+require_relative './params_group'
+
+def query_commit_status(job, error_id)
+  items = {
+    'upstream_commit' => job['upstream_commit']
+  }
+  jobs_list = query_jobs_from_es(items)
+  commit_status = parse_jobs_status(jobs_list, error_id)
+  return commit_status
+end
+
+def query_latest_good_commit(job, error_id)
+  items = {
+    'suite' => job['suite'],
+    'upstream_repo' => job['upstream_repo']
+  }
+  jobs_list = query_jobs_from_es(items)
+  jobs_list = filter_jobs_list(jobs_list, job)
+  latest_good_commit = parse_latest_good_commit(jobs_list, error_id)
+  return latest_good_commit
+end
+
+def query_jobs_from_es(items)
+  es = ESQuery.new(ES_HOST, ES_PORT)
+  result = es.multi_field_query items
+  jobs = result['hits']['hits']
+  jobs_list = extract_jobs_list(jobs)
+  return jobs_list
+end
+
+def filter_jobs_list(jobs_list, bad_job)
+  jobs_list.delete_if do |item|
+    item['id'] == bad_job['id'] ||
+      (!item.key? 'commit_date') ||
+      item['commit_date'] > bad_job['commit_date']
+  end
+end
+
+def parse_latest_good_commit(jobs_list, error_id)
+  return nil if jobs_list.empty?
+
+  commit_hash = {}
+  jobs_list.each do |job|
+    commit_id = job['upstream_commit']
+    commit_hash[commit_id] = [] unless commit_hash.key?
commit_id + commit_hash[commit_id] << job + end + + commit_list = commit_hash.to_a + commit_list.sort_by! { |item| item[1][0]['commit_date'] }.reverse! + + commit_list.each do |item| + upstream_commit = item[0] + commit_jobs_list = item[1] + commit_status = parse_jobs_status(commit_jobs_list, error_id) + return upstream_commit if commit_status + end + return nil +end + +def parse_jobs_status(jobs_list, error_id) + status_list = [] + jobs_list.each do |job| + next unless job.key? 'stats' + + status_list << (job['stats'].key? error_id) + end + return nil if status_list.empty? + + return status_list.none? +end diff --git a/lib/compare.rb b/lib/compare.rb new file mode 100644 index 0000000000000000000000000000000000000000..2837edc7bcd2c7665b28a8fef581e6e97ad4d652 --- /dev/null +++ b/lib/compare.rb @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0 +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require_relative './es_query.rb' +require_relative './matrix2.rb' +require_relative './compare_matrixes.rb' +require_relative './constants.rb' + +# ------------------------------------------------------------------------------------------- +# compare_matrices_list +# - 2 conditions at least +# - each conditions will be parsed to query_fields for es_query +# - option: common_conditions, which are same with conditions will be merged with each conditions +# conditions sample: +# - single conditions: "id=6001" +# - multiple conditions: "os=centos,debian suite=iperf,atomic" +# + +def compare_matrices_list(argv, common_conditions, options) + condition_list = parse_argv(argv, common_conditions) + matrices_list = create_matrices_list(condition_list) + compare_matrixes(matrices_list, options: options) +end + +def parse_argv(argv, common_conditions) + conditions = [] + common_items = common_conditions.split(' ') + argv.each do |item| + items = item.split(' ') + common_items + condition = parse_conditions(items) + conditions << condition + end + conditions +end + +def create_matrices_list(conditions) + matrices_list = [] + es = ESQuery.new(ES_HOST, ES_PORT) + conditions.each do |condition| + query_results = es.multi_field_query(condition) + matrices_list << combine_query_data(query_results) + end + matrices_list +end + +# ------------------------------------------------------------------------------------------- +# compare_group +# - one condition only +# - condition can be parsed to query_fields for es_query +# - option: dimensions required, used for auto_group +# dimensions sample: +# - single dimension: "os" +# - multiple dimensions: "os os_version ..." +# + +def compare_group(argv, dimensions, options) + conditions = parse_conditions(argv) + dims = dimensions.split(' ') + groups_matrices = create_groups_matrices_list(conditions, dims) + compare_group_matrices(groups_matrices, options) +end + +def create_groups_matrices_list(conditions, dims) + es = ESQuery.new(ES_HOST, ES_PORT) + query_results = es.multi_field_query(conditions) + combine_group_query_data(query_results, dims) +end diff --git a/lib/compare_matrixes.rb b/lib/compare_matrixes.rb new file mode 100644 index 0000000000000000000000000000000000000000..6c7d8738d07c4217fa10a201ae4db9824da0e4d3 --- /dev/null +++ b/lib/compare_matrixes.rb @@ -0,0 +1,729 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0 +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+# frozen_string_literal: true + +LKP_SRC ||= ENV['LKP_SRC'] || File.dirname(__dir__) +require 'set' +require 'json/ext' +require_relative 'themes' +require "#{LKP_SRC}/lib/stats" + +FAILURE_PATTERNS = IO.read("#{LKP_SRC}/etc/failure").split("\n") +LATENCY_PATTERNS = IO.read("#{LKP_SRC}/etc/latency").split("\n") + +# Compute Tools + +def get_matrix_size(matrix) + if matrix.nil? || matrix.empty? + 0 + elsif matrix['stats_source'] + matrix['stats_source'].size + else + [matrix.values[0].size, matrix.values[-1].size].max + end +end + +def get_matrixes_size(matrixes_list) + matrixes_size = {} + matrixes_list.length.times do |index| + matrixes_size[index] = get_matrix_size(matrixes_list[index]) + end + matrixes_size +end + +def fill_missing_with_zeros(value_list, matrix_size) + value_list ||= [0] * matrix_size + value_list << 0 while value_list.size < matrix_size + value_list +end + +def successful?(field) + FAILURE_PATTERNS.all? { |pattern| field !~ /^#{pattern}/ } +end + +def standard_deviation(value_list, average, length) + Math.sqrt( + value_list.reduce(0) do |result, v| + result + (v - average)**2 + end / length.to_f + ) +end + +def latency?(field) + LATENCY_PATTERNS.any? { |pattern| field =~ /^#{pattern}/ } +end + +# Core +def get_length_sum_average_sorted(value_list) + length = value_list.length + sum = value_list.sum + average = sum / length.to_f + sorted = value_list.sort + return length, sum, average, sorted +end + +def get_values(value_list, success) + # get values(type: Hash) that include :average, :runs, :stddev_percent, ... + # + length, sum, average, sorted = get_length_sum_average_sorted(value_list) + if success + stddev_percent = nil + if length > 1 && average != 0 + stddev_percent = ( + standard_deviation(value_list, average, length) * 100 / average + ).to_i + end + { average: average, stddev_percent: stddev_percent, + min: sorted[0], max: sorted[-1], sorted: sorted } + else + { average: average, fails: sum, runs: length, + min: sorted[0], max: sorted[-1], sorted: sorted } + end +end + +def get_compare_value(base_value_average, value_average, success) + # get compare value(change or reproduction) + # + if success + return if base_value_average.zero? + + (100 * value_average / base_value_average - 100).round(1) + else + (100 * (value_average - base_value_average)).round(1) + end +end + +def field_changed?(base_values, values, success, field, options) + # check matrix field if changed + # + changed_stats?( + values[:sorted], values[:min], + values[:average], values[:max], + base_values[:sorted], base_values[:min], + base_values[:average], base_values[:max], + !success, + latency?(field), + field, + options + ) +end + +def set_compare_values(index, values, field, success, options) + # set compare values, example average/ reproduction and check if changed + # + compare_str = success ? :change : :reproduction + values[index][compare_str] = get_compare_value( + values[0][:average], + values[index][:average], + success + ) + values[:changed] |= field_changed?( + values[0], + values[index], + success, + field, + options + ) +end + +def get_values_by_field(matrixes_list, field, matrixes_size, success, options) + # get values by field, values struce example: values[0][:average] + # + values = {} + matrixes_list.length.times do |index| + value_list = fill_missing_with_zeros( + matrixes_list[index][field], + matrixes_size[index] + ) + values[index] = get_values(value_list, success) + next if index.zero? 
+ + set_compare_values( + index, values, + field, success, + options + ) + end + + values +end + +def get_matrixes_fields(matrixes_list) + # get all fields of matrixes + # + matrixes_fields = Set.new + matrixes_list.each do |matrix| + matrixes_fields |= Set.new(matrix.keys) + end + matrixes_fields +end + +def get_matrixes_values(matrixes_list, options) + # get all matrixes all field values + # + matrixes_values = { false => {}, true => {} } + matrixes_size = get_matrixes_size(matrixes_list) + get_matrixes_fields(matrixes_list).each do |field| + next if field == 'stats_source' + + success = successful?(field) + matrixes_values[success][field] = get_values_by_field( + matrixes_list, field, + matrixes_size, success, options + ) + end + matrixes_values +end + +def remove_unchanged_field(matrixes_values) + # remove unchanged field from matrixes valus and remove :changed key + # + matrixes_values.each_key do |success| + matrixes_values[success].delete_if do |field| + !matrixes_values[success][field].delete(:changed) + end + end +end + +def matrixes_empty?(matrixes_list) + return true if matrixes_list.nil? + return true if matrixes_list.empty? + + return matrixes_list.any?(&:empty?) +end + +def compare_matrixes(matrixes_list, matrixes_titles = nil, group_key = nil, options: {}) + # compare matrix in matrixes_list and print info + # @matrixes_list: list consisting of matrix + # @matrixes_titles: number or dimension of matrix + # @group_key: group_key of matrixes_list(only for group mode) + # @options: compare options, type: hash + return warn('Matrix cannot be empty!') || '' if matrixes_empty?(matrixes_list) + + options = { 'perf-profile': 5, theme: :none, no_print: false }.merge(options) + matrixes_values = get_matrixes_values(matrixes_list, options) + remove_unchanged_field(matrixes_values) if matrixes_list.length > 1 + no_print = options[:no_print] + result_str = group_key ? "\n\n\n\n\n" + group_key : '' + result_str += get_all_result_str( + matrixes_values, + matrixes_titles, + matrixes_list.size, + options[:theme] + ) + return result_str if no_print + + print result_str +end + +# JSON Format + +def print_json_result(matrixes_values, matrixes_titles) + result = { + 'matrixes_titles': matrixes_titles, + 'success': matrixes_values[true], + 'failure': matrixes_values[false] + }.to_json + print result +end + +# HTML Format + +def get_html_index(matrixes_titles) + index = " \n 0\n" + matrixes_titles.each do |matrix_index| + index += " #{matrix_index}\n" + end + index + " #{FIELD_STR}\n \n" +end + +def get_html_title(common_title, compare_title, matrixes_titles) + matrixes_number = matrixes_titles.size + title = " \n #{common_title}\n" + title += " #{compare_title}\n #{common_title}\n" * ( + matrixes_number - 1 + ) + title + " \n" +end + +def get_html_header(matrixes_titles, success) + if success + common_title = STDDEV_STR + compare_title = CHANGE_STR + else + common_title = FAILS_RUNS_STR + compare_title = REPRODUCTION_STR + end + + header = get_html_index(matrixes_titles) + header + get_html_title(common_title, compare_title, matrixes_titles) +end + +def get_html_success(values, index) + stddev_str = values[:average].to_s + stddev_percent = values[:stddev_percent] + if stddev_percent && stddev_percent != 0 + stddev_str += " ± #{stddev_percent}%" + end + + change_str = " #{values[:change]}%\n" unless index.zero? 
+ (change_str || '') + " #{stddev_str}\n" +end + +def get_html_failure(values, index) + fails_runs_str = "#{values[:fails]}:#{values[:runs]}" + reproduction_str = " #{values[:reproduction]}%\n" unless index.zero? + (reproduction_str || '') + " #{fails_runs_str}\n" +end + +def get_html_values(matrixes, success) + html_values = '' + matrixes.each do |index, values| + html_values += if success + get_html_success(values, index) + else + get_html_failure(values, index) + end + end + html_values +end + +def get_html_field(field) + " #{field}\n" +end + +def print_html_result(matrixes_values, matrixes_titles, success) + return if matrixes_values[success].empty? + + print "\n" + print get_html_header(matrixes_titles, success) + matrixes_values[success].each do |field, matrixes| + print " \n" + print get_html_values(matrixes, success) + print get_html_field(field) + print " \n" + end + print '
' +end + +# Format Tools +def get_decimal_length(number, length) + return length - 7 if number.negative? + + return length - 6 +end + +def get_suitable_number_str(number, length, format_pattern) + # if number string length can't < target length, + # transform number string to scientific notation string + + format_str = format(format_pattern, number) + return format_str if format_str.length <= length + + decimal_length = get_decimal_length(number, length) + unless decimal_length.negative? + scientific_str = format("%.#{decimal_length}e", number).sub('e+0', 'e+').sub('e-0', 'e-') + lack_length = length - scientific_str.length + unless lack_length.negative? + return scientific_str + ' ' * lack_length + end + end + format_str +end + +# Colorize + +def get_compare_value_color(value, theme) + if value.nil? + elsif value >= GOOD_STANDARD + { + foreground: theme[:good_foreground], + background: theme[:good_background] + } + elsif value <= BAD_STANDARD + { + foreground: theme[:bad_foreground], + background: theme[:bad_background] + } + end +end + +def get_color_code(color_str) + color_sym = color_str.to_sym if color_str.is_a?(String) + COLORS[color_sym] +end + +def replace_n(str, left_str, right_str) + if str.index("\n") + result_str = str.split("\n").join(right_str + "\n" + left_str) + result_str = left_str + result_str + right_str + result_str += "\n" if str[-1] == "\n" + result_str + else + left_str + str + right_str + end +end + +def colorize(color, str) + return str if color.nil? || color.empty? + + f_code = get_color_code(color[:foreground]) + b_code = get_color_code(color[:background]) + b_str = "\033[#{b_code + 10}m" if b_code + f_str = "\033[#{f_code}m" if f_code + left_str = "#{b_str}#{f_str}" + return str if left_str == '' + + right_str = "\033[0m" + replace_n(str, left_str, right_str) +end + +# compare each matrices_list within pre dimension of group matrices +# input: group matrices +# output: pre compare result of each group +def compare_group_matrices(group_matrices, options) + result_str = '' + group_matrices.each do |k, v| + matrices_list = [] + matrices_titles = [] + v.each do |dim, matrix| + matrices_titles << dim + matrices_list << matrix + end + if options[:no_print] + result_str += compare_matrixes(matrices_list, matrices_titles, k, options: options) + else + print compare_matrixes(matrices_list, matrices_titles, k, options: options) + end + end + result_str +end + +# Format Fields + +def format_fails_runs(fails, runs) + fails_width = (SUB_LONG_COLUMN_WIDTH * FAILS_PROPORTION).to_i + runs_width = SUB_LONG_COLUMN_WIDTH - fails_width - 1 + runs_str = get_suitable_number_str( + runs, + runs_width, + "%-#{runs_width}d" + ) + fails_str = get_suitable_number_str( + fails, + fails_width, + "%#{fails_width}d" + ) + fails_str + ':' + runs_str +end + +def get_reproduction_index_str(reproduction, compare_index) + reproduction_str = format('%+.1f%%', reproduction) + reproduction_index = compare_index - reproduction_str.index('.') + if reproduction_index.negative? 
+    reproduction_str = format('%+.1e%%', reproduction).sub('e+0', 'e+').sub('e-0', 'e-')
+    reproduction_index = compare_index - reproduction_str.index('e') - 1
+  end
+  return reproduction_index, reproduction_str
+end
+
+def get_reproduction(reproduction, compare_index)
+  if reproduction
+    reproduction_index, reproduction_str = get_reproduction_index_str(reproduction, compare_index)
+    space_str = ' ' * (SUB_SHORT_COLUMN_WIDTH - reproduction_str.length)
+    reproduction_str = space_str.insert(reproduction_index, reproduction_str)
+  else
+    reproduction_str = format("%-#{SUB_SHORT_COLUMN_WIDTH}s", '0')
+  end
+  reproduction_str
+end
+
+def format_reproduction(reproduction, theme, compare_index)
+  color = get_compare_value_color(reproduction, theme)
+  colorize(
+    color,
+    get_reproduction(reproduction, compare_index)
+  )
+end
+
+def get_change_index_str(change, compare_index)
+  change_str = format('%+.1f%%', change)
+  change_index = compare_index - change_str.index('.')
+  if change_index.negative?
+    change_str = format('%+.1e%%', change).sub('e+0', 'e+').sub('e-0', 'e-')
+    change_index = compare_index - change_str.index('e') - 1
+  end
+  return change_index, change_str
+end
+
+def get_change(change, compare_index)
+  if change
+    change_index, change_str = get_change_index_str(change, compare_index)
+    space_length = SUB_SHORT_COLUMN_WIDTH - change_str.length
+    space_str = ' ' * space_length
+    change_str = space_str.insert(change_index, change_str)
+  else
+    space_str = ' ' * (SUB_SHORT_COLUMN_WIDTH - 1)
+    change_str = space_str.insert(compare_index, '0')
+  end
+  format("%-#{SUB_SHORT_COLUMN_WIDTH}s", change_str)
+end
+
+def format_change(change, theme, compare_index)
+  color = get_compare_value_color(change, theme)
+  colorize(
+    color,
+    get_change(change, compare_index)
+  )
+end
+
+def format_stddev_percent(stddev_percent, average_width)
+  percent_width = SUB_LONG_COLUMN_WIDTH - average_width
+  if stddev_percent
+    if stddev_percent != 0
+      percent_str = get_suitable_number_str(
+        stddev_percent.abs,
+        percent_width - 4,
+        "%-#{percent_width - 4}d"
+      )
+      space_index = percent_str.index(' ') || -1
+      percent_str = percent_str.insert(space_index, '%')
+      return " ± #{percent_str}"
+    end
+  end
+  ' ' * percent_width
+end
+
+def format_stddev(average, stddev_percent)
+  average_width = (
+    SUB_LONG_COLUMN_WIDTH * STDDEV_AVERAGE_PROPORTION
+  ).to_i
+  average_str = get_suitable_number_str(
+    average.round(2),
+    average_width,
+    "%#{average_width}.2f"
+  )
+  percent_str = format_stddev_percent(stddev_percent, average_width)
+  average_str + percent_str
+end
+
+# Get Table Content
+
+def get_index(matrixes_number)
+  index_line = format("%#{SUB_LONG_COLUMN_WIDTH}d", 0)
+  (1...matrixes_number).each do |index|
+    index_line += INTERVAL_BLANK + format("%#{COLUMN_WIDTH}d", index)
+  end
+  index_line += INTERVAL_BLANK
+  index_line += format("%-#{COLUMN_WIDTH}s\n", FIELD_STR)
+  index_line
+end
+
+def get_dim(dims)
+  index_line = format("%#{SUB_LONG_COLUMN_WIDTH}s", dims[0])
+  (1...dims.size).each do |i|
+    index_line += INTERVAL_BLANK + format("%#{COLUMN_WIDTH}s", dims[i])
+  end
+  index_line + INTERVAL_BLANK + format("%-#{COLUMN_WIDTH}s\n", FIELD_STR)
+end
+
+def get_liner(matrixes_number)
+  liner = '-' * SUB_LONG_COLUMN_WIDTH
+  liner + (INTERVAL_BLANK + '-' * COLUMN_WIDTH) * matrixes_number + "\n"
+end
+
+def get_base_matrix_title(common_title, common_index)
+  str = ' ' * (SUB_LONG_COLUMN_WIDTH - common_title.length)
+  str.insert(common_index, common_title)
+end
+
+def get_other_matrix_title(common_title, compare_title,
common_index) + column = ' ' * ( + COLUMN_WIDTH - common_title.length - compare_title.length + ) + compare_index = (SUB_SHORT_COLUMN_WIDTH - compare_title.length) / 2 + compare_index = 0 if compare_index.negative? + column = column.insert(compare_index, compare_title) + column.insert(SUB_SHORT_COLUMN_WIDTH + common_index, common_title) +end + +def get_other_matrixes_title(common_title, compare_title, matrixes_number, common_index) + column = INTERVAL_BLANK + get_other_matrix_title( + common_title, compare_title, common_index + ) + column * (matrixes_number - 1) +end + +def get_title_name(success) + if success + common_title = STDDEV_STR + compare_title = CHANGE_STR + else + common_title = FAILS_RUNS_STR + compare_title = REPRODUCTION_STR + end + return common_title, compare_title +end + +def get_title(common_title, compare_title, matrixes_number, success, common_index) + common_index -= if success + common_title.length / 2 + else + common_title.index(':') + end + title = get_base_matrix_title(common_title, common_index) + title += get_other_matrixes_title( + common_title, compare_title, matrixes_number, common_index + ) + title += INTERVAL_BLANK + ' ' * COLUMN_WIDTH + title + "\n" +end + +def get_base_matrix_title_symbol(common_index, success) + title_symbol = ' ' * SUB_LONG_COLUMN_WIDTH + title_symbol[common_index] = success ? '\\' : '|' + title_symbol +end + +def get_other_matrixes_title_symbol(_compare_title, matrixes_number, common_index, success) + title_symbol = ' ' * ( + (INTERVAL_WIDTH + COLUMN_WIDTH) * matrixes_number + ) + start_point = 0 + + common_symbol = success ? '\\' : '|' + compare_symbol = '|' + compare_index = SUB_SHORT_COLUMN_WIDTH / 2 + + (matrixes_number - 1).times do |_| + start_point += INTERVAL_WIDTH + title_symbol[start_point + compare_index] = compare_symbol + title_symbol[start_point + SUB_SHORT_COLUMN_WIDTH + common_index] = common_symbol + start_point += COLUMN_WIDTH + end + title_symbol +end + +def get_title_symbol(compare_title, matrixes_number, common_index, success) + title_symbol = get_base_matrix_title_symbol(common_index, success) + title_symbol += get_other_matrixes_title_symbol( + compare_title, matrixes_number, common_index, success + ) + title_symbol + "\n" +end + +def get_header(matrixes_titles, success, common_title, compare_title) + common_index = if success + # average + " + " + standard_deviation + STDDEV_AVERAGE_PROPORTION * SUB_LONG_COLUMN_WIDTH + 1 + else + # fails + ":" + runs + FAILS_PROPORTION * SUB_LONG_COLUMN_WIDTH + end + + header, matrixes_number = get_first_header(matrixes_titles) + header += get_liner(matrixes_number) + header += get_title(common_title, compare_title, matrixes_number, success, common_index) + header += get_title_symbol( + compare_title, + matrixes_number, + common_index, + success + ) + header +end + +def get_first_header(matrixes_titles) + if matrixes_titles.is_a?(Array) + matrixes_number = matrixes_titles.size + header = get_dim(matrixes_titles) + else + matrixes_number = matrixes_titles + header = get_index(matrixes_number) + end + [header, matrixes_number] +end + +def get_success_str(values, index, theme, compare_index) + change_str = format_change(values[:change], theme, compare_index) unless index.zero? + stddev_str = format_stddev( + values[:average], + values[:stddev_percent] + ) + (change_str || '') + stddev_str +end + +def get_failure_str(values, index, theme, compare_index) + unless index.zero? 
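+    # index 0 is the base matrix; only the compared matrixes
+    # get a reproduction column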
+    reproduction_str = format_reproduction(
+      values[:reproduction], theme, compare_index
+    )
+  end
+
+  fails_runs_str = format_fails_runs(
+    values[:fails],
+    values[:runs]
+  )
+  (reproduction_str || '') + fails_runs_str
+end
+
+def get_values_str(matrixes, success, theme)
+  values_str = ''
+  compare_index = SUB_SHORT_COLUMN_WIDTH / 2
+  matrixes.each do |index, values|
+    values_str += if success
+                    get_success_str(
+                      values, index, theme, compare_index
+                    ) + INTERVAL_BLANK
+                  else
+                    get_failure_str(
+                      values, index, theme, compare_index
+                    ) + INTERVAL_BLANK
+                  end
+  end
+  values_str
+end
+
+def get_field_str(field)
+  format("%-#{COLUMN_WIDTH}s", field)
+end
+
+# Print
+def get_theme(matrixes_values, matrixes_titles, theme)
+  theme = theme.to_sym if theme.is_a?(String)
+  if theme == :html
+    print_html_result(matrixes_values, matrixes_titles, false)
+    print_html_result(matrixes_values, matrixes_titles, true)
+    return
+  elsif theme == :json
+    return print_json_result(matrixes_values, matrixes_titles)
+  end
+  return THEMES[theme] if THEMES.key?(theme)
+
+  warn "Theme #{theme} does not exist! Using default theme."
+  return THEMES[:none]
+end
+
+def get_all_result_str(matrixes_values, matrixes_titles, matrixes_number, theme)
+  matrixes_titles ||= matrixes_number.times.to_a.map(&:to_s)
+  theme = get_theme(matrixes_values, matrixes_titles, theme)
+  return '' unless theme
+
+  failure_str = get_result_str(matrixes_values[false].sort, matrixes_titles, false, theme)
+  success_str = get_result_str(matrixes_values[true].sort, matrixes_titles, true, theme)
+  failure_str + success_str
+end
+
+def get_result_str(values, matrixes_titles, success, theme)
+  return '' if values.empty?
+
+  result_str = "\n\n\n"
+  common_title, compare_title = get_title_name(success)
+  result_str += get_header(matrixes_titles, success, common_title, compare_title)
+  values.each do |field, matrixes|
+    result_str += get_values_str(matrixes, success, theme)
+    result_str += get_field_str(field)
+    result_str += "\n"
+  end
+  result_str
+end
diff --git a/lib/constants.rb b/lib/constants.rb
new file mode 100644
index 0000000000000000000000000000000000000000..3a4f0832e165f5d3bb6f0e394fbb35ecf529ca9e
--- /dev/null
+++ b/lib/constants.rb
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require_relative '../container/defconfig.rb'
+
+config = cci_defaults
+ES_HOST = config['ES_HOST'] || '172.17.0.1'
+ES_PORT = config['ES_PORT'] || 9200
+
+MAIL_HOST = config['MAIL_HOST'] || '172.17.0.1'
+MAIL_PORT = config['MAIL_PORT'] || 11_311
diff --git a/lib/es_query.rb b/lib/es_query.rb
new file mode 100644
index 0000000000000000000000000000000000000000..1b23c06b422ec806baa309cc2f413c6c9ff26590
--- /dev/null
+++ b/lib/es_query.rb
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'elasticsearch'
+require_relative 'constants.rb'
+
+# build multiple query request body
+class ESQuery
+  HOST = (ENV.key?('ES_HOST') ? ENV['ES_HOST'] : ES_HOST)
+  PORT = (ENV.key?('ES_PORT') ? ENV['ES_PORT'] : ES_PORT).to_i
+  def initialize(host = HOST, port = PORT)
+    @client = Elasticsearch::Client.new url: "http://#{host}:#{port}"
+    raise 'Connect Elasticsearch error!'
unless @client.ping
+  end
+
+  # Example @items: { key1 => value1, key2 => [value2, value3, ..], ...}
+  # means to query: key1 == value1 && (key2 in [value2, value3, ..])
+  def multi_field_query(items, size: 10_000)
+    query_fields = build_multi_field_subquery_body items
+    query = {
+      query: {
+        bool: {
+          must: query_fields
+        }
+      }, size: size
+    }
+    @client.search index: 'jobs*', body: query
+  end
+
+  def query_by_id(id)
+    @client.get_source({ index: 'jobs', type: '_doc', id: id })
+  rescue Elasticsearch::Transport::Transport::Errors::NotFound
+    nil
+  end
+end
+
+def build_multi_field_subquery_body(items)
+  query_fields = []
+  items.each do |key, value|
+    if value.is_a?(Array)
+      inner_query = build_multi_field_or_query_body(key, value)
+      query_fields.push({ bool: { should: inner_query } })
+    else
+      query_fields.push({ term: { key => value } })
+    end
+  end
+  query_fields
+end
+
+def build_multi_field_or_query_body(field, value_list)
+  inner_query = []
+  value_list.each do |inner_value|
+    inner_query.push({ term: { field => inner_value } })
+  end
+  inner_query
+end
+
+def parse_conditions(items)
+  items_hash = {}
+  items.each do |i|
+    key, value = i.split('=')
+    if key && value
+      value_list = value.split(',')
+      items_hash[key] = value_list.length > 1 ? value_list : value
+    else
+      warn "error: condition \"#{key}\" is missing a value", "tip: give input like \"#{key}=value\""
+      exit
+    end
+  end
+  items_hash
+end
diff --git a/lib/git.rb b/lib/git.rb
new file mode 100644
index 0000000000000000000000000000000000000000..db97c47048d315c59b398137fd7d805689457986
--- /dev/null
+++ b/lib/git.rb
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+# wrap common git commands
+class GitCommit
+  def initialize(repo, commit)
+    @git_prefix = "git -C /srv/git/#{repo}.git"
+    @commit = commit
+  end
+
+  def author_name
+    `#{@git_prefix} log -n1 --pretty=format:'%an' #{@commit}`.chomp
+  end
+
+  def author_email
+    `#{@git_prefix} log -n1 --pretty=format:'%ae' #{@commit}`.chomp
+  end
+
+  def subject
+    `#{@git_prefix} log -n1 --pretty=format:'%s' #{@commit}`.chomp
+  end
+
+  def commit_time
+    `#{@git_prefix} log -n1 --pretty=format:'%ci' #{@commit}`.chomp
+  end
+
+  def url
+    `#{@git_prefix} remote -v`.split[1]
+  end
+
+  def diff
+    `#{@git_prefix} diff #{@commit}~..#{@commit}`.chomp
+  end
+end
diff --git a/lib/git_mirror.rb b/lib/git_mirror.rb
new file mode 100644
index 0000000000000000000000000000000000000000..ad278803525ec31c03a73c0cf0db71561680e82f
--- /dev/null
+++ b/lib/git_mirror.rb
@@ -0,0 +1,222 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'yaml'
+require 'fileutils'
+require 'bunny'
+require 'json'
+# gem install PriorityQueue
+require 'priority_queue'
+require 'English'
+
+# worker threads
+class GitMirror
+  ERR_MESSAGE = <<~MESSAGE
+    fatal: not a git repository (or any parent up to mount point /srv)
+    Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+ MESSAGE + ERR_CODE = 128 + + def initialize(queue, feedback_queue) + @queue = queue + @feedback_queue = feedback_queue + @feedback_info = {} + end + + def feedback(git_repo, possible_new_refs) + @feedback_info = { git_repo: git_repo, possible_new_refs: possible_new_refs } + @feedback_queue.push(@feedback_info) + end + + def git_clone(url, mirror_dir) + ret = false + url = Array(url)[0] + if url.include?('gitee.com/') && File.exist?("/srv/git/#{url.delete_prefix('https://')}") + url = "/srv/git/#{url.delete_prefix('https://')}" + end + 10.times do + ret = system("git clone --mirror #{url} #{mirror_dir}") + break if ret + end + FileUtils.rm_r(mirror_dir) unless ret + return ret + end + + def git_fetch(mirror_dir) + fetch_info = %x(git -C #{mirror_dir} fetch 2>&1) + # Check whether mirror_dir is a good git repository by 3 conditions. If not, delete it. + if ($CHILD_STATUS.exitstatus == ERR_CODE) && fetch_info.include?(ERR_MESSAGE) && Dir.empty?(mirror_dir) + FileUtils.rmdir(mirror_dir) + end + return fetch_info.include? '->' + end + + def mirror_sync + fork_info = @queue.pop + mirror_dir = "/srv/git/#{fork_info['forkdir']}.git" + possible_new_refs = false + if File.directory?(mirror_dir) + possible_new_refs = git_fetch(mirror_dir) + else + FileUtils.mkdir_p(mirror_dir) + possible_new_refs = git_clone(fork_info['url'], mirror_dir) + end + feedback(fork_info['forkdir'], possible_new_refs) + end + + def git_mirror + loop do + mirror_sync + end + end +end + +# main thread +class MirrorMain + REPO_DIR = ENV['REPO_SRC'] + + def initialize + @feedback_queue = Queue.new + @fork_stat = {} + @priority = 0 + @priority_queue = PriorityQueue.new + @git_info = {} + @defaults = {} + @git_queue = Queue.new + load_fork_info + connection = Bunny.new('amqp://172.17.0.1:5672') + connection.start + channel = connection.create_channel + @message_queue = channel.queue('new_refs') + end + + def fork_stat_init(stat_key) + @fork_stat[stat_key] = { + queued: false, + priority: 0, + fetch_time: nil, + new_refs_time: nil + } + end + + def load_defaults(repodir) + defaults_file = "#{repodir}/DEFAULTS" + return unless File.exist?(defaults_file) + + project = repodir.delete_prefix("#{REPO_DIR}/") + @defaults[project] = YAML.safe_load(File.open(defaults_file)) + end + + def load_repo_file(repodir, project, fork_name) + @git_info["#{project}/#{fork_name}"] = YAML.safe_load(File.open(repodir)) + @git_info["#{project}/#{fork_name}"]['forkdir'] = "#{project}/#{fork_name}" + @git_info["#{project}/#{fork_name}"].merge!(@defaults[project]) if @defaults[project] + fork_stat_init("#{project}/#{fork_name}") + @priority_queue.push "#{project}/#{fork_name}", @priority + @priority += 1 + end + + def traverse_repodir(repodir) + if File.directory? 
repodir
+      load_defaults(repodir)
+      entry_list = Dir.entries(repodir) - Array['.', '..', 'DEFAULTS', '.ignore', '.git']
+      entry_list = Array['linus'] if File.basename(repodir) == 'linux'
+      entry_list.each do |entry|
+        traverse_repodir("#{repodir}/#{entry}")
+      end
+    else
+      project = File.dirname(repodir).delete_prefix("#{REPO_DIR}/")
+      fork_name = File.basename(repodir)
+      load_repo_file(repodir, project, fork_name)
+    end
+  end
+
+  def load_fork_info
+    traverse_repodir(REPO_DIR)
+  end
+
+  def create_workers
+    10.times do
+      Thread.new do
+        git_mirror = GitMirror.new(@git_queue, @feedback_queue)
+        git_mirror.git_mirror
+      end
+      sleep(0.1)
+    end
+  end
+
+  def send_message(feedback_info)
+    message = feedback_info.to_json
+    @message_queue.publish(message)
+  end
+
+  def handle_feedback
+    return if @feedback_queue.empty?
+
+    feedback_info = @feedback_queue.pop(true)
+    @fork_stat[feedback_info[:git_repo]][:queued] = false
+    return unless feedback_info[:possible_new_refs]
+
+    new_refs = check_new_refs(feedback_info[:git_repo])
+    return if new_refs[:heads].empty?
+
+    feedback_info[:new_refs] = new_refs
+    feedback_info.merge!(@git_info[feedback_info[:git_repo]])
+    feedback_info.delete(:cur_refs)
+    send_message(feedback_info)
+  end
+
+  def push_git_queue
+    return if @git_queue.size >= 1
+
+    fork_key = @priority_queue.delete_min_return_key
+    unless @fork_stat[fork_key][:queued]
+      @fork_stat[fork_key][:queued] = true
+      @git_info[fork_key][:cur_refs] = get_cur_refs(fork_key) if @git_info[fork_key][:cur_refs].nil?
+      @git_queue.push(@git_info[fork_key])
+    end
+    @priority_queue.push fork_key, @priority
+    @priority += 1
+  end
+
+  def main_loop
+    loop do
+      push_git_queue
+      handle_feedback
+    end
+  end
+end
+
+# main thread
+class MirrorMain
+  def compare_refs(cur_refs, old_refs)
+    new_refs = { heads: {} }
+    cur_refs[:heads].each do |ref, commit_id|
+      if old_refs[:heads][ref] != commit_id
+        new_refs[:heads][ref] = commit_id
+      end
+    end
+    return new_refs
+  end
+
+  def get_cur_refs(git_repo)
+    mirror_dir = "/srv/git/#{git_repo}.git"
+    show_ref_out = %x(git -C #{mirror_dir} show-ref --heads)
+    cur_refs = { heads: {} }
+    show_ref_out.each_line do |line|
+      next if line.start_with? '#'
+
+      strings = line.split
+      cur_refs[:heads][strings[1]] = strings.first
+    end
+    return cur_refs
+  end
+
+  def check_new_refs(git_repo)
+    cur_refs = get_cur_refs(git_repo)
+    new_refs = compare_refs(cur_refs, @git_info[git_repo][:cur_refs])
+    @git_info[git_repo][:cur_refs] = cur_refs
+    return new_refs
+  end
+end
diff --git a/lib/mail_bisect_result.rb b/lib/mail_bisect_result.rb
new file mode 100644
index 0000000000000000000000000000000000000000..309155b4f3b17d251d22c0ab8b113a77704de674
--- /dev/null
+++ b/lib/mail_bisect_result.rb
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+require_relative 'mail_client'
+require_relative 'git'
+require 'json'
+
+# compose and send email for bisect result
+class MailBisectResult
+  def initialize(bisect_info)
+    @error_id = bisect_info['error_id']
+    @repo = bisect_info['repo']
+    @commit_id = bisect_info['commit']
+    @git_commit = GitCommit.new(@repo, @commit_id)
+  end
+
+  def create_send_email
+    compose_mail
+    send_mail
+  end
+
+  def compose_mail
+    subject = "[Compass-CI] #{@repo}.git: bisect result"
+    body = <<~BODY
+      Hi #{@git_commit.author_name},
+
+      Bisect completed for
+
+      url: #{@git_commit.url}
+
+      This is a bisect email from compass-ci. We met some problems when testing with new commits.
+      Would you help to check what happened?
+      After submitting a job, we noticed an error response due to the commit:
+
+      commit: #{@commit_id[0..11]} ("#{@git_commit.subject}")
+      error_id: #{@error_id}
+
+      https://gitee.com/openeuler/compass-ci
+    BODY
+    @hash = { 'to' => @git_commit.author_email, 'body' => body, 'subject' => subject }
+  end
+
+  def send_mail
+    json = @hash.to_json
+    MailClient.new.send_mail(json)
+  end
+end
diff --git a/lib/mail_client.rb b/lib/mail_client.rb
new file mode 100644
index 0000000000000000000000000000000000000000..1d0938a60d23474255e96c408e3b71aa278c0d4e
--- /dev/null
+++ b/lib/mail_client.rb
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'rest-client'
+require_relative 'constants.rb'
+
+# mail client class
+class MailClient
+  HOST = (ENV.key?('MAIL_HOST') ? ENV['MAIL_HOST'] : MAIL_HOST)
+  PORT = (ENV.key?('MAIL_PORT') ? ENV['MAIL_PORT'] : MAIL_PORT).to_i
+  def initialize(host = HOST, port = PORT)
+    @host = host
+    @port = port
+  end
+
+  def send_mail(mail_json)
+    resource = RestClient::Resource.new("http://#{@host}:#{@port}/send_mail_yaml")
+    resource.post(mail_json)
+  end
+end
diff --git a/lib/mail_job_result.rb b/lib/mail_job_result.rb
new file mode 100644
index 0000000000000000000000000000000000000000..9202b8101baa876bc8c3ea1fa622fe93a9831b22
--- /dev/null
+++ b/lib/mail_job_result.rb
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require_relative 'mail_client.rb'
+require_relative 'es_query.rb'
+require_relative 'constants.rb'
+require 'json'
+
+# compose and send email for job result
+class MailJobResult
+  def initialize(job_id)
+    @job_id = job_id
+  end
+
+  def send_mail
+    json = compose_mail.to_json
+    MailClient.new.send_mail(json)
+  end
+
+  def compose_mail
+    set_submitter_info
+    subject = "[Compass-CI] job: #{@job_id} result"
+    signature = "Regards\nCompass-ci\nhttps://gitee.com/openeuler/compass-ci"
+    body = "Hi,
+    Thanks for your participation in the Kunpeng software ecosystem!
+    Your job: #{@job_id} has finished.
+    Please check the job result: \n\n#{signature}"
+    { 'to' => @submitter_email, 'body' => body, 'subject' => subject }
+  end
+
+  def set_submitter_info
+    job = query_job
+    exit unless job['email']
+
+    @submitter_email = job['email']
+  end
+
+  def query_job
+    es = ESQuery.new
+    query_result = es.multi_field_query({ 'id' => @job_id })
+    query_result['hits']['hits'][0]['_source']
+  end
+end
diff --git a/lib/matrix2.rb b/lib/matrix2.rb
new file mode 100644
index 0000000000000000000000000000000000000000..05f96dc2d7f4485f245e315c41ce147bb928b442
--- /dev/null
+++ b/lib/matrix2.rb
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# frozen_string_literal: true
+
+LKP_SRC = ENV['LKP_SRC'] || '/c/lkp-tests'
+
+require "#{LKP_SRC}/lib/stats"
+require "#{LKP_SRC}/lib/yaml"
+require "#{LKP_SRC}/lib/matrix"
+require_relative './params_group.rb'
+
+def set_pre_value(item, value, sample_size)
+  if value.size == 1
+    value[0]
+  elsif independent_counter? item
+    value.sum
+  elsif event_counter?
item
+    value[-1] - value[0]
+  else
+    value.sum / sample_size
+  end
+end
+
+def extract_pre_result(stats, monitor, file)
+  monitor_stats = load_json file # yaml.load_json
+  sample_size = max_cols(monitor_stats)
+
+  monitor_stats.each do |k, v|
+    next if k == "#{monitor}.time"
+
+    stats[k] = set_pre_value(k, v, sample_size)
+    stats[k + '.max'] = v.max if should_add_max_latency k
+  end
+end
+
+def file_check(file)
+  case file
+  when /\.json$/
+    File.basename(file, '.json')
+  when /\.json\.gz$/
+    File.basename(file, '.json.gz')
+  end
+end
+
+def create_stats(result_root)
+  stats = {}
+
+  monitor_files = Dir["#{result_root}/*.{json,json.gz}"]
+
+  monitor_files.each do |file|
+    next unless File.size?(file)
+
+    monitor = file_check(file)
+    next if monitor == 'stats' # stats.json already created?
+
+    extract_pre_result(stats, monitor, file)
+  end
+
+  save_json(stats, result_root + '/stats.json') # yaml.save_json
+  # stats
+end
+
+def samples_fill_missing_zeros(value, size)
+  samples = value || [0] * size
+  samples << 0 while samples.size < size
+  samples
+end
+
+# input: job_list
+# return: matrix of Hash(String, Array(Number))
+# Eg: matrix: {
+#       test_params_1 => [value_1, value_2, ...],
+#       test_params_2 => [value_1, value_2, ...],
+#       test_params_3 => [value_1, 0, ...],
+#       ...
+#     }
+def create_matrix(job_list)
+  matrix = {}
+  job_list.each do |job|
+    stats = job['stats']
+    next unless stats
+
+    stats.each do |key, value|
+      matrix[key] = [] unless matrix[key]
+      matrix[key] << value
+    end
+  end
+  col_size = job_list.size
+  matrix.each_value do |value|
+    samples_fill_missing_zeros(value, col_size)
+  end
+  matrix
+end
+
+# input: query results from es_query
+# return: matrix
+def combine_query_data(query_data)
+  job_list = query_data['hits']['hits']
+  job_list.map! { |job| job['_source'] }
+  create_matrix(job_list)
+end
+
+# input: query results from es_query
+# return: group_matrix of Hash(String, Hash(String, matrix))
+# Eg: group_matrix: {
+#       group1_key => { dimension_1 => matrix,
+#                       dimension_2 => matrix,
+#                       ... },
+#       group2_key => {...},
+#       ...
+#     }
def combine_group_query_data(query_data, dims)
+  job_list = query_data['hits']['hits']
+  groups = auto_group(job_list, dims)
+  groups.each do |group_key, value|
+    value.each do |dimension_key, jobs|
+      groups[group_key][dimension_key] = create_matrix(jobs)
+    end
+    groups.delete(group_key) if value.size < 2
+  end
+  groups
+end
diff --git a/lib/params_group.rb b/lib/params_group.rb
new file mode 100644
index 0000000000000000000000000000000000000000..0f07d267f0b8ee831e3f8dfb59ce5de2f62a3837
--- /dev/null
+++ b/lib/params_group.rb
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+# Example:
+# Input: jobs_list. The results of ES query.
+#
+# eg: [ jobs1, jobs2, ...... ]
+#
+# job: {suite => xxx, os => xxx, tbox_group => xxx, id => xxx, stats => xxx, ...}
+#
+# Dimension: the dimensions you want to compare; their values affect the test result
+#
+# eg: os
+#
+# output: grouped results.
+# eg:
+#
+# {
+#   'tbox_group=xxx/os_arch=xxx/os_version=23/pp.a.b=xxx' => {
+#     'centos' => [job1, job2, ... ],
+#     'debian' => [job3, job4, job5, ...]
+#   },
+#   ....
+# }
+#
+
+COMMON_PARAMS = %w[tbox_group os os_arch os_version].freeze
+
+# ES search result auto grouping.
+# @jobs_list: Array. job list.
+# @dimensions: Array, compare key list.
+def auto_group(jobs_list, dimensions)
+  return {} if dimensions.empty?
+ + jobs_list = extract_jobs_list(jobs_list) + groups = group(jobs_list, dimensions) + return remove_singleton(groups) +end + +def extract_jobs_list(jobs_list) + jobs_list.map do |job| + job['_source'] + end +end + +def group(jobs_list, dimensions) + groups = {} + jobs_list.each do |job| + group_params, dimension_key = get_group_dimension_params(job, dimensions) + group_key = get_group_key(group_params) + groups[group_key] ||= {} + groups[group_key][dimension_key] ||= [] + next unless job['stats'] + + groups[group_key][dimension_key] << job + end + filter_groups(groups) + groups +end + +def filter_groups(groups) + groups.each do |group_key, value| + value.each_key do |dim_key| + value.delete(dim_key) if value[dim_key].empty? + end + groups.delete(group_key) if groups[group_key].empty? + end +end + +def get_all_params(job) + all_params = {} + job.each_key do |param| + all_params[param] = job[param] if COMMON_PARAMS.include?(param) + next unless param == 'pp' + + pp_params = get_pp_params(job[param]) + pp_params.each do |k, v| + all_params[k] = v + end + end + all_params +end + +def get_pp_params(pp_params) + pp = {} + pp_params.each do |k, v| + next unless v.is_a?(Hash) + + v.each do |inner_key, inner_value| + pp[['pp', k, inner_key].join('.')] = inner_value + end + end + pp +end + +def get_group_dimension_params(job, dimensions) + all_group_params = get_all_params(job) + dimension_list = [] + dimensions.each do |dimension| + dimension_list << all_group_params.delete(dimension) if all_group_params.key?(dimension) + end + [all_group_params, dimension_list.join('|')] +end + +def get_group_key(group_params) + group_str = group_params.each.map do |k, v| + "#{k}=#{v}" + end.sort!.join('/') + return group_str +end + +def remove_singleton(groups) + groups.delete_if { |_k, v| v.length < 2 } +end diff --git a/lib/sched_client.rb b/lib/sched_client.rb new file mode 100644 index 0000000000000000000000000000000000000000..f1c095defa99d31c5d423eb47aa57ffc3f39e679 --- /dev/null +++ b/lib/sched_client.rb @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'rest-client' + +# sched client class +class SchedClient + HOST = (ENV.key?('SCHED_HOST') ? ENV['SCHED_HOST'] : '172.17.0.1') + PORT = (ENV.key?('SCHED_PORT') ? ENV['SCHED_PORT'] : 3000).to_i + def initialize(host = HOST, port = PORT) + @host = host + @port = port + end + + def submit_job(job_json) + resource = RestClient::Resource.new("http://#{@host}:#{@port}/submit_job") + resource.post(job_json) + end +end diff --git a/lib/taskqueue_client.rb b/lib/taskqueue_client.rb new file mode 100644 index 0000000000000000000000000000000000000000..91b72dcd88ec46fc626ac1d2b4415bfe9843c5b2 --- /dev/null +++ b/lib/taskqueue_client.rb @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'rest-client' + +# taskqueue client +class TaskQueueClient + HOST = (ENV.key?('TASKQUEUE_HOST') ? ENV['TASKQUEUE_HOST'] : '127.0.0.1') + PORT = (ENV.key?('TASKQUEUE_PORT') ? 
ENV['TASKQUEUE_PORT'] : 3060).to_i
+  def initialize(host = HOST, port = PORT)
+    @host = host
+    @port = port
+  end
+
+  def consume_task(queue_path)
+    url = "http://#{@host}:#{@port}/consume?queue=#{queue_path}"
+    RestClient::Request.execute(
+      method: :put,
+      url: url
+    )
+  end
+end
diff --git a/lib/themes.rb b/lib/themes.rb
new file mode 100644
index 0000000000000000000000000000000000000000..7b79ee4a3239e3beb26256b21397dc2cfaced0ce
--- /dev/null
+++ b/lib/themes.rb
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+COLUMN_WIDTH = 30 # print column width
+INTERVAL_WIDTH = 2 # width of the interval between columns
+INTERVAL_BLANK = ' ' * INTERVAL_WIDTH
+
+# the sub_columns that are children of a column
+# sub_short_column_width is 1/3 of COLUMN_WIDTH
+# the short sub_column
+SUB_SHORT_COLUMN_WIDTH = (COLUMN_WIDTH / 3.0).to_i
+
+# the long sub_column
+SUB_LONG_COLUMN_WIDTH = COLUMN_WIDTH - SUB_SHORT_COLUMN_WIDTH
+
+CHANGE_STR = 'change'
+STDDEV_STR = '%stddev'
+STDDEV_AVERAGE_PROPORTION = 5 / 8.0
+FIELD_STR = 'metric'
+FAILS_RUNS_STR = 'fails:runs'
+REPRODUCTION_STR = 'change'
+FAILS_PROPORTION = 4 / 7.0
+
+# when change or reproduction is greater than or equal to GOOD_STANDARD,
+# show it in color.
+# example: 100 means 100%
+GOOD_STANDARD = 15
+
+# same as GOOD_STANDARD
+BAD_STANDARD = -15
+
+COLORS = {
+  default: 39,
+  black: 30,
+  red: 31,
+  green: 32,
+  yellow: 33,
+  blue: 34,
+  magenta: 35,
+  cyan: 36,
+  'light gray': 37,
+  'dark gray': 90,
+  'light red': 91,
+  'light yellow': 93,
+  'light blue': 94,
+  'light magenta': 95,
+  'light cyan': 96,
+  white: 97
+}.freeze
+
+THEMES = {
+  none: {},
+  classic: {
+    good_foreground: 'light yellow',
+    bad_foreground: 'light red'
+  },
+  focus_good: {
+    good_foreground: 'light yellow'
+  },
+  focus_bad: {
+    bad_foreground: 'light red'
+  },
+  striking: {
+    good_foreground: 'black',
+    good_background: 'light yellow',
+    bad_foreground: 'black',
+    bad_background: 'light red'
+  },
+  light: {
+    good_foreground: 'light blue',
+    good_background: 'white',
+    bad_foreground: 'light red',
+    bad_background: 'white'
+  }
+}.freeze
diff --git a/os/debian-packages.sh b/os/debian-packages.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c19f73cb3288fe70359298233dfec4334a2253fb
--- /dev/null
+++ b/os/debian-packages.sh
@@ -0,0 +1,67 @@
+pkgs=(
+	blktrace
+	btrfs-progs
+	cifs-utils
+	cmake
+	cpio
+	curl
+	dbench
+	dd
+	dnsutils
+	docker.io
+	fio
+	fuse
+	gawk
+	hpcc
+	hwinfo
+	hwloc
+	iperf3
+	ipmitool
+	iproute2
+	kexec-tools
+	lftp
+	mawk
+	netperf
+	netpipe
+	netpipe-tcp
+	net-tools
+	nfs-common
+	nmap
+	numactl
+	openssh-server
+	pciutils
+	pigz
+	pixz
+	procps
+	psmisc
+	qemu
+	qemu-system
+	qemu-system-arm
+	qemu-system-common
+	qemu-system-data
+	qemu-system-gui
+	qemu-system-mips
+	qemu-system-misc
+	qemu-system-ppc
+	qemu-system-sparc
+	qemu-system-x86
+	rsync
+	rt-tests
+	socat
+	ssh
+	strace
+	sudo
+	sysbench
+	sysstat
+	tcpdump
+	time
+	unixbench
+	unzip
+	virt-what
+	vmstat
+	wget
+	xfsprogs
+	zsh
+)
+
+apt-get install "${pkgs[@]}"
diff --git a/providers/docker/bin/entrypoint.sh b/providers/docker/bin/entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..63cb605b69d24c8e2b4f3dac6fb1fe1be1b000ff
--- /dev/null
+++ b/providers/docker/bin/entrypoint.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
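+#
+# container entrypoint: hand control to the LKP bootstrap script that
+# docker.rb downloaded and run.sh bind-mounted into the container at /lkp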
+ +sh /lkp/lkp/src/rootfs/addon/etc/init.d/lkp-bootstrap diff --git a/providers/docker/docker.rb b/providers/docker/docker.rb new file mode 100755 index 0000000000000000000000000000000000000000..c75fc2c42db5a32b130382c69fc76cbe12a82ae9 --- /dev/null +++ b/providers/docker/docker.rb @@ -0,0 +1,110 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'open-uri' +require 'json' +require 'set' +require 'fileutils' +require_relative '../../container/defconfig' + +BASE_DIR = '/srv/dc' + +def get_url(hostname) + names = Set.new %w[ + SCHED_HOST + SCHED_PORT + ] + defaults = relevant_defaults(names) + host = defaults['SCHED_HOST'] || '172.17.0.1' + port = defaults['SCHED_PORT'] || 3000 + "http://#{host}:#{port}/boot.container/hostname/#{hostname}" +end + +def parse_response(url) + response = nil + URI.open(url) do |http| + response = http.read + end + hash = response.is_a?(String) ? JSON.parse(response) : nil + if hash.nil? || !hash.key?('job') + puts '..........' + puts 'no job now' + puts '..........' + return nil + end + return hash +end + +def wget_cmd(path, url, name) + system "wget -q -P #{path} #{url} && gzip -dc #{path}/#{name} | cpio -id -D #{path}" +end + +def build_load_path(hostname) + return BASE_DIR + '/' + hostname +end + +def clean_dir(path) + Dir.foreach(path) do |file| + if file != '.' && file != '..' + filename = File.join(path, file) + if File.directory?(filename) + FileUtils.rm_r(filename) + else + File.delete(filename) + end + end + end +end + +def load_initrds(load_path, hash) + clean_dir(load_path) if Dir.exist?(load_path) + arch = RUBY_PLATFORM.split('-')[0] + job_url = hash['job'] + lkp_url = hash['lkp'] + wget_cmd(load_path, job_url, 'job.cgz') + wget_cmd(load_path, lkp_url, "lkp-#{arch}.cgz") +end + +def run(hostname, load_path, hash) + docker_image = hash['docker_image'] + system "docker pull #{docker_image}" + system( + { 'hostname' => hostname, 'docker_image' => docker_image, 'load_path' => load_path }, + ENV['CCI_SRC'] + '/providers/docker/run.sh' + ) + clean_dir(load_path) +end + +def main(hostname) + url = get_url hostname + puts url + hash = parse_response url + return if hash.nil? + + load_path = build_load_path(hostname) + load_initrds(load_path, hash) + run(hostname, load_path, hash) +end + +def save_pid(pids) + FileUtils.cd("#{ENV['CCI_SRC']}/providers") + f = File.new('dc.pid', 'a') + f.puts pids + f.close +end + +def multi_docker(hostname, nr_container) + pids = [] + nr_container.to_i.times do |i| + pid = Process.fork do + loop do + main("#{hostname}-#{i}") + sleep 5 + end + end + pids << pid + end + return pids +end diff --git a/providers/docker/run.sh b/providers/docker/run.sh new file mode 100755 index 0000000000000000000000000000000000000000..ddbacd61319a934930c50fa08af04d7a7e4178e5 --- /dev/null +++ b/providers/docker/run.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +. 
$LKP_SRC/lib/yaml.sh
+
+: ${docker_image:="centos:7"}
+: ${load_path:="${HOME}/jobs"}
+: ${hostname:="dc-1g-1"}
+
+if [[ $hostname =~ ^(.*)-[0-9]+$ ]]; then
+	tbox_group=${BASH_REMATCH[1]}
+else
+	tbox_group=$hostname
+fi
+host=${tbox_group%%--*}
+
+create_yaml_variables "$LKP_SRC/hosts/${host}"
+
+DIR=$(dirname $(realpath $0))
+cmd=(
+	docker run
+	--rm
+	-m $memory
+	--mount type=tmpfs,destination=/tmp
+	-v ${load_path}/lkp:/lkp
+	-v ${DIR}/bin:/root/bin:ro
+	--oom-score-adj="-1000"
+	${docker_image}
+	/root/bin/entrypoint.sh
+)
+
+"${cmd[@]}"
diff --git a/providers/multi-docker b/providers/multi-docker
new file mode 100755
index 0000000000000000000000000000000000000000..64cafaa93165b3cc63a9598eef73199622cbf63c
--- /dev/null
+++ b/providers/multi-docker
@@ -0,0 +1,40 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'optparse'
+require_relative './docker/docker'
+
+opt = {}
+
+options = OptionParser.new do |opts|
+  opts.banner = 'Usage: multi-docker [-n] [-c]'
+
+  opts.separator ''
+  opts.on('-n HOSTNAME_PREFIX', '--name HOSTNAME_PREFIX', 'specify used hostname_prefix') do |name|
+    opt['hostname_prefix'] = name
+  end
+
+  opts.on('-c count', '--count count', 'how many containers do you need') do |num|
+    opt['nr_container'] = num
+  end
+
+  opts.on_tail('-h', '--help', 'show this message') do
+    puts opts
+    exit
+  end
+end
+
+if ARGV.size.zero?
+  puts options
+  exit
+end
+
+options.parse!(ARGV)
+
+hostname = opt['hostname_prefix'] || 'dc-1g'
+nr_container = opt['nr_container'] || 1
+
+pids = multi_docker(hostname, nr_container)
+save_pid pids
diff --git a/providers/multi-qemu b/providers/multi-qemu
new file mode 100755
index 0000000000000000000000000000000000000000..866ca8d2d7bf45046e7cac03d288a6f0356865eb
--- /dev/null
+++ b/providers/multi-qemu
@@ -0,0 +1,68 @@
+#!/usr/bin/env ruby
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
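+#
+# Usage: multi-qemu [HOSTNAME_PREFIX] [NR_VM]
+# e.g.   multi-qemu vm-hi1620-2p8g--$USER 2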
+# frozen_string_literal: true + +require 'fileutils' + +PWD = File.dirname(File.realpath($PROGRAM_NAME)) + +# Run multiple QEMU in parallel +HOSTNAME = ARGV[0] || "vm-hi1620-2p8g--#{ENV['USER']}" +NR_VM = ARGV[1] || 1 + +def run(seqno) + loop do + start_time = Time.new + hostname = "#{HOSTNAME}-#{seqno}" + log_file = "/srv/cci/serial/logs/#{hostname}" + + File.open(log_file, 'w') do |f| + # fluentd refresh time is 1s + # let fluentd to monitor this file first + sleep(2) + f.puts "\n#{start_time.strftime('%Y-%m-%d %H:%M:%S')} starting QEMU" + end + + pwd_hostname = File.join(PWD, hostname) + FileUtils.mkdir_p(pwd_hostname) unless File.exist?(pwd_hostname) + FileUtils.cd(pwd_hostname) + system( + { 'hostname' => hostname }, + ENV['CCI_SRC'] + '/providers/qemu.sh' + ) + + duration = ((Time.new - start_time) / 60).round(2) + File.open(log_file, 'a') do |f| + f.puts "\nTotal QEMU duration: #{duration} minutes" + end + + # sleep 5s is for fluentd to collect it's log + sleep(5) + end +end + +def save_pid(arr) + FileUtils.rm('pid') if File.exist?('pid') + f = File.new('pid', 'a') + arr.each do |i| + f.puts(i) + end + f.close +end + +def multiqemu + pids = [] + NR_VM.to_i.times do |i| + pid = Process.fork do + run i + end + pids << pid + end + return pids +end + +if $PROGRAM_NAME == __FILE__ + pids = multiqemu + save_pid pids +end diff --git a/providers/my-qemu.sh b/providers/my-qemu.sh new file mode 100755 index 0000000000000000000000000000000000000000..2f20f67dcc242fef85d717a76f9b4e2919e61c49 --- /dev/null +++ b/providers/my-qemu.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +[[ $tbox_group ]] || +tbox_group=vm-hi1620-2p8g +export hostname=$tbox_group--$USER-$$ + +$CCI_SRC/providers/qemu.sh diff --git a/providers/qemu.sh b/providers/qemu.sh new file mode 100755 index 0000000000000000000000000000000000000000..786a7aa0c898f26155db562dbd38ad26337466c5 --- /dev/null +++ b/providers/qemu.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# - hostname + +. $LKP_SRC/lib/yaml.sh +. $CCI_SRC/container/defconfig.sh + +load_cci_defaults + +: ${hostname:="vm-hi1620-1p1g-1"} +# unicast prefix: x2, x6, xA, xE +export mac=$(echo $hostname | md5sum | sed 's/^\(..\)\(..\)\(..\)\(..\)\(..\).*$/0a-\1-\2-\3-\4-\5/') +echo hostname: $hostname +echo mac: $mac +echo $mac > mac +echo "arp -n | grep ${mac//-/:}" > ip.sh +chmod +x ip.sh + +curl -X PUT "http://${SCHED_HOST:-172.17.0.1}:${SCHED_PORT:-3000}/set_host_mac?hostname=${hostname}&mac=${mac}" + +del_host_mac() +{ + curl -X PUT "http://${SCHED_HOST:-172.17.0.1}:${SCHED_PORT:-3000}/del_host_mac?mac=${mac}" > /dev/null 2>&1 +} + +trap del_host_mac EXIT + +( + if [[ $hostname =~ ^(.*)-[0-9]+$ ]]; then + tbox_group=${BASH_REMATCH[1]} + else + tbox_group=$hostname + fi + + host=${tbox_group%%--*} + + create_yaml_variables "$LKP_SRC/hosts/${host}" + + source "$CCI_SRC/providers/$provider/${template}.sh" +) diff --git a/providers/qemu/kvm.sh b/providers/qemu/kvm.sh new file mode 100755 index 0000000000000000000000000000000000000000..64f4247c1bde2914157b1cb943d8194f22908e8d --- /dev/null +++ b/providers/qemu/kvm.sh @@ -0,0 +1,89 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# - nr_cpu +# - memory + +: ${nr_cpu:=1} +: ${memory:=1G} + +log_file=/srv/cci/serial/logs/${hostname} +if [ ! 
-f "$log_file" ]; then + touch $log_file + # fluentd refresh time is 1s + # let fluentd to monitor this file first + sleep 2 +fi + +qemu=qemu-system-aarch64 +command -v $qemu >/dev/null || qemu=qemu-kvm + +echo $SCHED_PORT +ipxe_script=ipxe_script +curl http://${SCHED_HOST:-172.17.0.1}:${SCHED_PORT:-3000}/boot.ipxe/mac/${mac} > $ipxe_script +cat $ipxe_script >> ${log_file} +#echo ----- +#cat $ipxe_script +#echo ----- +#exit + +append= +initrds= +while read a b c +do + case "$a" in + '#') + ;; + initrd) + file=$(basename "$b") + rm -f $file + wget -a ${log_file} --progress=bar:force $b + initrds+="$file " + ;; + kernel) + kernel=$(basename "$b") + #[[ -f $kernel ]] || + rm -f $kernel + wget -a ${log_file} --progress=bar:force $b + append=$(echo "$c" | sed -r "s/ initrd=[^ ]+//g") + ;; + *) + ;; + esac +#done < /tftpboot/boot.ipxe-debian +#done < /tftpboot/boot.ipxe-centos +done < $ipxe_script + +[ -n "$initrds" ] || { + exit +} + +initrd=initrd +cat $initrds > $initrd + +echo kernel: $kernel +echo initrds: $initrds +echo append: $append +echo less $log_file + +sleep 5 + +kvm=( + $qemu + -machine virt-4.0,accel=kvm,gic-version=3 + -kernel $kernel + -initrd $initrd + -smp $nr_cpu + -m $memory + -cpu Kunpeng-920 + -device virtio-gpu-pci + -bios /usr/share/qemu-efi-aarch64/QEMU_EFI.fd + -nic tap,model=virtio-net-pci,helper=/usr/libexec/qemu-bridge-helper,br=br0,mac=${mac} + -k en-us + -no-reboot + -nographic + -serial file:${log_file} + -monitor null +) + +"${kvm[@]}" --append "${append}" diff --git a/providers/qemu/pxe.sh b/providers/qemu/pxe.sh new file mode 100755 index 0000000000000000000000000000000000000000..8cf198daa63993d6aa82a7c85758f66af3fa91bd --- /dev/null +++ b/providers/qemu/pxe.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# - nr_cpu +# - memory + +: ${nr_cpu:=1} +: ${memory:=1G} + +serial_log=/srv/cci/serial/logs/${hostname} +if [ ! -f "$serial_log" ]; then + touch $serial_log + # fluentd refresh time is 1s + # let fluentd to monitor this file first + sleep 2 +fi + +qemu=qemu-system-aarch64 +command -v $qemu >/dev/null || qemu=qemu-kvm + +echo less $serial_log + +kvm=( + $qemu + -machine virt-4.0,accel=kvm,gic-version=3 + -smp $nr_cpu + -m $memory + -cpu Kunpeng-920 + -device virtio-gpu-pci + -bios /usr/share/qemu-efi-aarch64/QEMU_EFI.fd + -nic tap,model=virtio-net-pci,helper=/usr/libexec/qemu-bridge-helper,br=br0,mac=${mac} + -k en-us + -no-reboot + -nographic + -serial file:${serial_log} + -monitor null +) +"${kvm[@]}" diff --git a/rootfs/tools/build-deps-pkg.sh b/rootfs/tools/build-deps-pkg.sh new file mode 100755 index 0000000000000000000000000000000000000000..5b43860d1b138c923f69e2733282054a0bcef898 --- /dev/null +++ b/rootfs/tools/build-deps-pkg.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# Exclude 'dev' or some files that do not need to perform cci-depends. 
+suffix_detect() +{ + [ ${1##*.} != $1 ] || [ ${1##*-} == 'dev' ] +} + +submit_job() +{ + command submit "$CCI_SRC/rootfs/build-deps-pkg.yaml" +} + +deps_generate_yaml() +{ + export suite='cci-depends' + + for benchmark + do + suffix_detect "$benchmark" && continue + [ -f "$LKP_SRC/distro/depends/$benchmark" ] || continue + + export benchmark + submit_job + done +} + +pkg_generate_yaml() +{ + export suite='cci-makepkg' + + for benchmark + do + [ -f "$LKP_SRC/pkg/$benchmark/PKGBUILD" ] || continue + + export benchmark + submit_job + done +} + +set_vars() +{ + local work_dir=$(pwd) + local os_path=${work_dir##*/rootfs/} + local os_array=($(echo "$os_path" | tr '/' ' ')) + + [[ "${work_dir}" == "${os_path}" ]] && { + echo "error: script execution path error" + echo "cd ${CCI_SRC}/rootfs/\$os_mount/\$os/\$os_arch/\$os_version; ./${0}" + exit 1 + } + + [[ "${#os_array[@]}" == 4 ]] || { + echo "error: expect 4 parameters, found ${#os_array[@]}" + exit 2 + } + + export os_mount="${os_array[0]}" + export os="${os_array[1]}" + export os_arch="${os_array[2]}" + export os_version="${os_array[3]}" +} + +main() +{ + set_vars + + if [ "$#" -gt 0 ]; then + [[ "$0" == 'build-depends' ]] && deps_generate_yaml "$@" + [[ "$0" == 'build-makepkg' ]] && pkg_generate_yaml "$@" + else + [[ "$0" == 'build-depends' ]] && deps_generate_yaml $(ls "$LKP_SRC"/distro/depends) + [[ "$0" == 'build-makepkg' ]] && pkg_generate_yaml $(ls "$LKP_SRC"/pkg) + fi +} + +main "$@" diff --git a/rootfs/tools/build-deps-pkg.yaml b/rootfs/tools/build-deps-pkg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..90854ae49f21510197b1565fe8bc2392c7548215 --- /dev/null +++ b/rootfs/tools/build-deps-pkg.yaml @@ -0,0 +1,12 @@ +--- +suite: <%= ENV['suite'] %> +category: functional +os: <%= ENV['os'] %> +os_arch: <%= ENV['os_arch'] %> +os_mount: <%= ENV['os_mount'] %> +os_version: <%= ENV['os_version'] %> + +"<%= ENV['suite'] %>": + benchmark: <%= ENV['benchmark'] %> + +testbox: dc-16g diff --git a/sbin/ameba b/sbin/ameba new file mode 100755 index 0000000000000000000000000000000000000000..d1e76c570072f6f8d817ad73147f40212ee3a98c --- /dev/null +++ b/sbin/ameba @@ -0,0 +1,6 @@ +#!/bin/sh +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +exec /c/ameba/bin/ameba -c $CCI_SRC/.ameba.yml "$@" + diff --git a/sbin/auto_submit b/sbin/auto_submit new file mode 100755 index 0000000000000000000000000000000000000000..808078194e247576e8e580ce257e500367c2b27d --- /dev/null +++ b/sbin/auto_submit @@ -0,0 +1,84 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
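+#
+# consumes "new_refs" messages published by lib/git_mirror.rb and, for each
+# matching repo, submits the jobs listed in sbin/auto_submit.yaml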
+# frozen_string_literal: true + +require 'bunny' +require 'json' +require 'yaml' + +# receive message and auto submit job +class AutoSubmit + def initialize + connection = Bunny.new('amqp://172.17.0.1:5672') + connection.start + channel = connection.create_channel + @queue = channel.queue('new_refs') + @repo2job = YAML.load_file("#{$PROGRAM_NAME}.yaml") + end + + def get_pkgbuild_repo(repo_array) + pkgbuild_repo = nil + repo_array.each do |repo| + next unless repo =~ /-git$/ + + pkgbuild_repo = "archlinux/#{repo}" + break + end + return pkgbuild_repo + end + + def submit(newrefs_info, submit_argv) + newrefs_info['new_refs']['heads'].each do |branch, commit_id| + commit_date = `git -C /srv/git/#{newrefs_info['git_repo']}.git log --format=%ct -1 #{commit_id}` + submit_argv.push("upstream_branch=#{branch.delete_prefix('refs/heads/')}") + submit_argv.push("upstream_commit=#{commit_id}") + submit_argv.push("commit_date=#{commit_date}") + + system(submit_argv.join(' ')) + end + end + + def get_argvs(newrefs_info) + git_repo = newrefs_info['git_repo'] + url = Array(newrefs_info['url'])[0] + submit_argv = ["#{ENV['LKP_SRC']}/sbin/submit upstream_repo=#{git_repo} upstream_url=#{url}"] + + if newrefs_info['pkgbuild_repo'] + pkgbuild_repo = get_pkgbuild_repo(newrefs_info['pkgbuild_repo']) + return unless pkgbuild_repo + + submit_argv.push("pkgbuild_repo=#{pkgbuild_repo}") + repo2job_key = 'archlinux' + else + return unless @repo2job[git_repo] + + repo2job_key = git_repo + end + [submit_argv, repo2job_key] + end + + def submit_job(newrefs_info) + submit_argv, repo2job_key = get_argvs(newrefs_info) + return unless submit_argv + + @repo2job[repo2job_key].each do |argv_config| + argvs = Array.new(submit_argv) + argvs.push(argv_config) + submit(newrefs_info, argvs) + end + end + + def listen + @queue.subscribe(block: true) do |_delivery, _properties, message| + Thread.new do + message_info = JSON.parse(message) + submit_job(message_info) + end + sleep(0.1) + end + end +end + +auto_submitter = AutoSubmit.new +auto_submitter.listen diff --git a/sbin/auto_submit.yaml b/sbin/auto_submit.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97f476dedb28eaa5337903c83798e5e1cc88c4f3 --- /dev/null +++ b/sbin/auto_submit.yaml @@ -0,0 +1,7 @@ +AvxToNeon/AvxToNeon: +- "testbox=vm-hi1620-2p8g os=centos os_arch=aarch64 os_version=7 api-avx2neon.yaml" +compass-ci/compass-ci: +- "testbox=vm-hi1620-2p8g os=openeuler os_arch=aarch64 os_version=20.03 INITRD_HTTP_HOST=$SCHED_HOST deploy-cci.yaml" +- "testbox=vm-hi1620-2p8g os=centos os_arch=aarch64 os_version=7 INITRD_HTTP_HOST=$SCHED_HOST deploy-cci.yaml" +archlinux: +- "testbox=vm-hi1620-2p8g os=openeuler os_arch=aarch64 os_version=20.03 build-pkg.yaml" diff --git a/sbin/compare b/sbin/compare new file mode 100755 index 0000000000000000000000000000000000000000..cf3980ccb89c66626d526f4620597f8b5776a8ef --- /dev/null +++ b/sbin/compare @@ -0,0 +1,67 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +# Usage: +# compare "conditions_1" "conditions_2" ... 
-c "common_conditions" +# compare "conditions" -d "dimensions" +# Eg: +# compare "id=6000,6001" "id=7000,7001" +# compare "commit=a12d232e" "commit=b3bacc31" +# compare "os=debian" "os=centos" -c "suite=iperf" +# compare "os=centos" -d "os_version os_arch" +# compare "os=centos" -d "os_version os_arch" --color "classic" + +require 'optparse' +require_relative '../lib/compare.rb' + +common_conditions = '' +is_group = false +dimensions = nil +colorful = nil +options = {} + +opt_parser = OptionParser.new do |opts| + opts.banner = 'Usage: compare "conditions" ... [option]' + + opts.separator '' + opts.separator 'a conditions can be "id=100, ..." or "suite=iperf os=debian ..."' + opts.separator '' + opts.separator 'options:' + + opts.on('-c', '--common common_conditions', 'common conditions are same with conditions', + 'and will merge with each conditions') do |c| + common_conditions = c + end + + opts.on('-d', '--dimension dimensions', 'dimensions to group compare: "tbox_group os_arch os ..."') do |d| + dimensions = d + is_group = true + end + + opts.on('--color color', 'turn on colorful display with theme: classic|focus_good|focus_bad', + '|striking|light|none') do |color| + colorful = color + end + + opts.on_tail('-h', '--help', 'show this message') do + puts opts + exit + end +end + +argv = if ARGV == [] + ['-h'] + else + ARGV + end +opt_parser.parse!(argv) + +options = { theme: colorful } if colorful + +if is_group + compare_group(argv, dimensions, options) +else + compare_matrices_list(argv, common_conditions, options) +end diff --git a/sbin/create-job-cpio.sh b/sbin/create-job-cpio.sh new file mode 100755 index 0000000000000000000000000000000000000000..e18bc0d52ac9ef30aec1eeff1f9e39c04934db39 --- /dev/null +++ b/sbin/create-job-cpio.sh @@ -0,0 +1,15 @@ +#!/bin/bash -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# +# input files: $1/job.sh $1/job.yaml +# output file: $1/job.cgz + +cd "$1" || exit + +install -m775 -D -t lkp/scheduled job.sh +install -m664 -D -t lkp/scheduled job.yaml + +find lkp | cpio --quiet -o -H newc | gzip > job.cgz + +rm -fr ./lkp diff --git a/sbin/crystal-format b/sbin/crystal-format new file mode 100755 index 0000000000000000000000000000000000000000..14f82b232a55b573d440cd013441057a4522cc86 --- /dev/null +++ b/sbin/crystal-format @@ -0,0 +1,22 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +pj_dir=$(git rev-parse --show-toplevel 2>/dev/null) +if [[ $pj_dir ]]; then + v_dir=$pj_dir +else + v_dir=$PWD +fi + +cmd=( + docker run + -u $UID + --rm + -v $v_dir:$v_dir + -w $PWD + alpine:crystal-complier + crystal tool format "$@" +) + +"${cmd[@]}" diff --git a/sbin/docker2osimage b/sbin/docker2osimage new file mode 100755 index 0000000000000000000000000000000000000000..8bfc2eb718fc98e6fe0cf9b9d30c364b9909cf2d --- /dev/null +++ b/sbin/docker2osimage @@ -0,0 +1,61 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# This script requires root permission to run. 
+# Origin script: +# https://gitee.com/tinylab/linux-lab/blob/master/tools/root/docker/extract.sh + +image=$1 +arch="$(uname -m)" +rootdir="./$(echo "$image" | sed 's/[^a-zA-Z0-9_]/-/g')" +image_name="$(echo "$image" | cut -d '/' -f 2 | cut -d ':' -f 1)-$arch.cgz" + +if [ $UID -ne 0 ]; then + echo "Please run with root user" + exit 1 +fi + +if [ -z "$image" ]; then + echo "Usage: $0 image" + exit 2 +fi + +echo "LOG: Now install docker" + +if command -v docker >/dev/null; then + true +else + if command -v yum >/dev/null; then + yum -y repolist + yum -y install docker + fi + + if command -v apt-get >/dev/null; then + apt-get -y update + apt-get -y install docker.io + fi +fi + +echo "LOG: Pulling $image" +docker pull "$image" || exit 3 + +echo "LOG: Running $image" +id=$(docker run -d "$image") + +echo "LOG: Creating temporary rootdir: $rootdir" +mkdir -p "$rootdir" + +echo "LOG: Extract docker image to $rootdir" +docker cp -a "$id":/ "$rootdir"/ + +# echo "LOG: Removing docker container" +# docker rm -f "$id" + +# echo "LOG: Removing docker image" +# docker image rm -f "$image" + +echo "LOG: Package rootfs" +cd "$rootdir" || exit 4 +find . | cpio -o -Hnewc | gzip -9 > ../"$image_name" +echo "Package: $(realpath ../"$image_name")" diff --git a/sbin/es-account.sh b/sbin/es-account.sh new file mode 100755 index 0000000000000000000000000000000000000000..f7702c77a826f1ae759029e1ccb93e8b30138b83 --- /dev/null +++ b/sbin/es-account.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# check whether accounts index has created +status_code=$(curl -sIL -w "%{http_code}\\n" -o /dev/null http://localhost:9200/accounts) + +if [ "$status_code" -eq 200 ] +then + echo "accounts index has been created, exit." +else + echo "begin create index." + curl -H 'Content-Type: Application/json' -XPUT 'http://localhost:9200/accounts' -d '{ + "mappings": { + "_doc": { + "dynamic": false, + "properties": { + "uuid": { + "type": "keyword" + }, + "email": { + "type": "keyword" + } + } + } + } + }' +fi diff --git a/sbin/es-find b/sbin/es-find new file mode 100755 index 0000000000000000000000000000000000000000..2397338933725f48dcc78494b441c14a1c626c63 --- /dev/null +++ b/sbin/es-find @@ -0,0 +1,59 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +# Usage: +# es-find key=val ... +# if given multi keys, they create AND conditions +# if given val1,val2,... for a key, they create OR conditions +# Eg: query (testbox is xxx or yyy) and (suite is aaa) and (user is bbb). +# es-find testbox=xxx,yyy suite=aaa user=bbb + +require 'yaml' +require 'optparse' +require 'json' +require_relative '../lib/es_query.rb' +require_relative '../container/defconfig.rb' +require_relative '../lib/constants.rb' + +def parse_argv + items = {} + ARGV.each do |item| + key, value = item.split('=') + if key && value + value_list = value.split(',') + items[key] = value_list.length > 1 ? value_list : value + end + end + items +end + +options = {} +options[:nr_jobs] = 10 +opt_parser = OptionParser.new do |opts| + opts.banner = 'Usage: es-find [options] search_key1=val1[,val2..] ..' + + opts.separator 'search_key can be id, suite, os, etc.' + opts.separator '' + + opts.on('-n', '--nr-jobs NUM', Integer, 'max jobs, default 10.') do |n| + if n.to_i <= 0 + puts 'The value of the -n option must be a positive number.' 
+ exit(-1) + end + options[:nr_jobs] = n.to_i + end + + opts.on_tail('-h', '--help', 'show this message') do + puts opts + exit + end +end + +opt_parser.parse!(ARGV) +items = parse_argv +es = ESQuery.new(ES_HOST, ES_PORT) +query_result = es.multi_field_query(items, size: options[:nr_jobs]) +json_string = JSON.pretty_generate(query_result) +puts json_string diff --git a/sbin/es-jobs-mapping.sh b/sbin/es-jobs-mapping.sh new file mode 100755 index 0000000000000000000000000000000000000000..9d420f7d9751fdf7d48a029b9fc12d981010d23a --- /dev/null +++ b/sbin/es-jobs-mapping.sh @@ -0,0 +1,148 @@ +#!/bin/sh +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# Determine whether curl is installed. If not, install curl. +if ! [ -x "$(command -v curl)" ] +then + echo "curl does not exist, try to install." + if [ -x "$(command -v apk)" ] + then + apk add curl + elif [ -x "$(command -v yum)" ] + then + yum install -y curl + elif [ -x "$(command -v apt-get)" ] + then + apt-get install -y curl + fi +else + echo "curl is already installed." +fi + +# Determine whether curl was installed successfully +if [ $? -ne 0 ] +then + echo "curl install failed, exit." + exit +fi + +# Determine whether jobs index has been created +status_code=$(curl -sIL -w "%{http_code}\n" -o /dev/null http://localhost:9200/jobs) + +if [ $status_code -eq 200 ] +then + echo "jobs index has been created, exit." +else + echo "jobs index does not exist, begin creating index." + curl -H 'Content-Type: Application/json' -XPUT 'http://localhost:9200/jobs' -d '{ + "mappings": { + "_doc": { + "dynamic_templates": [ + { + "pp": { + "path_match": "pp.*.*", + "mapping": { + "type": "keyword", + "enabled": true + } + } + }, + { + "default": { + "match": "*", + "unmatch": "pp", + "path_unmatch": "pp.*", + "mapping": { + "type": "object", + "enabled": false + } + } + } + ], + "properties": { + "suite": { + "type": "keyword" + }, + "category": { + "type": "keyword" + }, + "hw.nr_threads": { + "type": "keyword" + }, + "queue": { + "type": "keyword" + }, + "testbox": { + "type": "keyword" + }, + "tbox_group": { + "type": "keyword" + }, + "submit_id": { + "type": "keyword" + }, + "id": { + "type": "keyword" + }, + "hw.arch": { + "type": "keyword" + }, + "hw.model": { + "type": "keyword" + }, + "hw.nr_node": { + "type": "integer" + }, + "hw.nr_cpu": { + "type": "integer" + }, + "hw.memory": { + "type": "keyword" + }, + "os": { + "type": "keyword" + }, + "os_arch": { + "type": "keyword" + }, + "os_version": { + "type": "keyword" + }, + "upstream_repo": { + "type": "keyword" + }, + "upstream_commit": { + "type": "keyword" + }, + "enqueue_time": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss" + }, + "dequeue_time": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss" + }, + "user": { + "type": "keyword" + }, + "job_state": { + "type": "keyword" + }, + "start_time": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss" + }, + "end_time": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss" + } + } + } + } + }' + if [ $? -ne 0 ] + then + echo "failed to create jobs index."
+ fi +fi diff --git a/sbin/es-regression-mapping.sh b/sbin/es-regression-mapping.sh new file mode 100755 index 0000000000000000000000000000000000000000..7ef19cd598e2ece323cbc46c2116b7f6106facbb --- /dev/null +++ b/sbin/es-regression-mapping.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +# check whether regression index has been created +status_code=$(curl -sIL -w "%{http_code}\\n" -o /dev/null http://localhost:9200/regression) + +if [ "$status_code" -eq 200 ] +then + echo "regression index has been created, exit." +else + echo "begin creating index." + curl -H 'Content-Type: Application/json' -XPUT 'http://localhost:9200/regression' -d '{ + "mappings": { + "_doc": { + "dynamic": false, + "properties": { + "error_id": { + "type": "keyword" + }, + "job_id": { + "type": "keyword" + } + } + } + } + }' +fi diff --git a/sbin/git-mirror.rb b/sbin/git-mirror.rb new file mode 100755 index 0000000000000000000000000000000000000000..57eecb1a76605dfdc47842bbafe46efedfc69d71 --- /dev/null +++ b/sbin/git-mirror.rb @@ -0,0 +1,11 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require "#{ENV['CCI_SRC']}/lib/git_mirror" + +git_mirror = MirrorMain.new + +git_mirror.create_workers +git_mirror.main_loop diff --git a/sbin/mail-job b/sbin/mail-job new file mode 100755 index 0000000000000000000000000000000000000000..954c14934526e1e5673b5eca7c62a757816f1de0 --- /dev/null +++ b/sbin/mail-job @@ -0,0 +1,13 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +# Usage: +# mail-job job_id + +require_relative '../lib/mail_job_result.rb' + +job_id = ARGV[0] +mail = MailJobResult.new(job_id) +mail.send_mail diff --git a/sbin/result2stats b/sbin/result2stats new file mode 100755 index 0000000000000000000000000000000000000000..85d68a7821415c2b8a49e33030eb259152b3f091 --- /dev/null +++ b/sbin/result2stats @@ -0,0 +1,21 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: GPL-2.0-only + +# frozen_string_literal: true + +CCI_SRC = File.dirname(__dir__) + +require "#{CCI_SRC}/lib/matrix2" + +File.umask 0o002 + +def extract_stats(result_root) + ENV['RESULT_ROOT'] = result_root + job_script = result_root + '/job.sh' + + system job_script, 'extract_stats' +end + +result_root = ARGV[0] || ENV['RESULT_ROOT'] +extract_stats result_root +create_stats result_root diff --git a/sbin/scan-service b/sbin/scan-service new file mode 100755 index 0000000000000000000000000000000000000000..866d6c2f93e1915a5760e13e3d558b8551b3b1e0 --- /dev/null +++ b/sbin/scan-service @@ -0,0 +1,98 @@ +#!/bin/bash -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# +# config file locations can be: +# - /etc/compass-ci/defaults/scan.list +# - {HOME}/.config/compass-ci/defaults/scan.list +# - {PWD}/scan.list +# +# config file demo: # nmap param: -iL +# 1.2.3.4 +# 1.2.3.4/5 + +log_info() +{ + echo "$(date "+%Y%m%d %H%M%S ")[INFO] $*" +} + +pre_works() +{ + log_info "Starting pre work ..." +} + +nmap_scan() +{ + log_info "Starting nmap scan ..."
+} + +format_tab_header() +{ + local tab_header="$( + printf "$TAB_CONTENT_FORMAT" \ + "Server" "Port" "State" "Service Name" + )" + + cat <<-EOF >> "${RESULT_FILE}" + $TAB_SEPARATOR + $tab_header + $TAB_SEPARATOR + EOF +} + +format_tab_content() +{ + local service + for service + do + local s_arr=($service) + printf "${TAB_CONTENT_FORMAT}\n" \ + "${s_arr[0]}" "${s_arr[1]}" "${s_arr[2]}" "${s_arr[3]}"\ + >> "${RESULT_FILE}" + done +} + +format_result() +{ + log_info "Starting format result ..." + + local ld_server=15 + local ld_port=9 + local ld_state=13 + local ld_service_name=18 + TAB_SEPARATOR="$( + printf "|-%s-|-%s-|-%s-|-%s-|" \ + "$(printf "%0.s-" $(seq "$ld_server"))" \ + "$(printf "%0.s-" $(seq "$ld_port"))" \ + "$(printf "%0.s-" $(seq "$ld_state"))" \ + "$(printf "%0.s-" $(seq "$ld_service_name"))" + )" + + TAB_CONTENT_FORMAT="| %-${ld_server}s | %-${ld_port}s | %-${ld_state}s | %-${ld_service_name}s |" + + [ "${#NMAP_RESULTS[@]}" -eq "0" ] || { + format_tab_header + format_tab_content "${NMAP_RESULTS[@]}" + echo "$TAB_SEPARATOR" >> "${RESULT_FILE}" + } +} + +post_works() +{ + log_info "Starting post work ..." + + log_info "Scan finished." +} + +main() +{ + pre_works + + nmap_scan + + format_result + + post_works +} + +main diff --git a/sbin/setup-overcommit b/sbin/setup-overcommit new file mode 100755 index 0000000000000000000000000000000000000000..45ec07a56781caa2074ef837fba74c80d61c4c08 --- /dev/null +++ b/sbin/setup-overcommit @@ -0,0 +1,32 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +shopt -s nullglob + +# Should remove personal gem installs, so that the global config take effect: +# rm -fr /home/*/.gem/ruby/gems/overcommit-* +# rm -fr /home/*/.gem/ruby/gems/rubocop-0.* +# rm /home/*/.gem/ruby/specifications/overcommit-* +# rm /home/*/.gem/ruby/specifications/rubocop-0.* + +git_dirs=("$HOME"/*/.git/ "$HOME"/*/*/.git/ "$HOME"/.*/*/.git/) + +for git_dir in "${git_dirs[@]}" +do + [[ $git_dir =~ 'overcommit' ]] && continue + [[ $git_dir =~ 'igcommit' ]] && continue + + cd "$git_dir/.." || continue + echo "$PWD" + + [[ $git_dir =~ 'lkp-tests' ]] && { # too many old files + echo UNINSTALL overcommit from "$git_dir" ...... + overcommit --uninstall + continue + } + + overcommit --install --force + overcommit --sign + rm -f .overcommit.yml # should use global config +done diff --git a/sparrow/0-package/common b/sparrow/0-package/common new file mode 100755 index 0000000000000000000000000000000000000000..1bb6861085aecd1972109e1994e766d7378f6363 --- /dev/null +++ b/sparrow/0-package/common @@ -0,0 +1,38 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +gem install git activesupport rest-client cucumber json faye-websocket elasticsearch + +grep -q '^lkp:' /etc/passwd || adduser -u 1090 lkp +grep -q '^team:' /etc/group || groupadd team +grep -q '^committer:' /etc/group || groupadd -g 1999 committer + +cat >> /etc/sysctl.conf <> /etc/modules-load.d/nfs <> /etc/modules-load.d/cifs < /etc/docker/daemon.json <> /etc/fstab <> /etc/qemu/bridge.conf diff --git a/sparrow/2-network/cifs b/sparrow/2-network/cifs new file mode 100755 index 0000000000000000000000000000000000000000..74aa65f47930ad854e91e138a2ab30aa0a8b839d --- /dev/null +++ b/sparrow/2-network/cifs @@ -0,0 +1,10 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
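+# +# This script loads the cifs kernel module immediately and registers it +# in /etc/modules-load.d so systemd loads it again on every boot. +# Illustrative check after running: lsmod | grep -w cifs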
+ +modprobe cifs + +[[ -f /etc/modules-load.d/cci-cifs.conf ]] || +cat > /etc/modules-load.d/cci-cifs.conf < /etc/modules-load.d/cci-nfs.conf < /etc/compass-ci/defaults/$server_name.yaml < /etc/profile.d/crystal.sh <<'EOF' +export LKP_SRC=/c/lkp-tests +export CCI_SRC=/c/compass-ci + +export PATH="$PATH:$CCI_SRC/sbin:$LKP_SRC/sbin:$LKP_SRC/bin" +EOF + diff --git a/sparrow/3-code/git b/sparrow/3-code/git new file mode 100755 index 0000000000000000000000000000000000000000..b1d2c036dfb38ce8790a7a0a392e7294578deda2 --- /dev/null +++ b/sparrow/3-code/git @@ -0,0 +1,18 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# download git trees + +cd /c || exit + +# git clone https://gitee.com/openeuler/compass-ci.git +# modify and manual run for now: +[ -d "compass-ci/.git" ] || { + git clone https://gitee.com/wu_fengguang/compass-ci.git + ln -s compass-ci cci +} + +[ -d "lkp-tests/.git" ] || { + git clone https://gitee.com/wu_fengguang/lkp-tests.git +} diff --git a/sparrow/4-docker/buildall b/sparrow/4-docker/buildall new file mode 100755 index 0000000000000000000000000000000000000000..230c6acfe48563e1481618c8fe3a820df686f2c7 --- /dev/null +++ b/sparrow/4-docker/buildall @@ -0,0 +1,49 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +CONTAINER_PATH="$CCI_SRC/container" + +build_depends() +{ + local container=$1 + local should_wait=$2 + + for dep in $(cat $container/*-depends 2> /dev/null) + do + build_depends $CONTAINER_PATH/$dep block_wait + done + + if [ -n "$should_wait" ]; then + do_one $container + else + do_one $container & + fi +} + +do_one() +{ + local container=$1 + local container_name=$(basename $container) + lockfile-create -q --use-pid --retry 100 --lock-name "$container_name".lock + mkdir $tmpdir/$container_name 2>/dev/null && + ( + cd $container + [ "$container_name" == 'sub-fluentd' ] && exit + [ -x build ] && ./build + [ -x install ] && ./install + [ -x first-run ] && ./first-run + [ -x start ] && ./start + ) + lockfile-remove --lock-name "$container_name".lock +} + +tmpdir=$(mktemp -d) + +for dir in $CONTAINER_PATH/*/ +do + build_depends $dir +done + +wait +rm -fr $tmpdir diff --git a/sparrow/5-build/ameba b/sparrow/5-build/ameba new file mode 100755 index 0000000000000000000000000000000000000000..71c5983751bab11235314816d9730b10eb45e347 --- /dev/null +++ b/sparrow/5-build/ameba @@ -0,0 +1,11 @@ +#!/bin/bash -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +cd /c + +git clone https://github.com/crystal-ameba/ameba + +cd ameba + +crystal build src/cli.cr -o bin/ameba diff --git a/sparrow/5-build/ipxe b/sparrow/5-build/ipxe new file mode 100755 index 0000000000000000000000000000000000000000..a66e2982c87670af490fd187ab2a15925d3597f3 --- /dev/null +++ b/sparrow/5-build/ipxe @@ -0,0 +1,25 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
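+# +# Builds the arm64 EFI iPXE binaries and publishes them under +# /tftpboot/ipxe/ for network-boot clients. Illustrative check: +#   ls /tftpboot/ipxe/bin-arm64-efi/*.efi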
+ +make() +{ + command make -j $cpus "$@" +} + +cd /c || exit + +git clone -b custom-config https://gitee.com/wu_fengguang/ipxe.git + +cd ipxe/src || exit + +cpus=$(nproc) + +make ARCH=arm64 bin-arm64-efi/ipxe.efi +make ARCH=arm64 bin-arm64-efi/snponly.efi +make ARCH=arm64 bin-arm64-efi/snp.efi +make CONFIG=rpi bin-arm64-efi/rpi.efi + +mkdir -p /tftpboot/ipxe/bin-arm64-efi/ +cp -a bin-arm64-efi/*.efi /tftpboot/ipxe/bin-arm64-efi/ +chmod 664 /tftpboot/ipxe/bin-arm64-efi/*.efi diff --git a/sparrow/5-build/overcommit b/sparrow/5-build/overcommit new file mode 100755 index 0000000000000000000000000000000000000000..e8add6f7d5def18391347b7b8354ff6bbd157c91 --- /dev/null +++ b/sparrow/5-build/overcommit @@ -0,0 +1,13 @@ +#!/bin/sh -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +cd /c + +git clone https://github.com/sds/overcommit.git || exit + +cd overcommit + +oc_build_out=$(gem build overcommit.gemspec | grep "overcommit-.*\.gem") + +gem install "$oc_build_out" diff --git a/sparrow/6-test/docker b/sparrow/6-test/docker new file mode 100755 index 0000000000000000000000000000000000000000..3d6796d87044d9dab04edaec8ba11815f12a5c19 --- /dev/null +++ b/sparrow/6-test/docker @@ -0,0 +1,7 @@ +#!/bin/bash -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +## TODO +# 1.submit a job +# 2.run docker.sh diff --git a/sparrow/6-test/qemu b/sparrow/6-test/qemu new file mode 100755 index 0000000000000000000000000000000000000000..1e92a474f93b7a5e28f12cb1117bc01dcbc3dc9d --- /dev/null +++ b/sparrow/6-test/qemu @@ -0,0 +1,16 @@ +#!/bin/bash -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +run_job() +{ + submit -s "testbox: $1--$USER" /c/cci/user-client/jobs/iperf-sparrow.yaml + ( + export tbox_group=$1 + cd /c/cci/providers && ./my-qemu.sh + ) +} + +dmidecode -s system-product-name | grep -iq "virtual" && exit +run_job vm-hi1620-2p8g +run_job vm-pxe-hi1620-2p8g diff --git a/sparrow/7-systemd/cci-network b/sparrow/7-systemd/cci-network new file mode 100755 index 0000000000000000000000000000000000000000..4aadcdc8bc854b3fb10ca524fdb1e3a94d160aa8 --- /dev/null +++ b/sparrow/7-systemd/cci-network @@ -0,0 +1,16 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +#this file used in our own systemd unit file: cci-network.service. +#contents as follows will be automatically executed after system reboot. 
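+# +# Illustrative check after a reboot: +#   systemctl status cci-network.service +#   docker ps    # previously created containers should be up again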
+ +[[ $CCI_SRC ]] || CCI_SRC=/c/compass-ci + +$CCI_SRC/sparrow/2-network/br0 +$CCI_SRC/sparrow/2-network/iptables +$CCI_SRC/sparrow/2-network/nfs +$CCI_SRC/sparrow/2-network/cifs + +# --restart=always option is not absolutely reliable +containers=($(docker ps -a |grep -v NAMES |awk '{print $NF}')) +[ -n "$containers" ] && docker start "${containers[@]}" >/dev/null 2>&1 diff --git a/sparrow/7-systemd/cci-network.service b/sparrow/7-systemd/cci-network.service new file mode 100644 index 0000000000000000000000000000000000000000..3a0a661f8eefcfdac1ef6413983aee769e7d925d --- /dev/null +++ b/sparrow/7-systemd/cci-network.service @@ -0,0 +1,11 @@ +[Unit] +Description=Setup network for CCI qemu boxes +After=docker.service + +[Service] +Type=simple +TimeoutStartSec=0 +ExecStart=/c/compass-ci/sparrow/7-systemd/cci-network + +[Install] +WantedBy=default.target diff --git a/sparrow/7-systemd/systemd-setup b/sparrow/7-systemd/systemd-setup new file mode 100755 index 0000000000000000000000000000000000000000..926eb8147413c45abf5857956e0424df7b427505 --- /dev/null +++ b/sparrow/7-systemd/systemd-setup @@ -0,0 +1,8 @@ +#!/bin/bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +cp $(dirname $0)/cci-network.service /etc/systemd/system +systemctl daemon-reload +systemctl enable cci-network.service +systemctl start cci-network.service diff --git a/sparrow/README.md b/sparrow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ccb1d150c783b0dcf57d483622279c37bf3e758a --- /dev/null +++ b/sparrow/README.md @@ -0,0 +1,26 @@ +Deploy a mini instance on a single machine. + +Prepare: + 1. hardware + Server: prepare at least one server + ProductType: TaiShan200-2280 + Arch: aarch64 + Memory: 8G + CPU: 64 cores + DiskSpace: 500G + + 2. software + OS: openEuler-aarch64-20.03 LTS + git: 2.23.0 suggested + + 3. network + Internet access is available + + 4. /os for storing rootfs + >= 300G + +Steps: + umask 002 + git clone https://gitee.com/wu_fengguang/compass-ci.git + cd compass-ci/sparrow + ./install-tiny diff --git a/sparrow/install-tiny b/sparrow/install-tiny new file mode 100755 index 0000000000000000000000000000000000000000..063f872835d09a9902e3bf91866d3f92cc8a015c --- /dev/null +++ b/sparrow/install-tiny @@ -0,0 +1,22 @@ +#!/bin/bash -e +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# For your reference only. +# It's better to run the below scripts step by step. + +[[ $CCI_SRC ]] || export CCI_SRC=$(cd $(dirname $(realpath $0)); git rev-parse --show-toplevel) +cd $CCI_SRC/sparrow || exit + +0-package/install +1-storage/tiny +1-storage/permission +2-network/br0 +2-network/iptables +2-network/nfs +2-network/cifs +3-code/git +3-code/dev-env +. /etc/profile.d/crystal.sh +4-docker/buildall +5-build/ipxe +7-systemd/systemd-setup diff --git a/src/0_st_env b/src/0_st_env new file mode 100755 index 0000000000000000000000000000000000000000..a1c7ef500114f89f1975662cbcba4b01cbdc0b8a --- /dev/null +++ b/src/0_st_env @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +docker stop s001-alpine + +SCHED_DEBUG_DIR=/c/cci/scheduler +DIR=$(dirname $(realpath $0)) + +[[ $LKP_SRC ]] || LKP_SRC=/c/lkp-tests +[[ $CCI_SRC ]] || CCI_SRC=/c/compass-ci + +if [ !
-d test ]; then + mkdir test +fi + +cmd=( + docker run + --rm + -e LKP_SRC=/c/lkp-tests + -e CCI_SRC=/c/compass-ci + -e CRYSTAL_PATH="lib:/usr/share/crystal/app/lib:/usr/lib/crystal/shards:/usr/lib/crystal/core:/c/lkp-tests/lib:$SCHED_DEBUG_DIR" + -it + -p 3000:3000 + -u $UID + -v $DIR:$SCHED_DEBUG_DIR + -v $LKP_SRC:/c/lkp-tests + -v $CCI_SRC:/c/compass-ci + -v $DIR/test:/result + -w $SCHED_DEBUG_DIR + alpine:scheduler-dev + sh +) + +"${cmd[@]}" + + +docker restart s001-alpine diff --git a/src/1_llt_env b/src/1_llt_env new file mode 100755 index 0000000000000000000000000000000000000000..9055ebd5a02a3f88df641920703f793fa4b1cecc --- /dev/null +++ b/src/1_llt_env @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +SCHED_DEBUG_DIR=/c/cci/scheduler +DIR=$(dirname $(realpath $0)) +[[ $LKP_SRC ]] || LKP_SRC=/c/lkp-tests +[[ $CCI_SRC ]] || CCI_SRC=/c/compass-ci + +if [ ! -d test ]; then + mkdir test +fi + +cmd=( + docker run + --rm + -e LKP_SRC=/c/lkp-tests + -e CCI_SRC=/c/compass-ci + -e REDIS_PORT=6380 + -e ES_PORT=9201 + -e CRYSTAL_PATH="lib:/usr/share/crystal/app/lib:/usr/lib/crystal/shards:/usr/lib/crystal/core:/c/lkp-tests/lib:$SCHED_DEBUG_DIR" + -it + -u $UID + -v $DIR:$SCHED_DEBUG_DIR + --mount type=tmpfs,destination=/result + -v $LKP_SRC:/c/lkp-tests + -v $CCI_SRC:/c/compass-ci + --mount type=tmpfs,destination=/initrd + -w $SCHED_DEBUG_DIR + alpine:scheduler-dev + sh +) + +"${cmd[@]}" + diff --git a/src/delimiter.rb b/src/delimiter.rb new file mode 100644 index 0000000000000000000000000000000000000000..bb73c5fd8e24b7bd047e7f7290ed41aa91edcf02 --- /dev/null +++ b/src/delimiter.rb @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require_relative './delimiter/constants' +require_relative './delimiter/delimiter' + +START_PROCESS_COUNT.times do + begin + Process.fork do + delimiter = Delimiter.new + delimiter.start_delimit + end + rescue StandardError => e + puts e + end +end + +sleep() diff --git a/src/delimiter/constants.rb b/src/delimiter/constants.rb new file mode 100644 index 0000000000000000000000000000000000000000..7a17b36a2c619ab3d0d3b3ef45a23fb613a6ad03 --- /dev/null +++ b/src/delimiter/constants.rb @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +START_PROCESS_COUNT = 10 +GIT_MIRROR_HOST = ENV['GIT_MIRROR_HOST'] || '172.17.0.1' +MONITOR_HOST = ENV['MONITOR_HOST'] || '172.17.0.1' +MONITOR_PORT = ENV['MONITOR_PORT'] || '11310' +TMEP_GIT_BASE = '/c/public_git' +DELIMITER_TASK_QUEUE = 'delimiter' +BISECT_RUN_SCRIPT = "#{ENV['CCI_SRC']}/src/delimiter/find-commit/bisect_run_script.rb" +DELIMITER_TBOX_GROUP = 'vm-hi1620-2p8g--delimiter' diff --git a/src/delimiter/delimiter.rb b/src/delimiter/delimiter.rb new file mode 100644 index 0000000000000000000000000000000000000000..9cfc4ad6fa6c7c2946898baaef08e8e869dd183c --- /dev/null +++ b/src/delimiter/delimiter.rb @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
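+# +# A consumed delimiter task is expected to be a JSON object of the form +# (field names as used in git_bisect.rb, values illustrative): +#   {"error_id" => "build-pkg.some-error", "job_id" => "123456"}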
+# frozen_string_literal: true + +require 'json' + +require_relative './constants' +require_relative './find-commit/git_bisect' +require_relative '../../lib/taskqueue_client' +require_relative '../../lib/mail_bisect_result' + +# consume assister task queue +class Delimiter + def initialize + @tq = TaskQueueClient.new + end + + def start_delimit + loop do + begin + # consume delimiter task queue + task = consume_delimiter_queue + unless task + sleep(2) + next + end + + # find first bad commit based on the task + git_bisect = GitBisect.new task + result = git_bisect.find_first_bad_commit + + # send mail + mbr = MailBisectResult.new result + mbr.create_send_email + rescue StandardError => e + puts e + sleep(30) + end + end + end + + private + + def consume_delimiter_queue + response = @tq.consume_task(DELIMITER_TASK_QUEUE) + return unless response.code == 200 + + body = JSON.parse(response.body).to_hash + return body + end +end diff --git a/src/delimiter/find-commit/bisect_run_script.rb b/src/delimiter/find-commit/bisect_run_script.rb new file mode 100755 index 0000000000000000000000000000000000000000..539599fb7bfada58b7ff634471a2bbbeebb52ade --- /dev/null +++ b/src/delimiter/find-commit/bisect_run_script.rb @@ -0,0 +1,48 @@ +#!/usr/bin/env ruby +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# frozen_string_literal: true + +require 'json' +require_relative "#{ENV['CCI_SRC']}/lib/es_query" +require_relative "#{ENV['CCI_SRC']}/src/delimiter/utils" + +# git bisect run +class GitBisectRun + def initialize(job_id, error_id, tbox_group, work_dir) + @es = ESQuery.new + @job_id = job_id + @error_id = error_id + @tbox_group = tbox_group + @work_dir = work_dir + end + + def git_bisect + job = @es.query_by_id @job_id + job.delete('stats') if job.key?('stats') + job['tbox_group'] = @tbox_group + commit = `git -C #{@work_dir} log --pretty=format:"%H" -1` + job['upstream_commit'] = commit + get_bisect_status job + end + + private + + def get_bisect_status(job) + status = Utils.get_job_status(job, @error_id) + exit 125 unless status + + exit 1 if status.eql?('bad') + + exit 0 + end +end + +job_id = ARGV[0] +error_id = ARGV[1] +tbox_group = ARGV[2] +work_dir = ARGV[3] + +run = GitBisectRun.new job_id, error_id, tbox_group, work_dir +run.git_bisect diff --git a/src/delimiter/find-commit/git_bisect.rb b/src/delimiter/find-commit/git_bisect.rb new file mode 100644 index 0000000000000000000000000000000000000000..8a1646bdfb4f664bce79b6acc9f85619a7f4b57e --- /dev/null +++ b/src/delimiter/find-commit/git_bisect.rb @@ -0,0 +1,132 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +require 'json' +require_relative '../../../lib/es_query' +require_relative '../../../lib/sched_client' +require_relative "#{ENV['LKP_SRC']}/lib/monitor" + +require_relative '../utils' + +# find the first bad commit +class GitBisect + def initialize(task) + @es = ESQuery.new + @task = task + end + + def find_first_bad_commit + # set object property + set_ids + set_bad_job + set_upstream + set_work_dir + set_good_commit + + start_bisect + end + + private + + def set_ids + puts "task content: #{@task}" + @error_id = @task['error_id'] + @bad_job_id = @task['job_id'] + end + + def set_bad_job + @bad_job = @es.query_by_id @bad_job_id + raise "es query job id: #{@bad_job_id} failed!" 
unless @bad_job + + @bad_job.delete('stats') + @bad_job.delete('id') + @bad_job.delete('error_ids') + @bad_job['tbox_group'] = DELIMITER_TBOX_GROUP + end + + def set_upstream + @upstream_repo = @bad_job['upstream_repo'] + @upstream_commit = @bad_job['upstream_commit'] + puts "upstream_repo: #{@upstream_repo}" + puts "upstream_commit: #{@upstream_commit}" + raise 'upstream info is null' unless @upstream_repo && @upstream_commit # both fields are required + + @upstream_repo_git = "git://#{GIT_MIRROR_HOST}/#{@upstream_repo}" + end + + def set_work_dir + @work_dir = Utils.clone_repo(@upstream_repo_git, @upstream_commit) + puts "work_dir: #{@work_dir}" + raise "checkout repo: #{@upstream_repo} to commit: #{@upstream_commit} failed!" unless @work_dir + end + + def set_good_commit + @good_commit = find_good_commit + raise 'can not find a good commit' unless @good_commit + end + + # run 'git bisect start' with upstream_commit and good_commit + # then run the bisect script to get the bisect info + def start_bisect + puts "bad_commit: #{@upstream_commit}" + puts "good_commit: #{@good_commit}" + + result = `git -C #{@work_dir} bisect start #{@upstream_commit} #{@good_commit}` + temp = result.split(/\n/) + if temp[0].include? 'Bisecting' + result = `git -C #{@work_dir} bisect run #{BISECT_RUN_SCRIPT} #{@bad_job_id} "#{@error_id}" \ + "#{DELIMITER_TBOX_GROUP}" #{@work_dir}` + end + FileUtils.rm_r(@work_dir) if Dir.exist?(@work_dir) + puts "\nbisect result: #{result}" + analyse_result(result) + end + + def analyse_result(result) + temp = result.split(/\n/) + return nil unless temp[0].include?('is the first bad commit') || temp[-1].include?('bisect run success') + + first_bad_commit = Utils.parse_first_bad_commit(result) + + return Hash['repo' => @upstream_repo, 'commit' => first_bad_commit, + 'job_id' => @bad_job_id, 'error_id' => @error_id] + end + + # first search for the good commit in the db + # then search for the good commit by job + def find_good_commit + good_commit = find_good_commit_by_db + return good_commit if good_commit + + good_commit = find_good_commit_by_job + return good_commit if good_commit + end + + def find_good_commit_by_db + # todo + return nil + end + + # get an array of commits at day offsets from the upstream commit + # return the offset commit whose status is good, or return nil + def find_good_commit_by_job + day_agos = [1, 3, 10, 30] + commits = Utils.get_test_commits(@work_dir, @upstream_commit, day_agos) + puts "commits: #{commits}" + commits.each do |commit| + commit_status = get_commit_status_by_job(commit) + next unless commit_status + return commit if commit_status == 'good' + end + + return nil + end + + # get the commit status by submitting the bad job + # according to the job stats, return good/bad/nil + def get_commit_status_by_job(commit) + @bad_job['upstream_commit'] = commit + return Utils.get_job_status(@bad_job, @error_id) + end +end diff --git a/src/delimiter/utils.rb b/src/delimiter/utils.rb new file mode 100644 index 0000000000000000000000000000000000000000..a50e119c54898f2c0ab6016b1ed296c01fc52eea --- /dev/null +++ b/src/delimiter/utils.rb @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
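+# +# Sketch of the candidate-commit selection below (dates illustrative): +# for a bad commit dated 2020-09-10 and day_agos [1, 3, 10, 30], +# get_test_commits picks the newest first-parent ancestors committed +# before 2020-09-09, 2020-09-07, 2020-08-31 and 2020-08-11.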
+# frozen_string_literal: true + +require 'set' +require 'json' +require 'fileutils' + +require_relative './constants' +require_relative '../../lib/sched_client' +require_relative "#{ENV['LKP_SRC']}/lib/monitor" + +# a utils module for the delimiter service +module Utils + class << self + def clone_repo(repo, commit) + repo_root = "#{TMEP_GIT_BASE}/#{File.basename(repo, '.git')}-#{`echo $$`}".chomp + FileUtils.rm_r(repo_root) if Dir.exist?(repo_root) + system("git clone -q #{repo} #{repo_root} && git -C #{repo_root} checkout -q #{commit}") ? repo_root : nil + end + + def get_test_commits(work_dir, commit, day_agos) + commits = Set.new + day_agos.each do |day_ago| + temp_commit = get_day_ago_commit(work_dir, commit, day_ago) + commits << temp_commit if temp_commit + end + commits << get_last_commit(work_dir, commit) if commits.empty? + return commits.to_a + end + + def get_day_ago_commit(work_dir, commit, day_ago) + date = `git -C #{work_dir} rev-list --first-parent --pretty=format:%cd \ + --date=short #{commit} -1 | sed -n 2p`.chomp! + before = `date -d '-#{day_ago} day #{date}' +%Y-%m-%d`.chomp! + day_ago_commit = `git -C #{work_dir} rev-list --before=#{before} \ + --pretty=format:%H --first-parent #{commit} -1 | sed -n 2p`.chomp! + return day_ago_commit + end + + def get_last_commit(work_dir, commit) + last_commit = `git -C #{work_dir} rev-list --first-parent #{commit} -2 | sed -n 2p`.chomp! + return last_commit + end + + def parse_first_bad_commit(result) + result = result.split(/\n/) + result.each do |item| + # b9e2a2fe56e92f4fe5ac15251ab3f77d645fbf82 is the first bad commit + return item.split(/ /)[0] if item.end_with? 'is the first bad commit' + end + end + + def monitor_run_stop(query) + monitor = Monitor.new("ws://#{MONITOR_HOST}:#{MONITOR_PORT}/filter") + monitor.query = query + monitor.action = { 'stop' => true } + return monitor.run + end + + def submit_job(job) + sched = SchedClient.new + response = sched.submit_job(job.to_json) + puts "submit job response: #{response}" + res_arr = JSON.parse(response) + return nil if res_arr.empty? || !res_arr[0]['message'].empty? + + # just consider build-pkg job + return res_arr[0]['job_id'] + end + + # submit the bad job + # monitor the job id and job state; query job stats when job state is extract_finished + # according to the job stats, return good/bad/nil + def get_job_status(job, error_id) + new_job_id = submit_job(job) + puts "new job id: #{new_job_id}" + return nil unless new_job_id + + query = { 'job_id': new_job_id, 'job_state': 'extract_finished' } + extract_finished = monitor_run_stop(query) + return nil unless extract_finished.zero? + + es = ESQuery.new + new_job = es.query_by_id(new_job_id) + return 'bad' if new_job['stats'].key?(error_id) + + return 'good' + end + end +end diff --git a/src/extract-stats.cr b/src/extract-stats.cr new file mode 100644 index 0000000000000000000000000000000000000000..2f4b97d4691607680ebe749820e2bf104b12ced1 --- /dev/null +++ b/src/extract-stats.cr @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
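+ +# Data flow sketch (queue and file names from src/extract-stats): consume +# the "extract_stats" queue -> run sbin/result2stats on the job's +# result_root -> write stats.json back to the job in ES -> any new +# error_ids trigger a "delimiter" bisect task.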
+ +require "./extract-stats/extract_stats" + +# results data post processing + +ExtractStats.in_extract_stats diff --git a/src/extract-stats/constants.cr b/src/extract-stats/constants.cr new file mode 100644 index 0000000000000000000000000000000000000000..332962a561dd28b4f0b77c0b9a9bee6640fd322f --- /dev/null +++ b/src/extract-stats/constants.cr @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +EXTRACT_STATS_QUEUE_PATH = "extract_stats" +STATS_WORKER_COUNT = 10 + +DELIMITER_TASK_QUEUE = "delimiter" +ERROR_ID_FILES = %w(build-pkg.json) diff --git a/src/extract-stats/extract_stats.cr b/src/extract-stats/extract_stats.cr new file mode 100644 index 0000000000000000000000000000000000000000..d3f32034044b813cfe218bd50d710225cc96b80c --- /dev/null +++ b/src/extract-stats/extract_stats.cr @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "./constants" +require "./stats_worker" + +module ExtractStats + # Consume scheduler queue + def self.in_extract_stats + self.back_fill_task + STATS_WORKER_COUNT.times do + Process.fork { + self.consume_task + } + end + + # keep main-process alive + sleep() + end + + def self.consume_task + worker = StatsWorker.new + worker.consume_sched_queue(EXTRACT_STATS_QUEUE_PATH) + end + + def self.back_fill_task + worker = StatsWorker.new + worker.back_fill_task(EXTRACT_STATS_QUEUE_PATH) + end +end diff --git a/src/extract-stats/regression_client.cr b/src/extract-stats/regression_client.cr new file mode 100644 index 0000000000000000000000000000000000000000..05286a5ccc092d2b28f4ad222afdf6c6b0339dbf --- /dev/null +++ b/src/extract-stats/regression_client.cr @@ -0,0 +1,45 @@ +require "elasticsearch-crystal/elasticsearch/api" +require "../scheduler/constants.cr" + +class RegressionClient + HOST = (ENV.has_key?("ES_HOST") ? ENV["ES_HOST"] : JOB_ES_HOST) + PORT = (ENV.has_key?("ES_PORT") ? ENV["ES_PORT"] : JOB_ES_PORT).to_i32 + + def initialize(host = HOST, port = PORT) + @client = Elasticsearch::API::Client.new({:host => host, :port => port}) + end + + def store_error_info(error_id : String, job_id : String) + @client.create({ + :index => "regression", + :type => "_doc", + :body => { + "error_id" => error_id, + "job_id" => job_id, + }, + }) + end + + def check_error_id(error_id : String) + query_body = { + "query" => { + "term" => { + "error_id" => error_id, + }, + }, + } + result = @client.search({ + :index => "regression", + :type => "_doc", + :body => query_body, + }) + raise "query failed." unless result["hits"]? || result["hits"]["total"]? + total = result["hits"]["total"] + if total.is_a?(JSON::Any) + total = total.as_i + else + raise "query result type error." + end + return total > 0 + end +end diff --git a/src/extract-stats/stats_worker.cr b/src/extract-stats/stats_worker.cr new file mode 100644 index 0000000000000000000000000000000000000000..9ea882ee613975fb823d37df64add5d8932c0cb3 --- /dev/null +++ b/src/extract-stats/stats_worker.cr @@ -0,0 +1,121 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+ +require "../lib/taskqueue_api" +require "../scheduler/elasticsearch_client" +require "../scheduler/redis_client" +require "../scheduler/constants" +require "./regression_client" +require "./constants.cr" + +class StatsWorker + def initialize + @es = Elasticsearch::Client.new + @tq = TaskQueueAPI.new + @rc = RegressionClient.new + end + + def consume_sched_queue(queue_path : String) + loop do + begin + response = @tq.consume_task(queue_path) + rescue e + STDERR.puts e.message + next + end + if response[0] == 200 + job_id = JSON.parse(response[1].to_json)["id"] + + job = @es.get_job(job_id.to_s) + if job + result_root = job.result_root + # extract stats.json + system "#{ENV["CCI_SRC"]}/sbin/result2stats #{result_root}" + # storage job to es + begin + store_stats_es(result_root, job) if result_root + # send mail to submitter for job results + system "#{ENV["CCI_SRC"]}/sbin/mail-job #{job_id}" + rescue e + STDERR.puts e.message + next + end + end + + @tq.delete_task(queue_path + "/in_process", "#{job_id}") + else + sleep(2) + end + end + end + + def store_stats_es(result_root : String, job : Job) + stats_path = "#{result_root}/stats.json" + raise "#{stats_path} file not exists." unless File.exists?(stats_path) + + stats = File.open(stats_path) do |file| + JSON.parse(file) + end + + job_stats = {"stats" => stats.as_h} + job.update(job_stats) + + error_ids = get_error_ids_by_json(result_root) + job.update(JSON.parse({"error_ids" => error_ids}.to_json)) unless error_ids.empty? + + @es.set_job_content(job) + + new_error_ids = check_new_error_ids(error_ids, job.id) + unless new_error_ids.empty? + STDOUT.puts "send a delimiter task: job_id is #{job.id}" + @tq.add_task(DELIMITER_TASK_QUEUE, JSON.parse({"error_id" => new_error_ids.sample, + "job_id" => job.id}.to_json)) + end + puts %({"job_id": "#{job.id}", "job_state": "extract_finished"}) + end + + def check_new_error_ids(error_ids : Array, job_id : String) + new_error_ids = [] of String + error_ids.each do |error_id| + begin + is_exists = @rc.check_error_id error_id + rescue e + STDERR.puts e.message + next + end + next if is_exists + new_error_ids << error_id + @rc.store_error_info error_id, job_id + end + new_error_ids + end + + def get_error_ids_by_json(result_root : String) + error_ids = [] of String + ERROR_ID_FILES.each do |filename| + filepath = File.join(result_root, filename) + next unless File.exists?(filepath) + content = File.open(filepath) do |file| + JSON.parse(file) + end + error_ids.concat(content.as_h.keys) + end + error_ids + end + + def back_fill_task(queue_path) + redis_client = Redis::Client.new + # this queue may have leftover task_ids + queue_name = "queues/#{queue_path}/in_process" + begin + job_ids = redis_client.@client.zrange(queue_name, 0, -1) + return if job_ids.empty? + + job_ids.each do |job_id| + @tq.hand_over_task(queue_path, queue_path, job_id.to_s) + end + rescue e + STDERR.puts e.message + end + end +end diff --git a/src/features/jobs/right_iperf.yaml b/src/features/jobs/right_iperf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..843564dfa8a0b35e877f7932e91b7da515114178 --- /dev/null +++ b/src/features/jobs/right_iperf.yaml @@ -0,0 +1,37 @@ +--- + +#! jobs/iperf.yaml +suite: iperf +testcase: iperf +testbox: vm-hi1620-2p8g-chief +category: benchmark +runtime: 30 +cluster: cs-localhost +if role server: + iperf-server: +if role client: + iperf: + protocol: tcp +job_origin: jobs/iperf.yaml +arch: aarch64 +node_roles: server client + +#! 
include/category/benchmark +kmsg: +boot-time: +uptime: +iostat: +heartbeat: +vmstat: +numa-numastat: +numa-vmstat: +numa-meminfo: +proc-vmstat: +proc-stat: +meminfo: + +LKP_SERVER: 172.168.131.113 +LKP_CGI_PORT: 3000 +result_root: /result/iperf +LKP_DEBUG_PREFIX: bash -x + diff --git a/src/features/scheduler.feature b/src/features/scheduler.feature new file mode 100644 index 0000000000000000000000000000000000000000..c335fb4e1b298a442a7c1d824180803e4afb13de --- /dev/null +++ b/src/features/scheduler.feature @@ -0,0 +1,19 @@ +Feature: Scheduler + + # use API: "/submit_job", "/set_host_mac" + # "/boot.ipxe/mac/$mac" "/job_initrd_tmpfs/$id/job.cgz" + Scenario: submit basic iperf job and consume from API boot.ipxe/mac/$mac + Given prepared a job "right_iperf.yaml" + When call with API: post "submit_job" job from add_job.sh + Then return with job id + When call with API: put "set_host_mac" "vm-hi1620-2p8g-chief => ef-01-02-03-04-05" + And call with API: get "boot.ipxe/mac/ef-01-02-03-04-05" + Then return with basic ipxe boot parameter and initrd and kernel + + # more APIs need tests (grouped as scenarios): + # "/job_initrd_tmpfs/$id/job.cgz" + # "/~lkp/cgi-bin/lkp-jobfile-append-var" + # "/~lkp/cgi-bin/lkp-post-run" + + # and more jobs to test: + # which will exercise code not yet covered diff --git a/src/features/step_definitions/scheduler_step.rb b/src/features/step_definitions/scheduler_step.rb new file mode 100644 index 0000000000000000000000000000000000000000..882ddd82399b06f8e0e133ec35d86ff4895b828e --- /dev/null +++ b/src/features/step_definitions/scheduler_step.rb @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +sched_port = ENV['SCHED_PORT'] || 3000 +job_file_path = Pathname.new("#{__FILE__}/../../jobs").realpath + +Given('prepared a job {string}') do |string| + @job = YAML.safe_load(File.read("#{job_file_path}/#{string}")).to_json +end + +# submit_job +When('call with API: post {string} job from add_job.sh') do |string| + _, o = curl_post_result(sched_port, string, @job) + @result = o.gets +end + +Then('return with job id') do + @id_submitted = @result.to_i + puts "Submit job (id = #{@id_submitted})" + raise 'Failed to submit_job' unless @id_submitted.positive?
+end + +# set_host_mac +# host_mac => "vm-hi1620-2p8g-chief => ef-01-02-03-04-05" +Given('call with API: put {string} {string}') do |url, host_mac| + host_mac_params = host_mac.split(' ') + url_with_params = "#{url}?hostname=#{host_mac_params[0]}\\&mac=#{host_mac_params[2]}" + _, o = curl_put_result(sched_port, url_with_params) + @result = o.gets + raise "Failed to #{url}" unless @result == 'Done' +end + +# boot.ipxe/mac/ef-01-02-03-04-05 +When('call with API: get {string}') do |url| + _, o = curl_get_result(sched_port, url) + @result = '' + o.each_line { |line| @result += line } +end + +Then('return with basic ipxe boot parameter and initrd and kernel') do + result = @result.split("\n") + len = result.size + + raise "Not start with #!ipxe, but #{result[0]}" unless result[0] == '#!ipxe' + + raise "Not end with boot, but #{result[len - 1]}" unless result[len - 1] == 'boot' + + (2..(len - 2)).each do |i| + id = %r{.*/(\d+)/job}.match(result[i]) + puts "Check job (id = #{id[1]})" if id + end + + (2..(len - 2)).each do |i| + test_initrd_or_kernel(result[i]) + end +end diff --git a/src/features/step_definitions/taskqueue_steps.rb b/src/features/step_definitions/taskqueue_steps.rb new file mode 100644 index 0000000000000000000000000000000000000000..3cb7a117652c586b8e1fd88b96eb482b1536ad2e --- /dev/null +++ b/src/features/step_definitions/taskqueue_steps.rb @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. +# frozen_string_literal: true + +taskqueue_port = ENV['TASKQUEUE_PORT'] || 3060 + +Given('has a task') do |doc_string| + @task = doc_string +end + +When('call with post api {string} task') do |url| + _, o = curl_post_result(taskqueue_port, url, @task, '-i') + @result = get_http_status_and_content(o.readlines) +end + +# @result = [Http_Status_code, Body_Json] +Then('return with task id > {int}') do |task_id| + result = @result[1]['id'] + raise 'failed' unless result.to_i > task_id +end + +Then('return with task id = {int}') do |task_id| + result = @result[1]['id'] + raise 'failed' unless result.to_i == task_id +end + +When('call with put api {string}') do |url| + _, o = curl_put_result(taskqueue_port, url, '-i') + @result = get_http_status_and_content(o.readlines) +end + +Then('return with task tbox_group == {string}') do |tbox_group| + result = @result[1]['tbox_group'] + raise 'failed' unless result == tbox_group +end + +When('call with put api {string} and previous get id') do |url| + url += @result[1]['id'].to_s + _, o = curl_put_result(taskqueue_port, url, '-i') + @result = get_http_status_and_content(o.readlines) +end + +Then('return with http status_code = {int}') do |http_status_code| + result = @result[0] + raise 'failed' unless result.to_i == http_status_code +end diff --git a/src/features/support/env.rb b/src/features/support/env.rb new file mode 100644 index 0000000000000000000000000000000000000000..945ca135fd22afc012515724f2eae4e5a58c0eab --- /dev/null +++ b/src/features/support/env.rb @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
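+# +# Helper usage sketch (port and endpoint illustrative): +#   _, o = curl_post_result(3000, 'submit_job', @job) +# builds and runs roughly: +#   curl -X POST http://localhost:3000/submit_job -H "Content-Type: application/json" --data '{...}'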
+# frozen_string_literal: true + +require 'open3' +require 'json' +require 'yaml' +require 'pathname' +require 'fileutils' + +def curl_post_result(port, url, data, with_head = nil) + curl_post_format = 'curl %s -X POST http://localhost:%d/%s -H "Content-Type: application/json" --data \'%s\'' + cmd = format(curl_post_format, with_head, port, url, data) + Open3.popen3(cmd) +end + +def curl_put_result(port, url, with_head = nil) + curl_put_format = 'curl %s -X PUT http://localhost:%d/%s' + cmd = format(curl_put_format, with_head, port, url) + Open3.popen3(cmd) +end + +def curl_get_result(port, url, with_head = nil) + curl_get_format = 'curl %s http://localhost:%d/%s' + cmd = format(curl_get_format, with_head, port, url) + Open3.popen3(cmd) +end + +# raw examples: +# [ +# "HTTP/1.1 200 OK\r\n", +# "Connection: keep-alive\r\n", "X-Powered-By: Kemal\r\n", +# "Content-Type: text/html\r\n", "Content-Length: 10\r\n", "\r\n", +# "{\"id\":11}\n" +# ] +def get_http_status_and_content(raw) + array_size = raw.size + status_code = raw[0].match(/ (\d+) /) + status_code = status_code[1].to_i + + content_json = case status_code + when 200 + JSON.parse(raw[array_size - 1]) + end + + [status_code, content_json] +end + +def test_initrd(initrd) + filename_download = %r{.*/(.*)}.match(initrd)[1] + saved_filename = "/tmp/#{filename_download}" + cmd = "curl -# -o #{saved_filename} #{initrd}" + Open3.popen3(cmd) + + return unless File.size(saved_filename) < 1000 + + lines = File.readlines(saved_filename) + raise "Failed to get initrd #{initrd}" if lines[0].chomp == '' +end + +def test_kernel(kernel_params_list) + # needs a more detailed implementation + raise 'Too few kernel parameters' if kernel_params_list.size < 10 +end + +def test_initrd_or_kernel(cmd_line) + puts "Check #{cmd_line}" + cmd_line_list = cmd_line.split(' ') + case cmd_line_list[0] + when 'initrd' + test_initrd(cmd_line_list[1]) + when 'kernel' + test_kernel(cmd_line_list) + else + puts "No check for #{cmd_line}" + end +end diff --git a/src/features/taskqueue.feature b/src/features/taskqueue.feature new file mode 100644 index 0000000000000000000000000000000000000000..ffd6f765b03f6f1cd665fce4946df37d9481252a --- /dev/null +++ b/src/features/taskqueue.feature @@ -0,0 +1,42 @@ +Feature: TaskQueue + + Scenario: add a task without id + Given has a task + """ + {"suite":"test01", "tbox_group":"host"} + """ + When call with post api "add?queue=scheduler/host" task + Then return with task id > 0 + +# Scenario: add a task with reuse id +# {"suite":"test01", "id":1, "tbox_group":"host"} +# Scenario: add a task with id larger than the global one +# {"suite":"test01", "id":65536, "tbox_group":"host"} + + Scenario: consume an existing task + Given has a task + """ + {"suite":"test01", "tbox_group":"host"} + """ + And call with post api "add?queue=scheduler/host" task + When call with put api "consume?queue=scheduler/host" + Then return with task tbox_group == "host" + + Scenario: hand over an existing task + Given has a task + """ + {"suite":"test01", "tbox_group":"host"} + """ + And call with post api "add?queue=scheduler/host" task + And call with put api "consume?queue=scheduler/host" + When call with put api "hand_over?from=scheduler/host\&to=extract_stats\&id=" and previous get id + Then return with http status_code = 201 + + Scenario: delete an existing task + Given has a task + """ + {"suite":"test01", "tbox_group":"host"} + """ + And call with post api "add?queue=scheduler/host" task + When call with put api "delete?queue=scheduler/host\&id=" and previous get id + Then
return with http status_code = 201 diff --git a/src/lib/block_helper.cr b/src/lib/block_helper.cr new file mode 100644 index 0000000000000000000000000000000000000000..37ad367143bf20b77a933fb9dc403e0ff250179c --- /dev/null +++ b/src/lib/block_helper.cr @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +# helper for (task) block +class BlockHelper + def initialize + @block_helper = Hash(String, Fiber).new + end + + # wait until the task for a specific uuid is finished + # - while yield (block) returns false, all tasks with this uuid block + # - once yield returns true, all tasks with this uuid continue + # + # examples: + # block_helper = BlockHelper.new # global instance + # + # # fiber-A calls the code below (checkfile: function / variable) + # # when "checkfile == false", then fiber-A is blocked + # # fiber-B calls the code below too + # # when "checkfile == true", then fiber-A and B continue + # block_helper.block_until_finished("1") { checkfile } + # + def block_until_finished(uuid) + if @block_helper[uuid]? + fiber = @block_helper[uuid] + else + fiber = Fiber.new { puts "uuid {#{uuid}} finished" } + @block_helper[uuid] = fiber + end + + if yield == true + spawn fiber.run + end + + until fiber.dead? + Fiber.yield + end + + @block_helper.delete(uuid) + end +end diff --git a/src/lib/job.cr b/src/lib/job.cr new file mode 100644 index 0000000000000000000000000000000000000000..76d7bcb845f627ed07e229a4d4e49ddce250eacb --- /dev/null +++ b/src/lib/job.cr @@ -0,0 +1,295 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "json" +require "yaml" +require "any_merge" + +require "scheduler/constants.cr" + +struct JSON::Any + def []=(key : String, value : String) + case object = @raw + when Hash(String, JSON::Any) + object[key] = JSON::Any.new(value) + else + raise "Expect Hash for #[](String, JSON::Any), not #{object.class}" + end + end +end + +module JobHelper + def self.match_tbox_group(testbox : String) + tbox_group = testbox + find = testbox.match(/(.*)(\-\d{1,}$)/) + if find != nil + tbox_group = find.not_nil![1] + end + return tbox_group + end + + def self.service_path(path) + temp_path = File.real_path(path) + return temp_path.split("/srv")[-1] + end +end + +class Job + getter hash : Hash(String, JSON::Any) + + INIT_FIELD = { + os: "debian", + lab: LAB, + os_arch: "aarch64", + os_version: "sid", + lkp_initrd_user: "latest", + docker_image: "centos:7", + } + + def initialize(job_content : JSON::Any, id) + @hash = job_content.as_h + + # init job with "-1", or use the original job_content["id"] + if "#{id}" == "" + @hash["id"] = JSON::Any.new("-1") + end + + check_required_keys() + set_defaults() + end + + METHOD_KEYS = %w( + id + os + os_arch + os_version + os_dir + os_mount + arch + suite + tbox_group + testbox + lab + initrd_pkg + initrd_deps + result_root + access_key + access_key_file + lkp_initrd_user + kernel_append_root + docker_image + ) + + macro method_missing(call) + if METHOD_KEYS.includes?({{ call.name.stringify }}) + @hash[{{ call.name.stringify }}].to_s + else + raise "Unassigned key or undefined method: #{{{ call.name.stringify }}}" + end + end + + def dump_to_json + @hash.to_json + end + + def dump_to_yaml + @hash.to_yaml + end + + def dump_to_json_any + JSON.parse(dump_to_json) + end + + def update(hash : Hash) + hash_dup = hash.dup + if hash_dup.has_key?("id") + hash_dup.delete("id") + puts "Should not directly update id, use update_id; ignoring it" +
end + if hash_dup.has_key?("tbox_group") + raise "Should not direct update tbox_group, use update_tbox_group" + end + + @hash.any_merge!(hash_dup) + end + + def update(json : JSON::Any) + update(json.as_h) + end + + private def set_defaults + append_init_field() + set_os_dir() + set_result_root() + set_result_service() + set_os_mount() + set_kernel_append_root() + set_pp_initrd() + set_lkp_server() + end + + private def append_init_field + INIT_FIELD.each do |k, v| + k = k.to_s + if !@hash[k]? || @hash[k] == nil + self[k] = v + end + end + end + + private def set_lkp_server + if self["SCHED_HOST"] != SCHED_HOST # remote submited job + # ?further fix to 127.0.0.1 (from remote ssh port forwarding) + # ?even set self["SCHED_HOST"] and self["SCHED_PORT"] + + self["LKP_SERVER"] = SCHED_HOST + self["LKP_CGI_PORT"] = SCHED_PORT.to_s + + if self["uuid"] == "" + puts "Job's SCHED_HOST is #{self["SCHED_HOST"]}, " + + "current scheduler IP is: #{SCHED_HOST}" + raise "Missing uuid for remote job" + end + end + end + + private def set_os_dir + self["os_dir"] = "#{os}/#{os_arch}/#{os_version}" + end + + private def set_result_root + update_tbox_group_from_testbox # id must exists, need update tbox_group + date = Time.local.to_s("%F") + self["result_root"] = "/result/#{suite}/#{tbox_group}/#{date}/#{id}" + + # access_key has information based on "result_root" + # so when set result_root, we need redo set_ to update it + set_access_key() + end + + private def set_access_key + self["access_key"] = "#{Random::Secure.hex(10)}" unless @hash["access_key"]? + self["access_key_file"] = File.join("/srv/", "#{result_root}", ".#{access_key}") + end + + private def set_result_service + self["result_service"] = "raw_upload" + end + + # if not assign tbox_group, set it to a match result from testbox + # ?if job special testbox, should we just set tbox_group=textbox + private def update_tbox_group_from_testbox + if self["tbox_group"] == "" + @hash["tbox_group"] = JSON::Any.new(JobHelper.match_tbox_group(testbox)) + end + end + + def [](key : String) : String + "#{@hash[key]?}" + end + + def []?(key : String) + @hash.[key]? + end + + def []=(key : String, value : String | Nil) + if key == "id" || key == "tbox_group" + raise "Should not []= id and tbox_group, use update_#{key}" + end + @hash[key] = JSON::Any.new(value) if value + end + + # defaults to the 1st value + VALID_OS_MOUNTS = ["nfs", "initramfs", "cifs", "container"] + + private def set_os_mount + if @hash["os_mount"]? + if !VALID_OS_MOUNTS.includes?(@hash["os_mount"].to_s) + raise "Invalid os_mount: #{@hash["os_mount"]}, should be in #{VALID_OS_MOUNTS}" + end + else + self["os_mount"] = VALID_OS_MOUNTS[0] + end + end + + REQUIRED_KEYS = %w[ + id + suite + testbox + ] + + private def check_required_keys + REQUIRED_KEYS.each do |key| + if !@hash[key]? 
+ raise "Missing required job key: '#{key}'" + end + end + end + + private def set_kernel_append_root + os_real_path = JobHelper.service_path("#{SRV_OS}/#{os_dir}") + lkp_real_path = JobHelper.service_path("#{SRV_OS}/#{os_dir}/initrd.lkp") + lkp_basename = File.basename(lkp_real_path) + + current_basename = "" + if "#{os_mount}" == "initramfs" + current_real_path = JobHelper.service_path("#{SRV_INITRD}/osimage/#{os_dir}/current") + current_basename = File.basename(current_real_path) + end + + fs2root = { + "nfs" => "root=#{OS_HTTP_HOST}:#{os_real_path} initrd=#{lkp_basename}", + "cifs" => "root=cifs://#{OS_HTTP_HOST}#{os_real_path}" + + ",guest,ro,hard,vers=1.0,noacl,nouser_xattr initrd=#{lkp_basename}", + "initramfs" => "rdinit=/sbin/init prompt_ramdisk=0 initrd=#{current_basename}", + "container" => "" + } + self["kernel_append_root"] = fs2root[os_mount] + end + + private def set_pp_initrd + initrd_deps_arr = Array(String).new + initrd_pkg_arr = Array(String).new + initrd_http_prefix = "http://#{INITRD_HTTP_HOST}:#{INITRD_HTTP_PORT}" + mount_type = os_mount == "cifs" ? "nfs" : os_mount.dup + if @hash["pp"]? + program_params = @hash["pp"].as_h + program_params.keys.each do |program| + dest_file = "#{mount_type}/#{os_dir}/#{program}" + if File.exists?("#{ENV["LKP_SRC"]}/distro/depends/#{program}") && + File.exists?("#{SRV_INITRD}/deps/#{dest_file}.cgz") + initrd_deps_arr << "#{initrd_http_prefix}" + + JobHelper.service_path("#{SRV_INITRD}/deps/#{dest_file}.cgz") + end + if File.exists?("#{ENV["LKP_SRC"]}/pkg/#{program}") && + File.exists?("#{SRV_INITRD}/pkg/#{dest_file}.cgz") + initrd_pkg_arr << "#{initrd_http_prefix}" + + JobHelper.service_path("#{SRV_INITRD}/pkg/#{dest_file}.cgz") + end + end + end + self["initrd_deps"] = initrd_deps_arr.join(" ") + self["initrd_pkg"] = initrd_pkg_arr.join(" ") + end + + def update_tbox_group(tbox_group) + @hash["tbox_group"] = JSON::Any.new(tbox_group) + + # "result_root" is based on "tbox_group" + # so when update tbox_group, we need redo set_ + set_result_root() + end + + def update_id(id) + @hash["id"] = JSON::Any.new(id) + + # "result_root" is based on "id" + # so when update id, we need redo set_ + set_result_root() + end + + def get_uuid_tag + uuid = self["uuid"] + uuid != "" ? "/#{uuid}" : nil + end +end diff --git a/src/lib/remote_git_client.cr b/src/lib/remote_git_client.cr new file mode 100644 index 0000000000000000000000000000000000000000..b4a74aa55662f5bf1cd6f08445501e0eb999c495 --- /dev/null +++ b/src/lib/remote_git_client.cr @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "json" +require "http/client" + +class RemoteGitClient + def initialize + @host = ENV.has_key?("REMOTE_GIT_HOST") ? ENV["REMOTE_GIT_HOST"] : "172.17.0.1" + @port = ENV.has_key?("REMOTE_GIT_PORT") ? ENV["REMOTE_GIT_PORT"].to_i32 : 8100 + end + + def git_command(data : JSON::Any) + response = HTTP::Client.post("http://#{@host}:#{@port}/git_command", body: data.to_json) + return response + end +end diff --git a/src/lib/sched.cr b/src/lib/sched.cr new file mode 100644 index 0000000000000000000000000000000000000000..8b10b34295997db70283fa986814dce30a061f6a --- /dev/null +++ b/src/lib/sched.cr @@ -0,0 +1,628 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+ +require "kemal" +require "yaml" + +require "./job" +require "./block_helper" +require "./taskqueue_api" +require "./remote_git_client" +require "../scheduler/constants" +require "../scheduler/jobfile_operate" +require "../scheduler/redis_client" +require "../scheduler/elasticsearch_client" + +class Sched + property es + property redis + property block_helper + + def initialize + @es = Elasticsearch::Client.new + @redis = Redis::Client.new + @task_queue = TaskQueueAPI.new + @block_helper = BlockHelper.new + @rgc = RemoteGitClient.new + end + + def normalize_mac(mac : String) + mac.gsub(":", "-") + end + + def set_host_mac(mac : String, hostname : String) + @redis.hash_set("sched/mac2host", normalize_mac(mac), hostname) + end + + def del_host_mac(mac : String) + @redis.hash_del("sched/mac2host", normalize_mac(mac)) + end + + # return: + # Hash(String, Hash(String, String)) + def get_cluster_state(cluster_id) + cluster_state = @redis.hash_get("sched/cluster_state", cluster_id) + if cluster_state + cluster_state = Hash(String, Hash(String, String)).from_json(cluster_state) + else + cluster_state = Hash(String, Hash(String, String)).new + end + return cluster_state + end + + # get -> modify -> set + def update_cluster_state(cluster_id, job_id, property, value) + cluster_state = get_cluster_state(cluster_id) + if cluster_state[job_id]? + cluster_state[job_id].merge!({property => value}) + @redis.hash_set("sched/cluster_state", cluster_id, cluster_state.to_json) + end + end + + # Return response according to different request states. + # all request states: + # wait_ready | abort | failed | finished | wait_finish | + # write_state | roles_ip + def request_cluster_state(env) + request_state = env.params.query["state"] + job_id = env.params.query["job_id"] + cluster_id = @redis.hash_get("sched/id2cluster", job_id).not_nil! 
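+    # the per-cluster state stored in "sched/cluster_state" is a JSON hash
+    # keyed by job id; an entry looks roughly like (illustrative values):
+    #   {"6": {"state": "ready", "roles": "server", "ip": "172.18.0.2"}}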
+ cluster_state = "" + + states = {"abort" => "abort", + "finished" => "finish", + "failed" => "abort", + "wait_ready" => "ready", + "wait_finish" => "finish"} + + case request_state + when "abort", "finished", "failed" + # update node state only + update_cluster_state(cluster_id, job_id, "state", states[request_state]) + when "wait_ready" + update_cluster_state(cluster_id, job_id, "state", states[request_state]) + @block_helper.block_until_finished(cluster_id) { + cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state]) + cluster_state == "ready" || cluster_state == "abort" + } + + return cluster_state + when "wait_finish" + update_cluster_state(cluster_id, job_id, "state", states[request_state]) + while 1 + sleep(10) + cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state]) + break if (cluster_state == "finish" || cluster_state == "abort") + end + + return cluster_state + when "write_state" + node_roles = env.params.query["node_roles"] + node_ip = env.params.query["ip"] + update_cluster_state(cluster_id, job_id, "roles", node_roles) + update_cluster_state(cluster_id, job_id, "ip", node_ip) + when "roles_ip" + role = "server" + server_ip = get_ip(cluster_id, role) + return "server=#{server_ip}" + end + + # show cluster state + return @redis.hash_get("sched/cluster_state", cluster_id) + end + + # get the ip of role from cluster_state + def get_ip(cluster_id, role) + cluster_state = get_cluster_state(cluster_id) + cluster_state.each_value do |config| + if %(#{config["roles"]}) == role + return config["ip"] + end + end + end + + # node_state: "finish" | "ready" + def sync_cluster_state(cluster_id, job_id, node_state) + cluster_state = get_cluster_state(cluster_id) + cluster_state.each_value do |host_state| + state = host_state["state"] + return "abort" if state == "abort" + end + + cluster_state.each_value do |host_state| + state = host_state["state"] + next if "#{state}" == "#{node_state}" + return "retry" + end + + # cluster state is node state when all nodes are normal + return node_state + end + + # EXAMPLE: + # cluster_file: "cs-lkp-hsw-ep5" + # return: Hash(YAML::Any, YAML::Any) | Nil, 0 | + # {"lkp-hsw-ep5" => {"roles" => ["server"], "macs" => ["ec:f4:bb:cb:7b:92"]}, + # "lkp-hsw-ep2" => {"roles" => ["client"], "macs" => ["ec:f4:bb:cb:54:92"]}}, 2 + def get_cluster_config(cluster_file, lkp_initrd_user, os_arch) + lkp_src = Jobfile::Operate.prepare_lkp_tests(lkp_initrd_user, os_arch) + cluster_file_path = Path.new(lkp_src, "cluster", cluster_file) + + if File.file?(cluster_file_path) + cluster_config = YAML.parse(File.read(cluster_file_path)).as_h + hosts_size = cluster_config.values.size + return cluster_config, hosts_size + end + + return nil, 0 + end + + def get_commit_date(job) + if (job["upstream_repo"] != "") && (job["upstream_commit"] != "") + data = JSON.parse(%({"git_repo": "#{job["upstream_repo"]}.git", + "git_command": ["git-log", "--pretty=format:%cd", "--date=unix", + "#{job["upstream_commit"]}", "-1"]})) + response = @rgc.git_command(data) + return response.body if response.status_code == 200 + end + + return nil + end + + def submit_job(env : HTTP::Server::Context) + begin + body = env.request.body.not_nil!.gets_to_end + + job_content = JSON.parse(body) + job = Job.new(job_content, job_content["id"]?) 
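+      # job_content is the job itself in JSON form, e.g. (minimal sketch):
+      #   {"suite": "iperf", "testbox": "myhost", "test-group": "mygroup", ...}
+      # an "id" field is optional; when absent, add_task() assigns one below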
+ job["commit_date"] = get_commit_date(job) + + cluster_file = job["cluster"] + if cluster_file != "" + cluster_config, hosts_size = get_cluster_config( + cluster_file, job.lkp_initrd_user, job.os_arch) + + return submit_cluster_job( + job, cluster_config.not_nil!) if hosts_size >= 2 + end + + return submit_single_job(job) + rescue ex + return [{ + "job_id" => "0", + "message" => ex.to_s, + "job_state" => "submit", + }] + end + end + + # return: + # success: [{"job_id" => job_id1, "message => "", "job_state" => "submit"}, ...] + # failure: [..., {"job_id" => 0, "message" => err_msg, "job_state" => "submit"}] + def submit_cluster_job(job, cluster_config) + job_messages = Array(Hash(String, String)).new + lab = job.lab + + # collect all job ids + job_ids = [] of String + + # steps for each host + cluster_config.each do |host, config| + tbox_group = host.to_s + job_id = add_task(tbox_group, lab) + + # return when job_id is '0' + # 2 Questions: + # - how to deal with the jobs added to DB prior to this loop + # - may consume job before all jobs done + return job_messages << { + "job_id" => "0", + "message" => "add task queue sched/#{tbox_group} failed", + "job_state" => "submit", + } unless job_id + + job_ids << job_id + + # add to job content when multi-test + job["testbox"] = tbox_group + job.update_tbox_group(tbox_group) + job["node_roles"] = config["roles"].as_a.join(" ") + job["node_macs"] = config["macs"].as_a.join(" ") + + response = add_job(job, job_id) + message = (response["error"]? ? response["error"]["root_cause"] : "") + job_messages << { + "job_id" => job_id, + "message" => message.to_s, + "job_state" => "submit", + } + return job_messages if response["error"]? + end + + cluster_id = job_ids[0] + + # collect all host states + cluster_state = Hash(String, Hash(String, String)).new + job_ids.each do |job_id| + cluster_state[job_id] = {"state" => ""} + # will get cluster id according to job id + @redis.hash_set("sched/id2cluster", job_id, cluster_id) + end + + @redis.hash_set("sched/cluster_state", cluster_id, cluster_state.to_json) + + return job_messages + end + + # return: + # success: [{"job_id" => job_id, "message" => "", job_state => "submit"}] + # failure: [{"job_id" => "0", "message" => err_msg, job_state => "submit"}] + def submit_single_job(job) + tbox_group = job.tbox_group + return [{ + "job_id" => "0", + "message" => "get tbox group failed", + "job_state" => "submit", + }] unless tbox_group + + # only single job will has "idle job" and "execute rate limiter" + if %.includes?("allot/idle/") + tbox_group = "#{tbox_group}/idle" + else + tbox_group += "#{job.get_uuid_tag}" + end + + job_id = add_task(tbox_group, job.lab) + return [{ + "job_id" => "0", + "message" => "add task queue sched/#{tbox_group} failed", + "job_state" => "submit", + }] unless job_id + + response = add_job(job, job_id) + message = (response["error"]? ? response["error"]["root_cause"] : "") + + return [{ + "job_id" => job_id, + "message" => message.to_s, + "job_state" => "submit", + }] + end + + # return job_id + def add_task(tbox_group, lab) + task_desc = JSON.parse(%({"domain": "compass-ci", "lab": "#{lab}"})) + response = @task_queue.add_task("sched/#{tbox_group}", task_desc) + JSON.parse(response[1].to_json)["id"].to_s if response[0] == 200 + end + + # add job content to es and return a response + def add_job(job, job_id) + job.update_id(job_id) + @es.set_job_content(job) + end + + private def ipxe_msg(msg) + "#!ipxe + echo ... + echo #{msg} + echo ... 
+ reboot" + end + + private def grub_msg(msg) + "#!grub + echo ... + echo #{msg} + echo ... + reboot" + end + + private def get_boot_container(job : Job) + response = Hash(String, String).new + response["docker_image"] = "#{job.docker_image}" + response["lkp"] = "http://#{INITRD_HTTP_HOST}:#{INITRD_HTTP_PORT}" + + JobHelper.service_path("#{SRV_INITRD}/lkp/#{job.lkp_initrd_user}/lkp-#{job.arch}.cgz") + response["job"] = "http://#{SCHED_HOST}:#{SCHED_PORT}/job_initrd_tmpfs/#{job.id}/job.cgz" + + puts %({"job_id": "#{job.id}", "job_state": "boot"}) + return response.to_json + end + + private def get_boot_grub(job : Job) + initrd_lkp_cgz = "lkp-#{job.os_arch}.cgz" + + response = "#!grub\n\n" + response += "linux (http,#{OS_HTTP_HOST}:#{OS_HTTP_PORT})" + response += "#{JobHelper.service_path("#{SRV_OS}/#{job.os_dir}/vmlinuz")} user=lkp" + response += " job=/lkp/scheduled/job.yaml RESULT_ROOT=/result/job" + response += " rootovl ip=dhcp ro root=#{job.kernel_append_root}\n" + + response += "initrd (http,#{OS_HTTP_HOST}:#{OS_HTTP_PORT})" + response += JobHelper.service_path("#{SRV_OS}/#{job.os_dir}/initrd.lkp") + response += " (http,#{INITRD_HTTP_HOST}:#{INITRD_HTTP_PORT})" + response += JobHelper.service_path("#{SRV_INITRD}/lkp/#{job.lkp_initrd_user}/#{initrd_lkp_cgz}") + response += " (http,#{SCHED_HOST}:#{SCHED_PORT})/job_initrd_tmpfs/" + response += "#{job.id}/job.cgz\n" + + response += "boot\n" + + puts %({"job_id": "#{job.id}", "job_state": "boot"}) + return response + end + + def touch_access_key_file(job : Job) + FileUtils.touch(job.access_key_file) + end + + def boot_content(job : Job | Nil, boot_type : String) + touch_access_key_file(job) if job + + case boot_type + when "ipxe" + return job ? get_boot_ipxe(job) : ipxe_msg("No job now") + when "grub" + return job ? get_boot_grub(job) : grub_msg("No job now") + when "container" + return job ? get_boot_container(job) : Hash(String, String).new.to_json + else + raise "Not defined boot type #{boot_type}" + end + end + + def find_job_boot(env : HTTP::Server::Context) + api_param = env.params.url["value"] + + case env.params.url["boot_type"] + when "ipxe" + hostname = @redis.hash_get("sched/mac2host", normalize_mac(api_param)) + when "grub" + hostname = @redis.hash_get("sched/mac2host", normalize_mac(api_param)) + if hostname.nil? # auto name new/unknown machine + hostname = "sut-#{api_param}" + set_host_mac(api_param, hostname) + + # auto submit a job to collect the host information + # grub hostname is link with ":", like "00:01:02:03:04:05" + # remind: if like with "-", last "-05" is treated as host number + # then hostname will be "sut-00-01-02-03-04" !!! + Jobfile::Operate.auto_submit_job( + "#{ENV["LKP_SRC"]}/jobs/host-info.yaml", + "testbox: #{hostname}") + end + when "container" + hostname = api_param + end + + get_testbox_boot_content(hostname, env.params.url["boot_type"]) + end + + def find_next_job_boot(env) + hostname = env.params.query["hostname"]? + mac = env.params.query["mac"]? 
+ if !hostname && mac + hostname = @redis.hash_get("sched/mac2host", normalize_mac(mac)) + end + + get_testbox_boot_content(hostname, "ipxe") + end + + def get_testbox_boot_content(testbox, boot_type) + job = find_job(testbox) if testbox + Jobfile::Operate.create_job_cpio(job.dump_to_json_any, + Kemal.config.public_folder) if job + + return boot_content(job, boot_type) + end + + private def find_job(testbox : String, count = 1) + tbox = JobHelper.match_tbox_group(testbox) + tbox_group = tbox.partition("--")[0] + + boxes = [testbox, tbox, tbox_group] + boxes.each do |box| + count.times do + job = prepare_job("sched/#{box}", testbox) + return job if job + + sleep(1) unless count == 1 + end + end + + # when find no job at "sched/#{tbox_group}" + # try to get from "sched/#{tbox_group}/idle" + return get_idle_job(tbox_group, testbox) + end + + private def prepare_job(queue_name, testbox) + response = @task_queue.consume_task(queue_name) + job_id = JSON.parse(response[1].to_json)["id"] if response[0] == 200 + job = nil + + if job_id + begin + job = @es.get_job(job_id.to_s) + rescue ex + puts "Invalid job (id=#{job_id}) in es. Info: #{ex}" + end + end + + if job + job.update({"testbox" => testbox}) + @redis.set_job(job) + end + return job + end + + private def get_idle_job(tbox_group, testbox) + job = prepare_job("sched/#{tbox_group}/idle", testbox) + + # if there has no idle job, auto submit and get 1 + if job.nil? + auto_submit_idle_job(tbox_group) + job = prepare_job("sched/#{tbox_group}/idle", testbox) + end + + return job + end + + def auto_submit_idle_job(tbox_group) + full_path_patterns = "#{ENV["CCI_REPOS"]}/lab-#{ENV["lab"]}/allot/idle/#{tbox_group}/*.yaml" + Jobfile::Operate.auto_submit_job( + full_path_patterns, + "testbox: #{tbox_group}") if Dir.glob(full_path_patterns).size > 0 + end + + private def add_kernel_console_param(arch_tmp) + returned = "" + if arch_tmp == "x86_64" + returned = " console=ttyS0,115200 console=tty0" + end + return returned + end + + private def get_pp_initrd(job : Job) + initrd_deps = "" + initrd_pkg = "" + if job.os_mount == "initramfs" + initrd_deps += job.initrd_deps.split.join() { |item| "initrd #{item}\n" } + initrd_pkg += job.initrd_pkg.split.join() { |item| "initrd #{item}\n" } + end + return initrd_deps, initrd_pkg + end + + private def get_boot_ipxe(job : Job) + initrd_lkp_cgz = "lkp-#{job.os_arch}.cgz" + + initrd_deps, initrd_pkg = get_pp_initrd(job) + + initrd_http_prefix = "http://#{INITRD_HTTP_HOST}:#{INITRD_HTTP_PORT}" + sched_http_prefix = "http://#{SCHED_HOST}:#{SCHED_PORT}" + os_http_prefix = "http://#{OS_HTTP_HOST}:#{OS_HTTP_PORT}" + + response = "#!ipxe\n\n" + if job.os_mount == "initramfs" + response += "initrd #{initrd_http_prefix}" + + "#{JobHelper.service_path("#{SRV_INITRD}/osimage/#{job.os_dir}/current")}\n" + response += "initrd #{initrd_http_prefix}" + + "#{JobHelper.service_path("#{SRV_INITRD}/osimage/#{job.os_dir}/run-ipconfig.cgz")}\n" + else + response += "initrd #{os_http_prefix}" + + "#{JobHelper.service_path("#{SRV_OS}/#{job.os_dir}/initrd.lkp")}\n" + end + response += "initrd #{initrd_http_prefix}" + + "#{JobHelper.service_path("#{SRV_INITRD}/lkp/#{job.lkp_initrd_user}/#{initrd_lkp_cgz}")}\n" + response += "initrd #{sched_http_prefix}/job_initrd_tmpfs/#{job.id}/job.cgz\n" + response += initrd_deps + response += initrd_pkg + response += "kernel #{os_http_prefix}" + + "#{JobHelper.service_path("#{SRV_OS}/#{job.os_dir}/vmlinuz")}" + response += " user=lkp" + response += " job=/lkp/scheduled/job.yaml RESULT_ROOT=/result/job 
rootovl ip=dhcp ro" + response += " #{job.kernel_append_root}" + response += add_kernel_console_param(job.os_arch) + if job.os_mount == "initramfs" + response += " initrd=#{initrd_lkp_cgz} initrd=job.cgz" + job.initrd_deps.split.each do |initrd_dep| + response += " initrd=#{File.basename(initrd_dep)}" + end + response += " initrd=#{File.basename(JobHelper.service_path("#{SRV_INITRD}/osimage/#{job.os_dir}/run-ipconfig.cgz"))}\n" + else + response += " initrd=#{initrd_lkp_cgz} initrd=job.cgz\n" + end + response += "boot\n" + + puts %({"job_id": "#{job.id}", "job_state": "boot"}) + return response + end + + def update_job_parameter(env : HTTP::Server::Context) + job_id = env.params.query["job_id"]? + if !job_id + return false + end + + # try to get report value and then update it + job_content = {} of String => String + job_content["id"] = job_id + + (%w(start_time end_time loadavg job_state)).each do |parameter| + value = env.params.query[parameter]? + if !value || value == "" + next + end + if parameter == "start_time" || parameter == "end_time" + value = Time.unix(value.to_i).to_local.to_s("%Y-%m-%d %H:%M:%S") + end + + job_content[parameter] = value + end + + @redis.update_job(job_content) + + # json log + log = job_content.dup + log["job_id"] = log.delete("id").not_nil! + puts log.to_json + end + + def update_tbox_wtmp(env : HTTP::Server::Context) + testbox = "" + hash = Hash(String, String).new + + time = Time.local.to_s("%Y-%m-%d %H:%M:%S") + hash["time"] = time + + %w(mac ip job_id tbox_name tbox_state).each do |parameter| + if (value = env.params.query[parameter]?) + case parameter + when "tbox_name" + testbox = value + when "tbox_state" + hash["state"] = value + when "mac" + hash["mac"] = normalize_mac(value) + else + hash[parameter] = value + end + end + end + + @redis.update_wtmp(testbox, hash) + + # json log + hash["testbox"] = testbox + puts hash.to_json + end + + def report_ssh_port(testbox : String, ssh_port : String) + @redis.hash_set("sched/tbox2ssh_port", testbox, ssh_port) + end + + def delete_access_key_file(job : Job) + File.delete(job.access_key_file) if File.exists?(job.access_key_file) + end + + def close_job(job_id : String) + job = @redis.get_job(job_id) + + delete_access_key_file(job) if job + + response = @es.set_job_content(job) + if response["_id"] == nil + # es update fail, raise exception + raise "es set job content fail!" + end + + response = @task_queue.hand_over_task( + "sched/#{job.tbox_group}", "extract_stats", job_id + ) + if response[0] != 201 + raise "#{response}" + end + + @redis.remove_finished_job(job_id) + + puts %({"job_id": "#{job_id}", "job_state": "complete"}) + end +end diff --git a/src/lib/taskqueue_api.cr b/src/lib/taskqueue_api.cr new file mode 100644 index 0000000000000000000000000000000000000000..8d0db5d6b67230e3120e3bcebaf56f95e228c566 --- /dev/null +++ b/src/lib/taskqueue_api.cr @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "json" +require "http/client" + +class TaskQueueAPI + def initialize + @port = ENV.has_key?("TASKQUEUE_PORT") ? ENV["TASKQUEUE_PORT"].to_i32 : 3060 + @host = ENV.has_key?("TASKQUEUE_HOST") ? ENV["TASKQUEUE_HOST"] : "172.17.0.1" + end + + def add_task(service_queue_path : String, task : JSON::Any) + params = HTTP::Params.encode({"queue" => service_queue_path}) + response = HTTP::Client.post("http://#{@host}:#{@port}/add?" 
+ params, body: task.to_json) + arrange_response(response) + end + + def consume_task(service_queue_path : String) + params = HTTP::Params.encode({"queue" => service_queue_path}) + response_put_api("consume", params) + end + + def hand_over_task(service_queue_path_from : String, service_queue_path_to : String, task_id : String) + params = HTTP::Params.encode({"from" => service_queue_path_from, "to" => service_queue_path_to, "id" => task_id}) + response_put_api("hand_over", params) + end + + def delete_task(service_queue_path : String, task_id : String) + params = HTTP::Params.encode({"queue" => service_queue_path, "id" => task_id}) + response_put_api("delete", params) + end + + private def response_put_api(cmd : String, params : String) + response = HTTP::Client.put("http://#{@host}:#{@port}/#{cmd}?" + params) + arrange_response(response) + end + + private def arrange_response(response) + case status_code = response.status_code + when 200 + [status_code, JSON.parse(response.body)] + when 201 + [status_code, nil] + else + if response.headers["CCI-error-Description"]? + [status_code, response.headers["CCI-error-Description"]] + else + [status_code, response.status_message] + end + end + end +end diff --git a/src/monitoring.cr b/src/monitoring.cr new file mode 100644 index 0000000000000000000000000000000000000000..b1b534be399381e866d5083517804ffd4060ac3e --- /dev/null +++ b/src/monitoring.cr @@ -0,0 +1,6 @@ +require "monitoring/monitoring" +require "monitoring/constants" + +module Monitoring + Kemal.run(MONITOR_PORT) +end diff --git a/src/monitoring/amqp.cr b/src/monitoring/amqp.cr new file mode 100644 index 0000000000000000000000000000000000000000..1a97dc4fa9f91d7696543f57179f2fb36f7552a9 --- /dev/null +++ b/src/monitoring/amqp.cr @@ -0,0 +1,34 @@ +require "amqp-client" + +require "./monitoring" +require "./filter" +require "./constants" + +class MessageQueueClient + def initialize(host = MQ_HOST, port = MQ_PORT) + @client = AMQP::Client.new("amqp://#{host}:#{port}") + end + + private def start + conn = @client.connect + yield conn + ensure + conn.try &.close + end + + def monitoring_message_queue(filter : Filter, exchange_name : String, queue_name : String) + start do |conn| + conn.channel do |channel| + queue = channel.queue(queue_name) + queue.bind(exchange_name, "") + queue.subscribe(tag: queue_name, block: true) do |msg| + begin + filter.filter_msg(msg.body_io) + rescue e + puts "filter message error: #{e}" + end + end + end + end + end +end diff --git a/src/monitoring/constants.cr b/src/monitoring/constants.cr new file mode 100644 index 0000000000000000000000000000000000000000..154c536c77a69e1825bb7f2b9c3ded3651df0a6b --- /dev/null +++ b/src/monitoring/constants.cr @@ -0,0 +1,4 @@ +MQ_HOST = (ENV.has_key?("MQ_HOST") ? ENV["MQ_HOST"] : "172.17.0.1") +MQ_PORT = (ENV.has_key?("MQ_PORT") ? ENV["MQ_PORT"] : 5672).to_i32 + +MONITOR_PORT = (ENV.has_key?("MONITOR_PORT") ? 
ENV["MONITOR_PORT"] : 11310).to_i32 diff --git a/src/monitoring/filter.cr b/src/monitoring/filter.cr new file mode 100644 index 0000000000000000000000000000000000000000..5cd6532fe74c554cd05902b7eb40050d31fcdc52 --- /dev/null +++ b/src/monitoring/filter.cr @@ -0,0 +1,56 @@ +require "set" +require "json" + +require "./parse_serial_logs" + +class Filter + def initialize + # use @hash to save query and socket + # like {query => [socket1, socket2]} + @hash = Hash(JSON::Any, Array(HTTP::WebSocket)).new + @sp = SerialParser.new + end + + def add_filter_rule(query : JSON::Any, socket : HTTP::WebSocket) + @hash[query] = Array(HTTP::WebSocket).new unless @hash[query]? + @hash[query] << socket + end + + def remove_filter_rule(query : JSON::Any, socket : HTTP::WebSocket) + return unless @hash[query]? + + @hash[query].delete(socket) + @hash.delete(query) if @hash[query].empty? + end + + def send_msg(query, msg) + return unless @hash[query]? + + @hash[query].each do |socket| + socket.send msg.to_json + end + end + + def filter_msg(msg) + msg = JSON.parse(msg.to_s).as_h? + return unless msg + + @sp.save_dmesg_to_result_root(msg) + @hash.keys.each do |query| + if match_query(query.as_h, msg) + send_msg(query, msg) + end + end + end + + def match_query(query : Hash(String, JSON::Any), msg : Hash(String, JSON::Any)) + query.each do |key, value| + if value == nil + return false unless msg.has_key?(key) + else + return false unless value == msg[key]? + end + end + return true + end +end diff --git a/src/monitoring/monitoring.cr b/src/monitoring/monitoring.cr new file mode 100644 index 0000000000000000000000000000000000000000..178e4c9c54acc67d8f837e7794e0f9f0d70da1c7 --- /dev/null +++ b/src/monitoring/monitoring.cr @@ -0,0 +1,30 @@ +require "kemal" +require "json" + +require "./filter" +require "./amqp" + +module Monitoring + filter = Filter.new + + message_queue_client = MessageQueueClient.new + + spawn message_queue_client.monitoring_message_queue(filter, "logging-test", "logging-test") + + ws "/filter" do |socket| + query = JSON::Any.new("") + + socket.on_message do |msg| + # query like {"job_id": 1} + query = JSON.parse(msg) + if query.as_h? + filter.add_filter_rule(query, socket) + end + end + + socket.on_close do + next unless query.as_h? + filter.remove_filter_rule(query, socket) + end + end +end diff --git a/src/monitoring/parse_serial_logs.cr b/src/monitoring/parse_serial_logs.cr new file mode 100644 index 0000000000000000000000000000000000000000..a000ca3618a3eddfc69e850a45e7a6d870c816ff --- /dev/null +++ b/src/monitoring/parse_serial_logs.cr @@ -0,0 +1,121 @@ +require "../scheduler/elasticsearch_client" +require "set" +require "json" + +# This parses dmesg in a stream of serial log, finding a number of patterns +# in various places of the dmesg and take actions accordingly. +# dmesg action +# ================================================================================== +# START_PATTERN close @host2file[host]; cache line to @host2head[host] +# header lines w/o job info cache line to @host2head[host] +# JOB_PATTERN get job_id --> job --> result_root; dump cache to file +# dmesg lines w/ job info append line to dmesg file under result_root; +# CRASH_PATTERN notify oops/crash/warn --> reboot machine +# END_PATTERN close @host2file[host] +# ================================================================================== +# steps for a dmesg: +# 1) stash the head of dmesg file to hash before getting the job_id. 
+# 2) create dmesg file under the result_root of the job when successfully +# matched the job_id from the received message. +# 3) detect kernel oops/crash and the end of dmesg file. + +class SerialParser + START_PATTERNS = [ + "starting QEMU", + "Start PXE over IPv4", + "iPXE initialising devices", + "Open Source Network Boot Firmware", + "BIOS Build Version:", + "BIOS Log @ ", + ] + + END_PATTERNS = [ + "Total QEMU duration: ", + "No job now", + "Restarting system", + ] + + def initialize + @host2head = Hash(String, Array(String)).new + @host2rt = Hash(String, String).new + end + + def host_in_msg(msg) + return unless msg["serial_path"]? + + host = File.basename(msg["serial_path"].to_s) + end + + def detect_start_or_end(msg, host, pattern_list) + message = msg["message"].to_s + pattern_list.each do |pattern| + matched = message.match(/.*(?#{pattern})/) + return matched.named_captures["signal"] unless matched.nil? + end + end + + def delete_host(msg, host, signal) + boundary_signal = detect_start_or_end(msg, host, signal) + return unless boundary_signal + + @host2head.delete(host) + @host2rt.delete(host) + end + + def save_dmesg_to_result_root(msg) + host = host_in_msg(msg) + return unless host + + delete_host(msg, host, START_PATTERNS) + + check_save = check_save_dmesg(msg, host) + delete_host(msg, host, END_PATTERNS) + return if check_save + + job_id = match_job_id(msg) + job = find_job(job_id) + return if dump_cache(job, msg, host) + + cache_dmesg(msg, host) + end + + def check_save_dmesg(msg, host) + return unless @host2rt.has_key?(host) + + File.open("#{@host2rt[host]}/dmesg", "a+") do |f| + f.puts msg["message"] + end + return true + end + + def match_job_id(msg) + matched = msg["message"].to_s.match(/.*\/job_initrd_tmpfs\/(?.*?)\//) + return unless matched + + job_id = matched.named_captures["job_id"] + end + + def find_job(job_id) + return unless job_id + + job = Elasticsearch::Client.new.get_job_content(job_id) + end + + def dump_cache(job, msg, host) + return unless job + + result_root = File.join("/srv", job["result_root"].to_s) + @host2rt[host] = result_root + File.open("#{result_root}/dmesg", "w") do |f| + f.puts @host2head[host].join("\n") + f.puts msg["message"] + end + @host2head.delete(host) + return true + end + + def cache_dmesg(msg, host) + @host2head[host] ||= Array(String).new + @host2head[host] << msg["message"].to_s + end +end diff --git a/src/scheduler.cr b/src/scheduler.cr new file mode 100644 index 0000000000000000000000000000000000000000..41702e7493cbd68601bcaf032d7dc6c6b9cf02d6 --- /dev/null +++ b/src/scheduler.cr @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
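+# build-and-run sketch (the container scripts under $CCI_SRC/container/scheduler
+# are the supported path; see src/scheduler/README.md):
+#   cd src && shards install
+#   crystal build scheduler.cr -o m_scheduler && ./m_scheduler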
+
+require "scheduler/scheduler"
+require "./scheduler/constants.cr"
+
+module Scheduler
+  Kemal.run(SCHED_PORT)
+end
diff --git a/src/scheduler/README.md b/src/scheduler/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5da1bded507a16cc252a69b4bef1190c4fad7d59
--- /dev/null
+++ b/src/scheduler/README.md
@@ -0,0 +1,427 @@
+[TOC]
+
+---
+# restful-API
+- First of all, you should deploy the lkp-tests and compass-ci projects
+- And set the environment variables needed for running (modify "/etc/compass-ci/defaults/crystal.yaml", and run $CCI_SRC/container/defconfig.rb | defconfig.sh)
+    LKP_SRC: ~/code/lkp-tests      # path to your lkp-tests repo
+    CCI_SRC: ~/code/compass-ci     # path to your compass-ci repo
+    SCHED_HOST: 172.168.131.131    # hostname or ip of the scheduler
+    SCHED_PORT: 3000               # port of the scheduler
+    JOB_INDEX_TYPE: "jobs/_doc"    # es job document index and type
+- ${variable} means the value of a variable in a shell command
+- #{variable} means the value of a variable in crystal language
+- <variable> just marks a variable for emphasis
+- [variable] means this variable is optional
+
+## submit a job
+- restAPI: POST "/submit_job"
+- request body: {"#!jobs/iperf.yaml":null, "suite":"iperf", ...}
+- response body: v0.1.x: "#{job_id}" (job_id is a globally unique sequence number, e.g. 6); v0.2.x: a JSON message array, see the example below
+- debug cmd:
+    curl -X POST --data '{"suite": "iperf", "testbox": "myhost", "test-group": "mygroup", "result_root": "/result/iperf"}' http://${SCHED_HOST}:${SCHED_PORT}/submit_job
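+- example response (v0.2.x; one message per submitted job, returned as a JSON array):
+
+    [{"job_id": "6", "message": "", "job_state": "submit"}]
+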
+- v0.1.x inner process:
+```sequence
+User->Scheduler: POST "/submit_job" with job content
+Note left of User: job content\nin json format
+Scheduler->Sched: sched.\nsubmit_job(env)
+Sched->Redis: job_id = get_new_job_id
+Redis->Redis: increase string key\n"sched/seqno2jobid"
+Sched->Sched: job = Job.new(JSON.parse(HTML::body)\n+ "id":#{job_id})
+Sched->Sched: tbox_group = job.tbox_group,\ntbox_group_queue =\n"sched/jobs_to_run/#{tbox_group}"
+Sched->Redis: add2queue(tbox_group_queue, job_id)
+Redis->Redis: push to ready queue\n(redis key)
+Sched->ElasticSearch: set_job_content(job)
+ElasticSearch->ElasticSearch: create \ndocument about job
+Sched->Scheduler:
+Scheduler->User:
+```
+-- doing what:
+ 1. add job_id to the ready queue in redis
+ 2. add a job document in es
+
+-- redis storage:
+Key|Value|Type
+:-|:-|:-
+sched/seqno2jobid |last_job_id => 64bit number |String
+sched/jobs_to_run/#{tbox_group} |[{member => job_id, score => enqueue_time},] |Sorted_Set
+
+ Notes:
+ enqueue_time: float64
+ tbox_group_queue: jobs to run queue (e.g. "sched/jobs_to_run/wfg-e595")
+ use a redis Sorted_Set as a job queue, one per tbox_group.
+
+- v0.2.x inner process:
+```sequence
+User->Scheduler: POST "/submit_job" with job content
+Note left of User: job content\nin json format
+Scheduler->Sched: sched.submit_job(env)
+Sched->Sched: job_content = JSON.parse(http.body)
+Sched->JobHelper: tbox_group = get_tbox_group
+Sched->TaskQueue: add_task(task_queue_name,\ntask_description)
+TaskQueue->Sched: task_id
+Sched->Sched: job = job_content\n+ "id":#{task_id as job_id}
+Sched->ElasticSearch: set_job_content(job)
+ElasticSearch->ElasticSearch: create \ndocument about job
+Sched->Scheduler:
+Scheduler->User:
+```
+-- doing what:
+ 1. use TaskQueue.add_task to add a task (for the current job)
+ 2. add a job document in es
+
+-- redis storage: no change
+
+- es storage:
+ add a JOB_INDEX_TYPE document (contents of job)
+
+- class members related:
+ Sched.submit_job(env)
+ ElasticSearch::Client.set_job_content(job)
+ Job.new, Job.tbox_group
+
+## qemu-pxe testbox consume a job
+- restAPI: GET "/boot.ipxe/mac/:mac" (e.g. "/boot.ipxe/mac/52-54-00-12-34-56")
+- request body: none
+- response body: "#{ipxe_command}"
+- debug cmd:
+    curl http://${SCHED_HOST}:${SCHED_PORT}/boot.ipxe/mac/54-52-00-12-24-46
+
+### case 1: when find a job
+    #!ipxe
+    initrd http://#{OS_HTTP_HOST}:#{OS_HTTP_PORT}/os/#{job.os_dir}/initrd.lkp
+    initrd http://#{INITRD_HTTP_HOST}:#{INITRD_HTTP_PORT}/initrd/lkp/#{job.lkp_initrd_user}/lkp-#{job.os_arch}.cgz
+    initrd http://#{SCHED_HOST}:#{SCHED_PORT}/job_initrd_tmpfs/#{job.id}/job.cgz
+    kernel http://#{OS_HTTP_HOST}:#{OS_HTTP_PORT}/os/#{job.os_dir}/vmlinuz user=lkp job=/lkp/scheduled/job.yaml RESULT_ROOT=/result/job root=#{job.kernel_append_root} rootovl ip=dhcp ro initrd=initrd.lkp initrd=lkp-#{job.os_arch}.cgz initrd=job.cgz
+    boot
+
+#### who generates this output
+ - initrd.lkp: generated by container dracut-initrd for the specific os; the basic lkp-tests system.
+ - vmlinuz: generated by container dracut-initrd for the specific os; the os kernel.
+ - lkp-#{job.os_arch}.cgz: generated by container init-lkp for the current user; customized installation of external programs and/or system packages.
+ - job.cgz: generated by the scheduler service; contains the job.yaml and job.sh of the job.
+
+### case 2: when find no job
+    #!ipxe
+    echo ...
+    No job now
+    echo ...
+    reboot
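+- note: the mac in the URL uses "-" separators; the scheduler's normalize_mac() maps ":" to "-", so "52:54:00:12:34:56" and "52-54-00-12-34-56" address the same "sched/mac2host" entry
+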
+- v0.1.x inner process:
+```sequence
+TestBox->Scheduler: GET "/boot.ipxe/mac/52-54-00-12-34-56"
+Scheduler->Sched: ipxe_command = Sched.\nfind_job_boot("52-54-00-12-34-56")
+Sched->Redis: hostname = redis.hget("sched/mac2host",\n"52-54-00-12-34-56")
+Sched->Sched: tbox_group = JobHelper.get_tbox_group(hostname)
+Sched->Redis: job_id = find_job(tbox_group)
+Redis->Redis: move_job("sched/jobs_to_run/#{tbox_group}",\n"sched/jobs_running",\njob_id)
+Sched->ElasticSearch: job = get_job(job_id)
+Sched->Redis: set_job(job)
+Sched->Sched: Jobfile::Operate.create_job_cpio\ngenerate job.cgz from job
+Sched->Scheduler:
+Scheduler->TestBox:
+```
+-- doing what:
+ 1. use the mac to look up the hostname in redis key "sched/mac2host"
+ 2. translate hostname to tbox_group
+ 3. move job_id from redis key "sched/#{tbox_group}" to redis key "sched/jobs_running"
+ 4. get_job from es (with job_id)
+ 5. record {job_id => job} to redis key "sched/id2job"
+ 6. create job.cgz, save it to /job_initrd_tmpfs/#{job_id}/
+ 7. generate the ipxe_command and return it to the caller
+
+-- redis storage:
+Key|Value|Type
+:-|:-|:-
+sched/jobs_to_run/#{tbox_group} |[{member => job_id, score => enqueue_time},] |Sorted_Set
+sched/jobs_running |[{member => job_id, score => dequeue_time},] |Sorted_Set
+sched/id2job |[{field => job_id, value => job_info},] |Hash
+
+ Notes:
+ dequeue_time: float64, the time when the job_id was put into redis key "sched/jobs_running"
+ job_info: records information about a job
+
+- v0.2.x inner process:
+```sequence
+TestBox->Scheduler: GET "/boot.ipxe/mac/52-54-00-12-34-56"
+Scheduler->Sched: ipxe_command = Sched.\nfind_job_boot("52-54-00-12-34-56")
+Sched->Redis: hostname = redis.hget("sched/mac2host",\n"52-54-00-12-34-56")
+Sched->Sched: tbox_group = JobHelper.get_tbox_group(hostname)
+Sched->TaskQueue: consume_task("sched/#{tbox_group}")
+TaskQueue->Sched: {"id":task_id} | nil
+Sched->ElasticSearch: job = get_job(task_id as job_id)
+Sched->Sched: Jobfile::Operate.create_job_cpio\ngenerate job.cgz from job
+Sched->Scheduler:
+Scheduler->TestBox:
+```
+-- doing what:
+ 1. use the mac to look up the hostname in redis key "sched/mac2host"
+ 2. translate hostname to tbox_group
+ 3. call consume_task with tbox_group to get an available task_id
+ 4. get_job from es (with the task_id as job_id)
+ 5. record {job_id => job} to redis key "sched/id2job"
+ 6. create job.cgz, save it to /job_initrd_tmpfs/#{job_id}/
+ 7. generate the ipxe_command and return it to the caller
+
+-- redis storage:
+Key|Value|Type
+:-|:-|:-
+sched/id2job |[{field => job_id, value => job_info},] |Hash
+
+- es storage:
+ query the JOB_INDEX_TYPE document (contents of job)
+
+- class members related:
+ Sched.find_job_boot
+ Elasticsearch::Client.get_job
+ Jobfile::Operate.create_job_cpio
+
+## docker container testbox consume a job
+- restAPI: GET "/boot.container/host/:host" (e.g. "/boot.container/host/dc-1g-1")
+- request body: none
+- response body:
+    "{
+      "docker_image": "centos:7",
+      "lkp": "http://#{INITRD_HTTP_HOST}:#{INITRD_HTTP_PORT}/initrd/lkp/#{job.lkp_initrd_user}/lkp-#{job.os_arch}.cgz",
+      "job": "http://#{SCHED_HOST}:#{SCHED_PORT}/job_initrd_tmpfs/#{job.id}/job.cgz"
+    }" |
+    "{}"
+- debug cmd:
+    curl http://${SCHED_HOST}:${SCHED_PORT}/boot.container/host/dc-1g-1
+
+- inner process:
+-- doing what:
+ mostly the same as <qemu-pxe testbox consume a job>, but
+ 1. the hostname (testbox) is given directly, no need to look it up from the mac address
+ 2. only 3 parameters: use <"docker_image"> to start a container, mount the <"lkp"> initrd, and run the user submitted <"job">
+
+## physical[|qemu-grub] testbox consume a job
+- restAPI: GET "/boot.grub/mac/:mac" (e.g. "/boot.grub/mac/52:54:00:12:34:56")
+- request body: none
+- response body: "#{grub_command}"
+- debug cmd:
+    curl http://${SCHED_HOST}:${SCHED_PORT}/boot.grub/mac/52:54:00:12:34:56
+
+### case 1: when find a job
+    #!grub
+    linux (http,#{OS_HTTP_HOST}:#{OS_HTTP_PORT})/os/#{job.os_dir}/vmlinuz user=lkp job=/lkp/scheduled/job.yaml RESULT_ROOT=/result/job rootovl ip=dhcp ro root=#{job.kernel_append_root}
+    initrd (http,#{OS_HTTP_HOST}:#{OS_HTTP_PORT})/os/#{job.os_dir}/initrd.lkp (http,#{INITRD_HTTP_HOST}:#{INITRD_HTTP_PORT})/initrd/lkp/#{job.lkp_initrd_user}/#{initrd_lkp_cgz} (http,#{SCHED_HOST}:#{SCHED_PORT})/job_initrd_tmpfs/#{job.id}/job.cgz
+    boot
+
+### case 2: when find no job
+    #!grub
+    echo ...
+    No job now
+    echo ...
+    reboot
+
+- inner process:
+-- doing what:
+ the same as <qemu-pxe testbox consume a job>
+
+## job download
+- restAPI: GET "/job_initrd_tmpfs/:job_id/job.cgz" (e.g. "/job_initrd_tmpfs/6/job.cgz")
+- request body: none
+- response body:
+    # less job.cgz
+    lkp
+    lkp/scheduled
+    lkp/scheduled/job.yaml
+    lkp/scheduled/job.sh
+- debug cmd:
+    no need
+
+- inner process:
+```sequence
+TestBox->Scheduler: GET "/job_initrd_tmpfs/#{job_id}/job.cgz"
+Scheduler->Scheduler: fsdir_root = Kemal.config.public_folder
+Note right of Scheduler: send fsdir_root/#{job_id}/job.cgz\nto testbox
+Scheduler->TestBox: send_file job.cgz
+```
+- doing what:
+ 1. send job.cgz to the client
+ 2. remove job.cgz
+
+- redis storage: no change
+- es storage: no change
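+- example: inspect a downloaded job.cgz locally (a sketch; the same gzip+cpio format that unzip_cgz() in jobfile_operate.cr expects):
+
+    curl -o job.cgz http://${SCHED_HOST}:${SCHED_PORT}/job_initrd_tmpfs/6/job.cgz
+    gzip -dc job.cgz | cpio -t     # list members: lkp/scheduled/job.yaml ...
+    gzip -dc job.cgz | cpio -id    # extract into the current directory
+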
"/job_initrd_tmpfs/6/job.cgz") +- request body: none +- response body: + #less job.cgz + lkp + lkp/scheduled + lkp/scheduled/job.yaml + lkp/scheduled/job.sh +- debug cmd: + no need + +- inner process: +```sequence +TestBox->Scheduler: GET "/job_initrd_tmpfs//job.cgz" +Scheduler->Scheduler: = Kemal.config.public_folder +Note right of Scheduler: send fsdir_root//job.cgz\nto testbox +Scheduler->TestBox: send_file job.cgz +``` +- doing what: + 1. send job.cgz to client + 2. remove job.cgz + +- redis storage: no change +- es storage: no change + +## report job var +- restAPI: GET "/~lkp/cgi-bin/lkp-jobfile-append-var?job_file=/lkp/scheduled/job.yaml&job_id=:job_id&[:parameter=:value]" + e.g. "/~lkp/cgi-bin/lkp-jobfile-append-var?job_file=/lkp/scheduled/job.yaml&job_id=6&start_time=1587725398 +- request body: none +- response body: "Done" + +- inner process: +```sequence +TestBox->Scheduler: GET "/~lkp/cgi-bin/lkp-jobfile-append-var?\njob_file=/lkp/scheduled/job.yaml\n&job_id=&[ => ]" +Scheduler->Sched: sched.updatea_job_parameter(env) +Sched->Sched: job_content = {"id" => #{job_id},\n"" => ""} +Sched->Redis: update_job(job_content) +Scheduler->TestBox: Done +``` +- doing what: + 1. update "sched/id2job" content in redis + +- redis storage: update "sched/id2job" content +- es storage: no change + +- class members + Sched.update_job_prarameter(env) + Redis::Client.updatea_job(job_content) + +## report job finished +- restAPI: GET "/~lkp/cgi-bin/lkp-post-run?job_file=/lkp/scheduled/job.yaml&job_id=" +- request body: none +- response body: "Done" +- debug cmd: + curl "http://${SCHED_HOST}:${SCHED_PORT}\n/~lkp/cgi-bin/lkp-post-run?job_file=/lkp/scheduled/job.yaml&job_id=40" + +- v0.1.x inner process: +```sequence +User->Scheduler: GET "/~lkp/cgi-bin/lkp-post-run\n?job_file=/lkp/scheduled/job.yaml&job_id=" +Scheduler->Sched: close_job(job_id) +Sched->Redis: = get_job(job_id) +Sched->ElasticSearch: set_job_content(job) +Sched->Redis: move_job("sched/jobs_running",\n"extract_stats", job_id) +Sched->Redis: remove_finished_job(job_id) +Scheduler->User: Done +``` +-- doing what: + 1. update JOB_INDEX_TYPE document (contents of job) + 2. move job from redis queue "sched/jobs_running" to "queue/extract_stats" + 3. remove job from redis queue "sched/id2job" + +-- redis storage: + move_job job_id from redis key "sched/jobs_running" to "queue/extract_stats" + del key job_id from "sched/id2job" + +- v0.2.x inner process: +```sequence +User->Scheduler: GET "/~lkp/cgi-bin/lkp-post-run\n?job_file=/lkp/scheduled/job.yaml&job_id=" +Scheduler->Sched: close_job(job_id) +Sched->Redis: = get_job(job_id) +Sched->ElasticSearch: set_job_content(job) +Sched->TaskQueue: hand_over_task("sched/#{jobs.tbox_group}",\n"extract_stats", job_id) +Sched->Redis: remove_finished_job(job_id) +Scheduler->User: Done +``` +-- doing what: + 1. update JOB_INDEX_TYPE document (contents of job) + 2. call hand_over_task with parameter "sched/#{job.tbox_group}", "extract_stats", job_id + 3. remove job from redis queue "sched/id2job" + +-- redis storage: + del key job_id from "sched/id2job" + +- es storage: update job documents + +## report mac's hostname +- restAPI: PUT "/set_host_mac?hostname=:hostname&mac=:mac" (e.g. 
"/set_host_mac?hostname=wfg-e595&mac=52-54-00-12-34-56") +- request body: none +- response body: "Done" +- debug cmd: + curl -X PUT "http://${SCHED_HOST}:${SCHED_PORT}/set_host_mac?hostname=wfg-e595&mac=52-54-00-12-34-56" + +- inner process: +```sequence +User->Scheduler: PUT "/set_host_mac?hostname=\n&mac=" +Scheduler->Sched: set_host_mac(mac, hostname) +Sched->Redis: hash_set("sched/mac2host",\nmac, hostname) +Scheduler->User: Done +``` +- doing what: + 1. create/update redis hash key "sched/mac2host" + +- redis storage: no change +Key|Value|Type +:-|:-|:- +sched/mac2host |[{field => mac, value => hostname,] |Hash +- es storage: no change + + +--- +# es storage +- job saved in JOB_INDEX_TYPE documents +- debug cmd: + curl http://localhost:9200/${JOB_INDEX_TYPE}/6 # query a job with job_id=6 + curl http://localhost:9200/${JOB_INDEX_TYPE}/_search # query all jobs + + +--- +# redis client debug cmd +- list all keys: keys sched* +- get String key value: get sched/seqno2jobid +- get Sorted-Set key value: zrange sched/jobs_running 0 -1 | zrange sched/jobs_running 0 -1 withscores | zrange sched/jobs_to_run/mygroup 0 -1 +- get all Hash keys field: hkeys sched/id2job +- get a Hash key value: hget sched/id2job 6 #->6 is a job_id + +--- +# API use scenario +## scenario 1: developer debug, submit a job and consume with quem.sh +1. use [PUT "/set_host_mac?hostname=:hostname&mac=:mac"] to register a {mac => hostname} + debug shell md: curl -X PUT "http://${SCHED_HOST}:${SCHED_PORT}/set_host_mac?hostname=wfg-e595&mac=52-54-00-12-34-56" +2. use [POST "/submit_job"] to submit a job + debug shell cmd: ./0_addjob.sh iperf.yaml # at cci/user-client/helper + or lkp cmd: submit-job iperf.yaml +3. runs qemu.sh at cci/providers to get a job and run it + qemu.sh will call [GET "/boot.ipxe/mac/:mac"] to get ipxe boot paramater + qemu.sh will call [GET "/job_initrd_tmpfs//job.cgz"] go get the job package + qemu.sh start a testbox, and the testbox will call [GET "/~lkp/cgi-bin/..."] + + +--- +# develper help + +## 1. how to build exe file +at compass-ci host, scheduler will be run as a docker container. + use the script at $CCI_SRC/container/scheduler/build to build the container image + use the script at $CCI_SRC/container/scheduler/run to start the container. + +if you want manually build scheduler in crystal languange environment, you need: + 1) run shards: to install essential require package (see $CCI_SRC/src/shards.yml) + 2) fix current elasticSearch err: + lib/elasticsearch-crystal/src/elasticsearch/api/namespace/common.cr + L79 response = HTTP::Client.post(url: endpoint, body: post_data) -> response = HTTP::Client.post(url: endpoint, body: post_data, headers: HTTP::Headers{"Content-Type" => "application/json"}) + L82 response = HTTP::Client.put(url: endpoint, body: post_data) -> response = HTTP::Client.put(url: endpoint, body: post_data, headers: HTTP::Headers{"Content-Type" => "application/json"}) + 3) run crystal build scheduler.cr -o m_scheduler + +we have construct docker images and for scheduler container to use. +you can simply call the scheduler's build and run script. + + +# How to start your own scheduler container + +## 1.Add port configuration, select an unused port number like <3001>, write in xxx.yaml like "example.yaml" + cat > ~/.config/compass-ci/defaults/example.yaml < host, :port => port}) + end + + # caller should judge response["_id"] != nil + def set_job_content(job : Job) + response = get_job_content(job.id) + if response["id"]? 
+ response = update(job.dump_to_json_any, job.id) + else + response = create(job.dump_to_json_any, job.id) + end + + return response + end + + # caller should judge response["id"]? + def get_job_content(job_id : String) + if @client.exists({:index => "jobs", :type => "_doc", :id => job_id}) + response = @client.get_source({:index => "jobs", :type => "_doc", :id => job_id}) + else + response = {"_id" => job_id, "found" => false} + end + + return response + end + + def get_job(job_id : String) + response = get_job_content(job_id) + + case response + when JSON::Any + job = Job.new(response, job_id) + else + job = nil + end + + return job + end + + private def create(job_content : JSON::Any, job_id : String) + return @client.create( + { + :index => "jobs", :type => "_doc", + :id => job_id, + :body => job_content, + } + ) + end + + private def update(job_content : JSON::Any, job_id : String) + return @client.update( + { + :index => "jobs", :type => "_doc", + :id => job_id, + :body => {:doc => job_content}, + } + ) + end + + # [no use now] add a yaml file to es documents_path + def add(documents_path : String, fullpath_file : String, id : String) + yaml = YAML.parse(File.read(fullpath_file)) + return add(documents_path, yaml, id) + end +end diff --git a/src/scheduler/jobfile_operate.cr b/src/scheduler/jobfile_operate.cr new file mode 100644 index 0000000000000000000000000000000000000000..0e2ccb879515ed94e88452edf53b5e9636bfa87a --- /dev/null +++ b/src/scheduler/jobfile_operate.cr @@ -0,0 +1,209 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "file_utils" +require "json" +require "yaml" + +# require from '/c/lkp-tests/lib/' +require "shellwords" + +if ENV["LKP_SRC"] != "/c/lkp-tests" + raise "ENV LKP_SRC mismatch: #{ENV["LKP_SRC"]} '/c/lkp-tests'" +end + +module Jobfile::Operate + def self.prepare_dir(file_path : String) + file_path_dir = File.dirname(file_path) + if !File.exists?(file_path_dir) + FileUtils.mkdir_p(file_path_dir) + end + end + + def self.valid_shell_variable?(key) + key =~ /^[a-zA-Z_]+[a-zA-Z0-9_]*$/ + end + + def self.create_job_sh(job_sh_content : Array(JSON::Any), path : String) + File.open(path, "w", File::Permissions.new(0o775)) do |file| + file.puts "#!/bin/sh\n\n" + job_sh_content.each do |line| + if line.as_a? + line.as_a.each { |val| file.puts val } + else + file.puts line + end + end + file.puts "\"$@\"" + end + end + + def self.shell_escape(val) + val = val.join "\n" if val.is_a?(Array) + + if val.nil? || val.empty? + return nil + elsif val =~ /^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$/ + return val + elsif !val.includes?("'") && !val.includes?("$") + return "'#{val}'" + elsif !val.includes?('"') + return "\"#{val}\"" + else + return Shellwords.shellescape(val) + end + end + + def self.parse_one(script_lines, key, val) + if valid_shell_variable?(key) + if val.as_h? + return false + end + if val.as_a? 
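+        # array values are joined with newlines inside shell_escape below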
+ value = shell_escape(val.as_a) + else + value = shell_escape(val.to_s) + end + script_lines << "\texport #{key}=" + value if value + end + end + + def self.sh_export_top_env(job_content : Hash) + script_lines = ["export_top_env()", "{"] of String + + job_content.each { |key, val| parse_one(script_lines, key, val) } + + script_lines << "\n" + script_lines << "\t[ -n \"$LKP_SRC\" ] ||" + script_lines << "\texport LKP_SRC=/lkp/${user:-lkp}/src" + script_lines << "}\n\n" + + script_lines = "#{script_lines}" + script_lines = JSON.parse(script_lines) + end + + def self.create_job_cpio(job_content : JSON::Any, base_dir : String) + job_content = job_content.as_h + + # put job2sh in an array + if job_content.has_key?("job2sh") + tmp_job_sh_content = job_content["job2sh"] + + job_sh_array = [] of JSON::Any + tmp_job_sh_content.as_h.each do |_key, val| + job_sh_array += val.as_a + end + else + job_sh_array = [] of JSON::Any + end + + # generate job.yaml + temp_yaml = base_dir + "/#{job_content["id"]}/job.yaml" + prepare_dir(temp_yaml) + + # no change to content { "#! jobs/pixz.yaml": null } + # - this will create a <'#! jobs/pixz.yaml':> in the yaml file + # - but the orange is <#! jobs/pixz.yaml> in the user job.yaml + # tested : no effect to job.sh + File.open(temp_yaml, "w") do |file| + YAML.dump(job_content, file) + end + + # generate unbroken job shell content + sh_export_top_env = sh_export_top_env(job_content) + job_sh_content = sh_export_top_env.as_a + job_sh_array + + # generate job.sh + job_sh = base_dir + "/#{job_content["id"]}/job.sh" + create_job_sh(job_sh_content.to_a, job_sh) + + job_dir = base_dir + "/#{job_content["id"]}" + + if job_sh_array.empty? + lkp_src = prepare_lkp_tests(job_content["lkp_initrd_user"], + job_content["os_arch"]) + + cmd = "#{lkp_src}/sbin/create-job-cpio.sh #{temp_yaml}" + idd = `#{cmd}` + else + cmd = "./create-job-cpio.sh #{job_dir}" + idd = `#{cmd}` + end + + # if the create job cpio failed, what to do? 
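+    # for now the ERROR output is only logged and the job goes on; a missing
+    # or incomplete job.cgz then surfaces when the testbox downloads it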
+ if idd.match(/ERROR/) + puts idd + end + # create result dir and copy job.sh, job.yaml and job.cgz to result dir + src_dir = File.dirname(temp_yaml) + dst_dir = File.join("/srv", job_content["result_root"].to_s) + FileUtils.mkdir_p(dst_dir) + # the job.yaml is not final version + files = ["#{src_dir}/job.sh", + "#{src_dir}/job.yaml", + "#{src_dir}/job.cgz"] + FileUtils.cp(files, dst_dir) + end + + def self.unzip_cgz(source_path : String, target_path : String) + FileUtils.mkdir_p(target_path) + cmd = "cd #{target_path};gzip -dc #{source_path}|cpio -id" + system cmd + end + + def self.prepare_lkp_tests(lkp_initrd_user = "latest", os_arch = "aarch64") + expand_dir_base = File.expand_path(Kemal.config.public_folder + + "/expand_cgz") + FileUtils.mkdir_p(expand_dir_base) + + # update lkp-xxx.cgz if they are different + target_path = update_lkp_when_different(expand_dir_base, + lkp_initrd_user, + os_arch) + + # delete oldest lkp, if exists too much + del_lkp_if_too_much(expand_dir_base) + + return "#{target_path}/lkp/lkp/src" + end + + # list *.cgz (lkp initrd), sorted in reverse time order + # and delete 10 oldest cgz file, when exists more than 100 + # also delete the DIR expand from the cgz file + def self.del_lkp_if_too_much(base_dir) + file_list = `ls #{base_dir}/*.cgz -tr` + file_array = file_list.split("\n") + if file_array.size > 100 + 10.times do |index| + FileUtils.rm_rf(file_array[index]) + FileUtils.rm_rf(file_array[index].chomp(".cgz")) + end + end + end + + def self.update_lkp_when_different(base_dir, lkp_initrd_user, os_arch) + target_path = base_dir + "/#{lkp_initrd_user}-#{os_arch}" + bak_lkp_filename = target_path + ".cgz" + source_path = "#{SRV_INITRD}/lkp/#{lkp_initrd_user}/lkp-#{os_arch}.cgz" + + if File.exists?(bak_lkp_filename) + # no need update + return target_path if FileUtils.cmp(source_path, bak_lkp_filename) + + # remove last expanded lkp initrd DIR + FileUtils.rm_rf(target_path) + end + + # bakup user lkp-xxx.cgz (for next time check) + FileUtils.cp(source_path, bak_lkp_filename) + unzip_cgz(bak_lkp_filename, target_path) + return target_path + end + + def self.auto_submit_job(job_file, override_parameter) + cmd = "#{ENV["LKP_SRC"]}/sbin/submit SCHED_HOST=localhost" + cmd += " SCHED_PORT=#{ENV["SCHED_PORT"]}" + cmd += " -s '#{override_parameter}' #{job_file}" + puts `#{cmd}` + end +end diff --git a/src/scheduler/redis_client.cr b/src/scheduler/redis_client.cr new file mode 100644 index 0000000000000000000000000000000000000000..b816d6758271b1e1ece2bbc86d66e1e8a33edee0 --- /dev/null +++ b/src/scheduler/redis_client.cr @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "json" +require "redis" + +require "./constants" +require "../lib/job" + +class Redis::Client + class_property :client + HOST = (ENV.has_key?("REDIS_HOST") ? ENV["REDIS_HOST"] : JOB_REDIS_HOST) + PORT = (ENV.has_key?("REDIS_PORT") ? ENV["REDIS_PORT"] : JOB_REDIS_PORT).to_i32 + + def initialize(host = HOST, port = PORT) + @client = Redis::PooledClient.new(host: host, port: port, pool_size: 25, pool_timeout: 0.01) + end + + def hash_set(key : String, field, value) + @client.hset(key, field.to_s, value.to_s) + end + + def hash_get(key : String, field) + @client.hget(key, field.to_s) + end + + def hash_del(key : String, field) + @client.hdel(key, field.to_s) + end + + def get_job(job_id : String) + job_hash = @client.hget("sched/id2job", job_id) + if !job_hash + raise "Get job (id = #{job_id}) from redis failed." 
+    end
+    Job.new(JSON.parse(job_hash), job_id)
+  end
+
+  def update_wtmp(testbox : String, wtmp_hash : Hash)
+    @client.hset("sched/tbox_wtmp", testbox, wtmp_hash.to_json)
+  end
+
+  def update_job(job_content : JSON::Any | Hash)
+    job_id = job_content["id"].to_s
+
+    job = get_job(job_id)
+    job.update(job_content)
+
+    hash_set("sched/id2job", job_id, job.dump_to_json)
+  end
+
+  def set_job(job : Job)
+    hash_set("sched/id2job", job.id, job.dump_to_json)
+  end
+
+  def remove_finished_job(job_id : String)
+    @client.hdel("sched/id2job", job_id)
+  end
+end
diff --git a/src/scheduler/scheduler.cr b/src/scheduler/scheduler.cr
new file mode 100644
index 0000000000000000000000000000000000000000..d5852d3a48b6c1ab4f8612846b32395196dee91b
--- /dev/null
+++ b/src/scheduler/scheduler.cr
@@ -0,0 +1,198 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+require "kemal"
+
+require "../lib/sched"
+
+# -------------------------------------------------------------------------------------------
+# end_user:
+# - restful API [post "/submit_job"] to submit a job to the scheduler
+# -- json formatted [job] in the request data
+#
+# -------------------------------------------------------------------------------------------
+# runner:
+# - restful API [get "/boot.ipxe/mac/52-54-00-12-34-56"] to get a job for an ipxe qemu-runner
+# -- when a job is found, return <#!ipxe script with job.cgz, kernel and initrd>
+# -- when no job is found, return <#!ipxe "no job" message>
+#
+# - restful API [put "/set_host_mac?hostname=myhostname&mac=ff-ff-ff-ff-ff-ff"] to report a testbox's {mac => hostname}
+# - restful API [get "/job_initrd_tmpfs/11/job.cgz"] to download job(11)'s job.cgz file
+# - restful API [get "/~lkp/cgi-bin/lkp-jobfile-append-var"] to report job vars that should be appended
+# - restful API [get "/~lkp/cgi-bin/lkp-cluster-sync"] for nodes to request the cluster state
+# - restful API [get "/~lkp/cgi-bin/lkp-post-run"] to hand the finished job over to the "extract_stats" queue and remove it from redis key "sched/id2job"
+#
+# -------------------------------------------------------------------------------------------
+# scheduler:
+# - use [redis incr] as job_id, a 64bit int number
+# - restful API [get "/"] default echo
+#
+module Scheduler
+  VERSION = "0.2.0"
+
+  sched = Sched.new
+
+  # for debug (maybe kemal debug|logger does better)
+  def self.debug_message(env, response)
+    puts "\n\n"
+    puts ">> #{env.request.remote_address}"
+    puts "<< #{response}"
+  end
+
+  # echo alive
+  get "/" do |_|
+    "LKP Alive! The time is #{Time.local}, version = #{VERSION}"
+  end
+
+  # for XXX_runner to get a job
+  #
+  #  /boot.ipxe/mac/${mac}
+  #  /boot.xxx/host/${hostname}
+  #  /boot.yyy/mac/${mac}
+  get "/boot.:boot_type/:parameter/:value" do |env|
+    response = sched.find_job_boot(env)
+
+    debug_message(env, response)
+
+    response
+  end
+
+  # /~lkp/cgi-bin/gpxelinux.cgi?hostname=:hostname&mac=:mac&last_kernel=:last_kernel
+  get "/~lkp/cgi-bin/gpxelinux.cgi" do |env|
+    response = sched.find_next_job_boot(env)
+
+    debug_message(env, response)
+
+    response
+  end
+
+  # enqueue
+  #  - echo job_id to caller
+  #  -- job_id = "0" ?
means failed + post "/submit_job" do |env| + job_messages = sched.submit_job(env) + + job_messages.each do |job_message| + puts job_message.to_json + end + + job_messages.to_json + end + + # file download server + get "/job_initrd_tmpfs/:job_id/:job_package" do |env| + job_id = env.params.url["job_id"] + job_package = env.params.url["job_package"] + file_path = ::File.join [Kemal.config.public_folder, job_id, job_package] + + puts %({"job_id": "#{job_id}", "job_state": "download"}) + debug_message(env, file_path) + + send_file env, file_path + end + + # client(runner) report its hostname and mac + # - when a runner pull jobs with it's mac infor, scheduler find out what hostname is it + # /set_host_mac?hostname=$hostname&mac=$mac (mac like ef-01-02-03-04-05) + # add a => + # + # curl -X PUT "http://localhost:3000/set_host_mac?hostname=wfg&mac=00-01-02-03-04-05" + put "/set_host_mac" do |env| + if (client_hostname = env.params.query["hostname"]?) && (client_mac = env.params.query["mac"]?) + sched.set_host_mac(client_mac, client_hostname) + + debug_message(env, "Done") + + "Done" + else + "No yet!" + end + end + + # curl -X PUT "http://localhost:3000/del_host_mac?mac=00-01-02-03-04-05" + put "/del_host_mac" do |env| + if client_mac = env.params.query["mac"]? + sched.del_host_mac(client_mac) + + debug_message(env, "Done") + + "Done" + else + "No yet!" + end + end + + # client(runner) report job's status + # /~lkp/cgi-bin/lkp-jobfile-append-var + # ?job_file=/lkp/scheduled/job.yaml&job_state=running&job_id=10 + # ?job_file=/lkp/scheduled/job.yaml&job_state=post_run&job_id=10 + # ?job_file=/lkp/scheduled/job.yaml&loadavg=0.28 0.82 0.49 1/105 3389&start_time=1587725398&end_time=1587725698&job_id=10 + get "/~lkp/cgi-bin/lkp-jobfile-append-var" do |env| + # get job_id from request + debug_message(env, "Done") + + sched.update_job_parameter(env) + "Done" + end + + # node in cluster requests cluster state + # wget 'http://localhost:3000/~lkp/cgi-bin/lkp-cluster-sync?job_id=&state=' + # 1) state : "wait_ready" + # response: return "abort" if one node state is "abort", + # "ready" if all nodes are "ready", "retry" otherwise. + # 2) state : wait_finish + # response: return "abort" if one node state is "abort", + # "finish" if all nodes are "finish", "retry" otherwise. + # 3) state : abort | failed + # response: update the node state to "abort", + # return all nodes states at this moment. + # 4) state : write_state + # response: add "roles" and "ip" fields to cluster state, + # return all nodes states at this moment. + # 5) state : roles_ip + # response: get "server ip" from cluster state, + # return "server=". + get "/~lkp/cgi-bin/lkp-cluster-sync" do |env| + response = sched.request_cluster_state(env) + + debug_message(env, response) + + response + end + + # client(runner) report job post_run finished + # /~lkp/cgi-bin/lkp-post-run?job_file=/lkp/scheduled/job.yaml&job_id=40 + # curl "http://localhost:3000/~lkp/cgi-bin/lkp-post-run?job_file=/lkp/scheduled/job.yaml&job_id=40" + get "/~lkp/cgi-bin/lkp-post-run" do |env| + # get job_id from request + job_id = env.params.query["job_id"]? 
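+    # close_job raises when the es update or the task hand-over fails, so a
+    # failed close surfaces to the runner as a non-200 response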
+ if job_id + debug_message(env, "Done") + + sched.close_job(job_id) + end + "Done" + end + + get "/~lkp/cgi-bin/lkp-wtmp" do |env| + debug_message(env, "Done") + + sched.update_tbox_wtmp(env) + "Done" + end + + get "/~lkp/cgi-bin/report_ssh_port" do |env| + testbox = env.params.query["tbox_name"] + ssh_port = env.params.query["ssh_port"].to_s + job_id = env.params.query["job_id"].to_s + + if testbox && ssh_port + debug_message(env, "Done") + + sched.report_ssh_port(testbox, ssh_port) + end + + puts %({"job_id": "#{job_id}", "state": "set ssh port", "ssh_port": "#{ssh_port}", "tbox_name": "#{testbox}"}) + "Done" + end +end diff --git a/src/shard.yml b/src/shard.yml new file mode 100644 index 0000000000000000000000000000000000000000..db3946fcc73e6159812083fdf0185ffc336783f9 --- /dev/null +++ b/src/shard.yml @@ -0,0 +1,28 @@ +name: scheduler +version: 0.1.0 + +authors: + - tongqunfeng + +targets: + scheduler: + main: scheduler.cr + +crystal: 0.33.0 + +license: MIT + +dependencies: + kemal: + github: kemalcr/kemal + redis: + github: stefanwille/crystal-redis + version: ~> 2.5.3 + elasticsearch-crystal: + github: paktek123/elasticsearch-crystal + version: ~> 0.14 + any_merge: + github: icyleaf/any_merge + branch: master + deep-merge: + gitlab: peterhoeg/deep-merge.cr diff --git a/src/spec/scheduler/boot_spec.cr b/src/spec/scheduler/boot_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..783ea2a75538dcafb2d13aaed091c70228a9201d --- /dev/null +++ b/src/spec/scheduler/boot_spec.cr @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "spec" +require "file_utils" + +require "kemal" +require "scheduler/scheduler/boot" +require "scheduler/constants.cr" + +def create_request_and_return_io_and_context(handler, request) + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + context = HTTP::Server::Context.new(request, response) + handler.call(context) + response.close + io.rewind + {io, context} +end + +describe Scheduler::Boot do + describe "ipxe boot for global" do + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + request = HTTP::Request.new("GET", "/boot.ipxe/mac/52-54-00-12-34-56") + context = HTTP::Server::Context.new(request, response) + + resources = Scheduler::Resources.new + it "job content has no os, respon default debian" do + job_content = JSON.parse(%({"test": "test no os","id": 10, "arch": "aarch64"})) + respon, _ = Scheduler::Boot.respon(job_content, context, resources) + respon_list = respon.split("\n") + + respon_list[0].should eq("#!ipxe") + respon_list[2].should contain("debian/aarch64/sid") + end + + it "job content has os, os_arch, os_version, respon the spliced value" do + job_content = JSON.parse(%({"id": 10, "arch": "aarch64", "os": "openeuler", "os_arch": "aarch64", "os_version": "current"})) + respon, _ = Scheduler::Boot.respon(job_content, context, resources) + respon_list = respon.split("\n") + os_dir = job_content["os"].to_s.downcase + "/" + job_content["os_arch"].to_s.downcase + "/" + job_content["os_version"].to_s.downcase + + respon_list[0].should eq("#!ipxe") + respon_list[2].should contain(os_dir) + respon_list[5].should contain(os_dir) + respon_list[respon_list.size - 2].should eq("boot") + end + + it "respon should contain the value of constants.cr" do + job_content = JSON.parse(DEMO_JOB) + respon, _ = Scheduler::Boot.respon(job_content, context, resources) + respon_list = respon.split("\n") + + respon_list[2].should 
contain(OS_HTTP_HOST) + respon_list[2].should contain(OS_HTTP_PORT.to_s) + respon_list[3].should contain(INITRD_HTTP_HOST) + respon_list[3].should contain(INITRD_HTTP_PORT.to_s) + respon_list[4].should contain(SCHED_HOST) + respon_list[4].should contain(SCHED_PORT.to_s) + respon_list[5].should contain(OS_HTTP_HOST) + end + + it "job has program dependence, find and return the initrd path to depends program" do + job_content = JSON.parse(%({"id": 10, "arch": "aarch64", "os": "test", "os_arch": "test", "os_version": "test","os_mount": "initramfs", "pp": {"want_program": " is valid because relate file exist"}})) + + Dir.mkdir_p("/#{ENV["LKP_SRC"]}/distro/depends/") + File.touch("/#{ENV["LKP_SRC"]}/distro/depends/want_program") + dir_path = "initrd/deps/test/test/test/" + Dir.mkdir_p("/srv/#{dir_path}") + File.touch("/srv/#{dir_path}want_program.cgz") + + respon, _ = Scheduler::Boot.respon(job_content, context, resources) + respon_list = respon.split("\n") + + FileUtils.rm_rf("/#{ENV["LKP_SRC"]}/distro/depends/want_program") + + respon_list[0].should eq("#!ipxe") + respon_list[2].should contain("#{dir_path}want_program.cgz") + end + + it "job has pkg dependence, find and return the initrd path to depends pkg" do + job_content = JSON.parse(%({"id": 10, "arch": "aarch64", "os": "test", "os_arch": "test", "os_version": "test","os_mount": "initramfs", "pp": {"want_program": " is valid because relate file exist"}})) + + Dir.mkdir_p("/#{ENV["LKP_SRC"]}/pkg/") + File.touch("/#{ENV["LKP_SRC"]}/pkg/want_program") + dir_path = "initrd/pkg/test/test/test/" + Dir.mkdir_p("/srv/#{dir_path}") + File.touch("/srv/#{dir_path}want_program.cgz") + + respon, _ = Scheduler::Boot.respon(job_content, context, resources) + respon_list = respon.split("\n") + + FileUtils.rm_rf("/#{ENV["LKP_SRC"]}/pkg/want_program") + + respon_list[0].should eq("#!ipxe") + respon_list[2].should contain("#{dir_path}want_program.cgz") + end + + it "job has program dependence, but not find relate file, ignore it" do + job_content = JSON.parse(%({"id": 10, "arch": "aarch64", "pp": {"want_program": " is invalid because relate file not exist"}})) + respon, _ = Scheduler::Boot.respon(job_content, context, resources) + respon_list = respon.split("\n") + file_name = "want_program.cgz" + + respon_list[0].should eq("#!ipxe") + respon_list[2].should_not contain(file_name) + end + end +end diff --git a/src/spec/scheduler/dequeue_spec.cr b/src/spec/scheduler/dequeue_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..9fe195a74a1a39debfcb4ffd0ecb76cb77ecdf0a --- /dev/null +++ b/src/spec/scheduler/dequeue_spec.cr @@ -0,0 +1,130 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
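+
+# Illustrative sketch (not part of the spec; names taken from the tests
+# below) of the dequeue flow under test: a pending job id is popped from the
+# per-testbox queue and pushed to the running queue.
+#
+#   resources = Scheduler::Resources.new
+#   resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG)
+#   resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG)
+#   job_id, _ = Scheduler::Dequeue.respon_testbox("tcm001", context, resources).not_nil!
+#   # on success: id leaves "sched/jobs_to_run/tcm001", enters "sched/jobs_running"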
+ +require "spec" + +require "scheduler/constants" +require "scheduler/scheduler/resources" +require "scheduler/scheduler/dequeue" +require "kemal/src/kemal/ext/response" + +def gen_context(url : String) + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + headers = HTTP::Headers{"content" => "application/json"} + request = HTTP::Request.new("GET", url, headers) + context = HTTP::Server::Context.new(request, response) + return context +end + +describe Scheduler::Dequeue do + # there has pending testgroup queue + # testbox search the job in testgroup, testbox => testgroup[-n] + describe "testbox queue dequeue respon" do + it "return job_id > 0, when find a pending job in special testbox queue" do + context = gen_context("/boot.ipxe/mac/ef-01-02-03-0f-ee") + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + raw_es = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + + testbox = "tcm001" + + job_list = "sched/jobs_to_run/#{testbox}" + raw_redis.del(job_list) + + # running_list = "sched/jobs_running" and job_info_list = "sched/id2job" + raw_redis.del("sched/jobs_running") + raw_redis.del("sched/id2job") + + raw_redis.zadd(job_list, "1.1", "1") + raw_redis.zadd(job_list, "1.2", "2") + + job_json = JSON.parse({"testbox" => "#{testbox}"}.to_json) + raw_es.create( + { + :index => "jobs", + :type => "_doc", + :id => "1", + :body => job_json, + } + ) + + before_dequeue_time = Time.local.to_unix_f + job_id, _ = Scheduler::Dequeue.respon_testbox(testbox, context, resources).not_nil! + (job_id).should eq("1") + + # check redis data at pending queue + first_job = raw_redis.zrange(job_list, 0, 0) + (first_job[0]).should eq("2") + + # check redis data at running queue + job_index_in_running = raw_redis.zrank("sched/jobs_running", job_id) + running_job = raw_redis.zrange("sched/jobs_running", job_index_in_running, job_index_in_running, true) + (running_job[1].to_s.to_f64).should be_close(before_dequeue_time, 0.1) + + # check append info + append_info = raw_redis.hget("sched/id2job", job_id) + respon = JSON.parse(append_info.not_nil!) + (respon["testbox"]).should eq("tcm001") + end + + it "return job_id = 0, when there has no this testbox (testgroup) queue" do + context = gen_context("/boot.ipxe/mac/ef-01-02-03-0f-ee") + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + + testbox = "tcm001" + + job_list = "sched/jobs_to_run/#{testbox}" + raw_redis.del(job_list) + raw_redis.del("sched/jobs_running") + + job_id, _ = Scheduler::Dequeue.respon_testbox(testbox, context, resources).not_nil! 
+ (job_id).should eq("0") + + # check redis data at running queue + job_index_in_running = raw_redis.zrange("sched/jobs_running", 0, -1) + (job_index_in_running.size).should eq(0) + end + + it "raise exception, when es not has this job" do + context = gen_context("/boot.ipxe/mac/ef-01-02-03-0f-ee") + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + raw_es = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + + testbox = "tcm001" + + job_list = "sched/jobs_to_run/#{testbox}" + raw_redis.del(job_list) + + # running_list = "sched/jobs_running" and job_info_list = "sched/id2job" + raw_redis.del("sched/jobs_running") + raw_redis.del("sched/id2job") + + raw_redis.zadd(job_list, "1.1", "1") + raw_redis.zadd(job_list, "1.2", "2") + + # delete :index to make the specific exception raise + raw_es.indices.delete({:index => "jobs"}) + + begin + Scheduler::Dequeue.respon_testbox(testbox, context, resources) + rescue e : Exception + (e.to_s).should eq("Invalid job (id=1) in es") + end + end + end +end diff --git a/src/spec/scheduler/elasticsearch_client_spec.cr b/src/spec/scheduler/elasticsearch_client_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..81c28409bfebb782a5e06e7db59a4d467d257f7d --- /dev/null +++ b/src/spec/scheduler/elasticsearch_client_spec.cr @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "spec" +require "scheduler/elasticsearch_client" +require "scheduler/constants" + +describe Elasticsearch::Client do + describe "add job" do + it "add job without job id success" do + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "jobs"}) + + es_client = Elasticsearch::Client.new(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + es_client.add(JOB_INDEX_TYPE, {"foo" => "bar", "result_root" => "iperf"}, "1") + + # when not find + # { "error" => {"root_cause" => [{"type" => "index_not_found_exception",..."index" => "jobs"}], + # "type" => "index_not_found_exception",..."index" => "jobs"}, + # "status" => 404} + # when find + # {"_index" => "jobs", "_type" => "job", "_id" => "1", ..."found" => true, "_source" => {"foo" => "bar"}} + respon = raw_es_client.get({:index => "jobs", :id => "1"}) + (respon["_id"]).should_not be_nil + (respon["_source"]["id"]?).should_not be_nil + (respon["_source"]["id"].to_s).should eq("1") + + raw_es_client.indices.delete({:index => "jobs"}) + end + + it "add job with job id success" do + es_client = Elasticsearch::Client.new(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "jobs"}) + es_client.add(JOB_INDEX_TYPE, {"foo" => "bar", "id" => "3", "result_root" => nil}, "2") + + respon = raw_es_client.get({:index => "jobs", :id => "2"}) + (respon["_id"]).should_not be_nil + (respon["_source"]["id"]?).should_not be_nil + (respon["_source"]["id"].to_s).should eq("2") + + raw_es_client.indices.delete({:index => "jobs"}) + end + + it "get job content with right job id" do + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "jobs"}) + + es_client = 
Elasticsearch::Client.new(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + test_json = JSON.parse({"foo" => "bar", "id" => "10", "result_root" => nil}.to_json) + + raw_es_client.create( + { + :index => "jobs", + :type => "_doc", + :id => "10", + :body => test_json, + } + ) + + respon = es_client.get_job_content("10") + (respon).should_not be_nil + (respon.not_nil!["id"]?).should_not be_nil + (respon.not_nil!["id"].to_s).should eq("10") + + raw_es_client.indices.delete({:index => "jobs"}) + end + + it "get job content with wrong job id" do + es_client = Elasticsearch::Client.new(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "jobs"}) + + respon = es_client.get_job_content("10") + (respon.not_nil!["id"]?).should be_nil + end + end +end diff --git a/src/spec/scheduler/enqueue_spec.cr b/src/spec/scheduler/enqueue_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..d973a0ceb359553d90678b2e4025a6633f31beda --- /dev/null +++ b/src/spec/scheduler/enqueue_spec.cr @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "spec" + +require "scheduler/constants" +require "scheduler/redis_client" +require "scheduler/scheduler/enqueue" +require "kemal/src/kemal/ext/response" +require "kemal/src/kemal/ext/context" + +def create_post_context(hash : Hash) + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + headers = HTTP::Headers{"content" => "application/json"} + body = hash.to_json + request = HTTP::Request.new("POST", "/submit_job", headers, body) + context = HTTP::Server::Context.new(request, response) + return context +end + +describe Scheduler::Enqueue do + describe "assign testbox | testgroup enqueue respon" do + it "job has property testbox, but no test-group, save to testgroup_testbox queue" do + context = create_post_context({:testcase => "1234", :testbox => "myhost"}) + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + # here test for testbox == testgroup + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + job_list = "testbox_myhost" + raw_redis.zremrangebyrank(job_list, 0, -1) + job_list = "sched/jobs_to_run/myhost" + raw_redis.zremrangebyrank(job_list, 0, -1) + + job_id, _ = Scheduler::Enqueue.respon(context, resources) + job_list = "sched/jobs_to_run/myhost" + job_info = raw_redis.zrange(job_list, 0, -1, true) + (job_id).should eq(job_info[0]) + + job_list = "testbox_myhost" + job_info = raw_redis.zrange(job_list, 0, -1, true) + (job_info.size).should eq(0) + end + + it "job has property testbox and test-group, save to sched/jobs_to_run/xxx queue not to testbox_xxx" do + context = create_post_context({:testcase => "1234", :testbox => "mygroup-1", "test-group" => "mygroup"}) + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + job_list = "sched/jobs_to_run/mygroup" + raw_redis.zremrangebyrank(job_list, 0, -1) + job_list = "testbox_myhost" + raw_redis.zremrangebyrank(job_list, 0, -1) + + job_id, _ = Scheduler::Enqueue.respon(context, resources) + job_list = "testbox_myhost" + job_info_b = raw_redis.zrange(job_list, 0, -1, true) + job_list = "sched/jobs_to_run/mygroup" + 
job_info_g = raw_redis.zrange(job_list, 0, -1, true) + + (job_id).should eq(job_info_g[0]) + (job_info_b.size).should eq 0 + end + end +end diff --git a/src/spec/scheduler/jobfile_operate_spec.cr b/src/spec/scheduler/jobfile_operate_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..8f64ce9d5a0334237c41cab21109e6a150d2a444 --- /dev/null +++ b/src/spec/scheduler/jobfile_operate_spec.cr @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "spec" +require "scheduler/constants" +require "scheduler/jobfile_operate" + +describe Jobfile::Operate do + describe "update" do + file_path = "test/update.yaml" + section = "#! job" + kv = {"id" => "123456"} + + Jobfile::Operate.prepare_dir(file_path) + if File.exists?(file_path) + FileUtils.rm(file_path) + end + + it "When no find, then append at end" do + File.open(file_path, "w") do |f| + f.close + end + + Jobfile::Operate.update(file_path, section, kv) + + linepre = "" + File.each_line(file_path) do |line| + linepre = line + end + linepre.should eq("#{kv.first_key}: #{kv[kv.first_key]}") + FileUtils.rm(file_path) + end + + it "When find, then replace it" do + File.open(file_path, "w") do |f| + f.puts("id: 000000") + end + Jobfile::Operate.update(file_path, section, kv) + + linepre = "" + File.each_line(file_path) do |line| + match_info = line.match(/id: (.*)/) + if match_info + linepre = "id: #{match_info.[1]}" + end + end + + linepre.should eq("#{kv.first_key}: #{kv[kv.first_key]}") + FileUtils.rm(file_path) + end + + it "When no find, but find section, then append in the section" do + File.open(file_path, "w") do |f| + f.puts("#! job/") + f.puts("#! other") + end + Jobfile::Operate.update(file_path, section, kv) + + line_index = 0 + File.each_line(file_path) do |line| + match_info = line.match(/id: (.*)/) + line_index = line_index + 1 + if match_info + break + end + end + + line_index.should eq(2) + FileUtils.rm(file_path) + end + + # is this the real specification? + it "When find key & section, but they are not matched, ignore now" do + File.open(file_path, "w") do |f| + f.puts("#! job") + f.puts("#! other") + f.puts("id: 000000") + end + Jobfile::Operate.update(file_path, section, kv) + + line_index = 0 + File.each_line(file_path) do |line| + match_info = line.match(/id: (.*)/) + line_index = line_index + 1 + if match_info + break + end + end + + line_index.should eq(3) + FileUtils.rm(file_path) + end + end + + describe "create_job_cpio" do + # when debug this,it seems to execute "chmod +x /c/lkp-tests/sbin/create-job-cpio.sh" to get permission + it "from jobid create job.cgz" do + job_id = "100" + fs_root = "#{File.real_path(".")}/public" + + old_dir = ::File.join [fs_root, job_id] + FileUtils.rm_r(old_dir) if File.exists?(old_dir) + + job_hash = JSON.parse(DEMO_JOB).as_h + job_hash = job_hash.merge({"result_root" => fs_root, "id" => job_id}) + job_content = JSON.parse(job_hash.to_json) + + Jobfile::Operate.create_job_cpio(job_content, fs_root) + (File.exists?(::File.join [old_dir, "job.cgz"])).should be_true + FileUtils.rm_r(old_dir) if File.exists?(old_dir) + end + end +end diff --git a/src/spec/scheduler/monitor_spec.cr b/src/spec/scheduler/monitor_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..22b9bc59dbf48fb078a2a8a030a2a4c5d6232250 --- /dev/null +++ b/src/spec/scheduler/monitor_spec.cr @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. 
All rights reserved. + +require "spec" +require "scheduler/scheduler/monitor" +require "scheduler/jobfile_operate" +require "scheduler/constants" +require "json" + +def gen_put_context(url : String) + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + headers = HTTP::Headers{"content" => "application/json"} + request = HTTP::Request.new("PUT", url, headers) + context = HTTP::Server::Context.new(request, response) + return context +end + +describe Scheduler::Monitor do + describe "job maintain" do + it "recieve job parameters, then update the job parameter in redis" do + context = gen_put_context("/~lkp/cgi-bin/lkp-jobfile-append-var?job_file=/lkp/scheduled/job.yaml&job_id=100&loadavg=0.28 0.82 0.49 1/105 3389&start_time=1587725398&end_time=1587725698") + parameter_key = "start_time" + + # job_id = context.request.query_params["job"] + job_id = "100" + parameter_value = context.request.query_params[parameter_key] + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + raw_redis.del("sched/id2job") + + # add 100, this job contains { testbox: wfg-e595, tbox_group: wfg-e595} + resources.fsdir_root("./public") + + job_content = {"id" => job_id, parameter_key => parameter_value} + Scheduler::Monitor.update_job_parameter(job_content, context, resources) + + response = resources.@redis_client.not_nil!.get_job_content(job_id) + (response[parameter_key]).should eq("1587725398") + end + + it "when job finished, update the job status" do + context = gen_put_context("/~lkp/cgi-bin/lkp-post-run?job_file=/lkp/scheduled/job.yaml&job_id=1") + job_id = context.request.query_params["job_id"] + + running_queue = "sched/jobs_running" + result_queue = "queue/extract_stats" + job_info_queue = "sched/id2job" + + resources = Scheduler::Resources.new + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_redis_client = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + + raw_redis_client.del(running_queue) + raw_redis_client.del(result_queue) + raw_redis_client.hset(job_info_queue, job_id, "{\"testbox\" : \"test\", \"id\" : #{job_id}}") + raw_redis_client.zrem(result_queue, job_id) + priority_as_score = Time.local.to_unix_f + raw_redis_client.zadd(running_queue, priority_as_score, job_id) + raw_es_client.indices.delete({:index => "jobs"}) + resources.@es_client.not_nil!.set_job_content(JSON.parse(DEMO_JOB)) + + Scheduler::Monitor.update_job_when_finished(job_id, resources) + + respon = resources.@es_client.not_nil!.get_job_content(job_id) + (respon["testbox"]).should eq("test") + (respon["id"]).should eq(job_id.to_i) + + running_job_count = raw_redis_client.zcount(running_queue, 0, -1) + (running_job_count).should eq 0 + + rusult_job = raw_redis_client.zrange(result_queue, 0, -1, true) + (rusult_job[0]).should eq job_id + + job_info = raw_redis_client.hget(job_info_queue, job_id) + job_info.should eq nil + end + end +end diff --git a/src/spec/scheduler/redis_client_spec.cr b/src/spec/scheduler/redis_client_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..b88102d73a9c370e39c838bb2780ee1a14bee62c --- /dev/null +++ b/src/spec/scheduler/redis_client_spec.cr @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
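+
+# Illustrative sketch (not part of the spec) of the enqueue behavior verified
+# below: add2queue() stores the id in a sorted set scored with the current
+# unix time, so earlier submissions rank first.
+#
+#   redis_client = Redis::Client.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG)
+#   id = redis_client.get_new_job_id
+#   redis_client.add2queue("test", id) # score ~= Time.local.to_unix_f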
+ +require "spec" +require "scheduler/redis_client" +require "scheduler/constants" + +describe Redis::Client do + describe "enqueue" do + it "enqueue success" do + redis_client = Redis::Client.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + id = redis_client.get_new_job_id + + before_add_priority = Time.local.to_unix_f + redis_client.add2queue("test", id) + + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + index = raw_redis.zrank("test", id) + + index.should_not be_nil + + # job priority is more later + respon = raw_redis.zrange("test", index, index, true) + respon[1].to_s.to_f64.should be_close(before_add_priority, 0.1) + end + end +end diff --git a/src/spec/scheduler/tools_spec.cr b/src/spec/scheduler/tools_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..0a71af0e74711712472e521b5c1b19c0e5655a85 --- /dev/null +++ b/src/spec/scheduler/tools_spec.cr @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "spec" +require "scheduler/tools" +require "file_utils" + +describe Public do + describe "hash replace" do + it "can replace k:v" do + hash_old = {"name" => "OldValue"} + hash_new = {"name" => "NewValue"} + hash_result = Public.hash_replace_with(hash_old, hash_new) + + (hash_result["name"]).should eq("NewValue") + end + + it "can add k:v" do + hash_old = {"name2" => "OldValue"} + hash_new = {"name" => "NewValue"} + hash_result = Public.hash_replace_with(hash_old, hash_new) + + (hash_result["name"]).should eq("NewValue") + (hash_result["name2"]).should eq("OldValue") + end + end + + describe "get testgroup name from testbox name" do + it "not end with -[n]" do + testbox_name = "wfg-e595" + testgroup_name = "wfg-e595" + result = Public.get_tbox_group_name(testbox_name) + + result.should eq testgroup_name + end + + it "no -" do + testbox_name = "test_" + testgroup_name = "test_" + result = Public.get_tbox_group_name(testbox_name) + + result.should eq testgroup_name + end + + it "end with -" do + testbox_name = "myhost-" + testgroup_name = "myhost-" + result = Public.get_tbox_group_name(testbox_name) + + result.should eq testgroup_name + end + + it "end with 1 -[n]" do + testbox_name = "hostname-002" + testgroup_name = "hostname" + result = Public.get_tbox_group_name(testbox_name) + + result.should eq testgroup_name + end + + it "instance: vm-pxe-hi1620-1p1g-chief-1338976" do + testbox = "vm-pxe-hi1620-1p1g-chief-1338976" + tbox_group = "vm-pxe-hi1620-1p1g-chief" + result = Public.get_tbox_group_name(testbox) + + result.should eq tbox_group + end + + it "end with 2 -[n]" do + testbox_name = "hostname-001-001" + testgroup_name = "hostname-001" + result = Public.get_tbox_group_name(testbox_name) + + result.should eq testgroup_name + end + end + + describe "unzip cgz" do + it "can unzip the cgz completely in the target_path" do + test_file_tree = "/c/cci/scheduler/test_dir/test_dir/" + FileUtils.mkdir_p(test_file_tree) + + content = "Only if the content of the unzipped file have this content.\nSpec will passed" + File.write("#{test_file_tree}check_file.check", content) + + source_path = "/c/cci/scheduler/test.cgz" + target_path = "/c/cci/scheduler/expand_cgz/1024/" + zip_cmd = "find test_dir | cpio --quiet -o -H newc | gzip > #{source_path}" + system zip_cmd + + FileUtils.rm_rf("/c/cci/scheduler/test_dir") + + Public.unzip_cgz(source_path, target_path) + + if File.exists?("#{target_path}test_dir/test_dir/check_file.check") + read_content = 
File.read("#{target_path}test_dir/test_dir/check_file.check") + else + content = "something was wrong" + read_content = "null" + end + + read_content.should eq content + + FileUtils.rm_rf("/c/cci/scheduler/expand_cgz") + FileUtils.rm("/c/cci/scheduler/test.cgz") + end + end +end diff --git a/src/spec/scheduler/utils_spec.cr b/src/spec/scheduler/utils_spec.cr new file mode 100644 index 0000000000000000000000000000000000000000..e5be37b2fa4bbebff0dd7f34383876ad99a1b8ec --- /dev/null +++ b/src/spec/scheduler/utils_spec.cr @@ -0,0 +1,243 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "spec" +require "scheduler/scheduler/utils" +require "scheduler/tools" + +require "scheduler/constants" + +describe Scheduler::Utils do + describe "ipxe boot for special testbox" do + describe "if the runner has register hostname then find job in testgroup_[hostname] queue" do + it "job_id = 0, respon no job" do + mac = "52-54-00-12-34-56" + remote_host_name = "testHost" + remote_address = "127.0.0.1:5555" + + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + request = HTTP::Request.new("GET", "/boot.ipxe/mac/#{mac}") + + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "report"}) + + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + pending_list = "sched/jobs_to_run/#{remote_host_name}" + raw_redis.del(pending_list) + pending_list = "testbox_#{remote_host_name}" + raw_redis.del(pending_list) + + # request has remote_address + request.remote_address = remote_address + context = HTTP::Server::Context.new(request, response) + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + # register {mac => hostname} + resources.@redis_client.not_nil!.@client.hset("sched/mac2host", mac, remote_host_name) + + time_start = Time.utc + respon = Scheduler::Utils.find_job_boot(mac, context, resources) + time_stop = Time.utc + time_len = time_stop - time_start + + (time_len.seconds).should eq 10 + respon.includes?("No job now").should be_true + end + + it "job_id != 0, respon initrd kernel job in .cgz file with testbox == test-group" do + job_id = "100" + mac = "52-54-00-12-34-56" + remote_host_name = "wfg-e595" + remote_address = "127.0.0.1:5555" + + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + request = HTTP::Request.new("GET", "/boot.ipxe/mac/#{mac}") + + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "report"}) + raw_es_client.indices.delete({:index => "jobs"}) + + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + pending_list = "sched/jobs_to_run/#{remote_host_name}" + raw_redis.del(pending_list) + raw_redis.del("sched/jobs_running") + raw_redis.zadd(pending_list, "1.1", job_id) + + # request has remote_address + request.remote_address = remote_address + context = HTTP::Server::Context.new(request, response) + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + # register runner hostname + resources.@redis_client.not_nil!.@client.hset("sched/mac2host", mac, remote_host_name) + + # client testbox is wfg-e595 + # job's testbox is wfg-e595 + + # add 100, this job contains { testbox: wfg-e595, 
tbox_group: wfg-e595} + resources.fsdir_root("./public") + resources.@es_client.not_nil!.add(JOB_INDEX_TYPE, JSON.parse(DEMO_JOB).as_h, job_id) + + time_start = Time.utc + respon = Scheduler::Utils.find_job_boot(mac, context, resources) + time_stop = Time.utc + time_len = time_stop - time_start + + (time_len.seconds).should eq 0 + + respon_list = respon.split("\n") + respon_list[0].should eq("#!ipxe") + respon_list[2].should start_with("initrd") + respon_list[respon_list.size - 2].should eq("boot") + + pending_list = "sched/jobs_to_run/#{remote_host_name}" + respon = raw_redis.zrange(pending_list, 0, -1, true) + (respon.size).should eq(0) + respon = raw_redis.zrange("sched/jobs_running", 0, -1, true) + (respon.size).should eq(2) + + # validate the testbox updated + # raw_es_client (mybe use raw client is more real test) + respon = resources.@es_client.not_nil!.get(JOB_INDEX_TYPE, job_id) + (respon["_source"]["testbox"]).should eq(remote_host_name) + end + + it "job_id != 0, respon initrd kernel job in .cgz file with test-group != testbox" do + job_id = "100" + testgroup = "wfg-e595" + mac = "52-54-00-12-34-56" + remote_host_name = "wfg-e595-002" + remote_address = "127.0.0.1:5555" + + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + request = HTTP::Request.new("GET", "/boot.ipxe/mac/#{mac}") + + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "report"}) + raw_es_client.indices.delete({:index => "jobs"}) + + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + pending_list = "sched/jobs_to_run/#{testgroup}" + raw_redis.del(pending_list) + raw_redis.del("sched/jobs_running") + raw_redis.del("sched/id2job") + raw_redis.zadd(pending_list, "1.1", job_id) + + # request has remote_address + request.remote_address = remote_address + context = HTTP::Server::Context.new(request, response) + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + # register runner hostname + resources.@redis_client.not_nil!.@client.hset("sched/mac2host", mac, remote_host_name) + + # client testbox is wfg-e595-002 + # job's testbox is wfg-e595 + + # add 100, this job contains { testbox: wfg-e595, tbox_group: wfg-e595} + resources.fsdir_root("./public") + resources.@es_client.not_nil!.add(JOB_INDEX_TYPE, JSON.parse(DEMO_JOB).as_h, job_id) + + time_start = Time.utc + respon = Scheduler::Utils.find_job_boot(mac, context, resources) + time_stop = Time.utc + time_len = time_stop - time_start + + (time_len.seconds).should eq 0 + + respon_list = respon.split("\n") + respon_list[0].should eq("#!ipxe") + respon_list[2].should start_with("initrd") + respon_list[respon_list.size - 2].should eq("boot") + + pending_list = "sched/jobs_to_run/#{testgroup}" + respon = raw_redis.zrange(pending_list, 0, -1, true) + (respon.size).should eq(0) + respon = raw_redis.zrange("sched/jobs_running", 0, -1, true) + (respon.size).should eq(2) + + # respon = resources.@es_client.not_nil!.get(JOB_INDEX_TYPE, job_id) + # (respon["_source"]["testbox"]).should eq(remote_host_name) + respon = resources.@redis_client.not_nil!.get_job_content(job_id) + (respon["testbox"]).should eq(remote_host_name) + end + + it "job_id != 0, respon initrd kernel job in .cgz file with test-group != testbox != client hostname" do + job_id = "100" + testgroup = "wfg-e595" + mac = "52-54-00-12-34-56" + remote_host_name = "wfg-e595-001" + remote_address = 
"127.0.0.1:5555" + + io = IO::Memory.new + response = HTTP::Server::Response.new(io) + request = HTTP::Request.new("GET", "/boot.ipxe/mac/#{mac}") + + raw_es_client = Elasticsearch::API::Client.new({:host => JOB_ES_HOST, :port => JOB_ES_PORT_DEBUG}) + raw_es_client.indices.delete({:index => "report"}) + raw_es_client.indices.delete({:index => "jobs"}) + + raw_redis = Redis.new(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + pending_list = "sched/jobs_to_run/#{testgroup}" + raw_redis.del(pending_list) + raw_redis.del("sched/jobs_running") + raw_redis.zadd(pending_list, "1.1", job_id) + + # request has remote_address + request.remote_address = remote_address + context = HTTP::Server::Context.new(request, response) + + resources = Scheduler::Resources.new + resources.redis_client(JOB_REDIS_HOST, JOB_REDIS_PORT_DEBUG) + resources.es_client(JOB_ES_HOST, JOB_ES_PORT_DEBUG) + + # register runner hostname + resources.@redis_client.not_nil!.@client.hset("sched/mac2host", mac, remote_host_name) + + # client testbox is wfg-e595-002 + # job's testbox is wfg-e595-001 + + # add 100, this job contains { testbox: wfg-e595, tbox_group: wfg-e595} + resources.fsdir_root("./public") + json = JSON.parse(DEMO_JOB) + json_hash = Public.hash_replace_with(json.as_h, {"testbox" => "wfg-e595-002"}) + resources.@es_client.not_nil!.add(JOB_INDEX_TYPE, json_hash, job_id) + + time_start = Time.utc + respon = Scheduler::Utils.find_job_boot(mac, context, resources) + time_stop = Time.utc + time_len = time_stop - time_start + + (time_len.seconds).should eq 0 + + respon_list = respon.split("\n") + respon_list[0].should eq("#!ipxe") + respon_list[2].should start_with("initrd") + respon_list[respon_list.size - 2].should eq("boot") + + pending_list = "sched/jobs_to_run/#{testgroup}" + respon = raw_redis.zrange(pending_list, 0, -1, true) + (respon.size).should eq(0) + respon = raw_redis.zrange("sched/jobs_running", 0, -1, true) + (respon.size).should eq(2) + + # respon = resources.@es_client.not_nil!.get(JOB_INDEX_TYPE, job_id) + # (respon["_source"]["testbox"]).should eq(remote_host_name) + respon = resources.@redis_client.not_nil!.get_job_content(job_id) + (respon["testbox"]).should eq(remote_host_name) + end + end + end +end diff --git a/src/taskqueue.cr b/src/taskqueue.cr new file mode 100644 index 0000000000000000000000000000000000000000..b9e921bd1e575b3d50bcc4dca500f58eab452b97 --- /dev/null +++ b/src/taskqueue.cr @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0 +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. + +require "./taskqueue/taskqueue" + +taskqueue = TaskQueue.new +taskqueue.run diff --git a/src/taskqueue/constants.cr b/src/taskqueue/constants.cr new file mode 100644 index 0000000000000000000000000000000000000000..c7023829531a439d8ec33065539d5f194ee0a436 --- /dev/null +++ b/src/taskqueue/constants.cr @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0 +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
+
+TASKQUEUE_PORT = 3060
+
+QUEUE_NAME_BASE = "queues"
+
+REDIS_HOST = "172.17.0.1"
+REDIS_PORT = 6379
+
+# delimiter and extract-stats will loop consuming jobs
+# when using 32 (scheduler uses 25), we met:
+# Exception: No free connection (used 32 of 32)
+REDIS_POOL_NUM = 64
+
+REDIS_POOL_TIMEOUT = 10 # ms
+
+HTTP_MAX_TIMEOUT = 57000 # just under 1 minute (a common max http timeout)
+
+# redis-benchmark: 100000 requests in 1.88 seconds (0.0188ms each)
+# so we use 0.015ms as the default timeout, which means no retry by default
+HTTP_DEFAULT_TIMEOUT = 0.015
diff --git a/src/taskqueue/queue.cr b/src/taskqueue/queue.cr
new file mode 100644
index 0000000000000000000000000000000000000000..02236f5bd01249cdb6992fae713d9385155b127f
--- /dev/null
+++ b/src/taskqueue/queue.cr
@@ -0,0 +1,202 @@
+# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+require "uuid"
+
+require "./redis_client"
+
+class TaskQueue
+ def queue_respond_add(env)
+ body = env.request.body
+ if body.nil?
+ return queue_respond_header_set(env, 400, "Missing http body")
+ end
+
+ queue_name, ext_set = queue_check_params(env, ["queue"])
+ return ext_set if ext_set
+
+ type, prefix = get_matched_queue_name(queue_name[0])
+ param_queue = "#{prefix}/#{type}"
+
+ body_content = body.gets_to_end
+ env.request.body = body_content # restore back for debug message
+
+ task_content = JSON.parse(body_content)
+ id = task_content["id"]?
+ if id
+ case task_in_queue_status(id.to_s, param_queue)
+ when TaskInQueueStatus::TooBigID
+ return queue_respond_header_set(env, 409,
+ "Add with error id <#{id}>")
+ when TaskInQueueStatus::SameQueue
+ return queue_respond_header_set(env, 409,
+ "Queue <#{queue_name[0]}> already has id <#{id}>")
+ when TaskInQueueStatus::SameService
+ service_name = service_name_of_queue(param_queue)
+ return queue_respond_header_set(env, 409,
+ "Service <#{service_name}> still has id <#{id}> in process")
+ when TaskInQueueStatus::InTaskQueue
+ return queue_respond_header_set(env, 409,
+ "TaskQueue still has id <#{id}> in process")
+ when TaskInQueueStatus::NotExists
+ end
+ else
+ return queue_respond_header_set(env, 409, "Need the lab in the task content") unless task_content["lab"]?
+ end
+
+ task_id = add2redis("#{param_queue}", task_content.as_h)
+ env.response.status_code = 200
+ {id: task_id}.to_json
+ end
+
+ def queue_respond_header_set(env, code, message)
+ env.response.status_code = code
+ env.response.headers.add("CCI-Error-Description", message)
+ end
+
+ def queue_check_params(env, parameter_list)
+ params = Array(String).new
+ ext_set = nil
+
+ parameter_list.each do |parameter_name|
+ parameter_value = env.params.query[parameter_name]?
+
+ if parameter_value.nil?
+ ext_set = queue_respond_header_set(env, 400, "Missing parameter <#{parameter_name}>")
+ return params, ext_set
+ end
+ params << parameter_value
+ end
+
+ return params, ext_set
+ end
+
+ def queue_respond_consume(env)
+ queue_name, ext_set = queue_check_params(env, ["queue"])
+ return ext_set if ext_set
+
+ # type can be "idle" | "ready", can not be a uuid
+ type, prefix = get_matched_queue_name(queue_name[0])
+ case type
+ when "ready"
+ move_uuid_task2ready(prefix)
+ when "idle"
+ # do nothing
+ else # uuid
+ puts "Warning: Should not directly consume a job with uuid."
+ end
+
+ queue_name_from = "#{prefix}/#{type}"
+ queue_name_to = prefix + "/in_process"
+
+ begin
+ timeout = "#{env.params.query["timeout"]?}".to_i
+ timeout = HTTP_MAX_TIMEOUT if timeout > HTTP_MAX_TIMEOUT
+ rescue
+ timeout = HTTP_DEFAULT_TIMEOUT
+ end
+
+ task = operate_with_timeout(timeout) {
+ move_first_task_in_redis(queue_name_from, queue_name_to)
+ }
+
+ if task.nil?
+ env.response.status_code = 201
+ else
+ env.response.status_code = 200
+ end
+ return task
+ end
+
+ def queue_respond_hand_over(env)
+ params, ext_set = queue_check_params(env, ["from", "to", "id"])
+ return ext_set if ext_set
+
+ from = params[0] + "/in_process"
+ to = params[1] + "/ready"
+ id = params[2]
+
+ if move_task_in_redis(from, to, id)
+ env.response.status_code = 201
+ else
+ queue_respond_header_set(env, 409, "Can not find id <#{id}> in queue <#{params[0]}>")
+ end
+ end
+
+ def service_name_of_queue(queue_name : String)
+ find_slash = queue_name.index('/')
+ return find_slash ? queue_name[0, find_slash] : queue_name
+ end
+
+ def queue_respond_delete(env)
+ params, ext_set = queue_check_params(env, ["queue", "id"])
+ return ext_set if ext_set
+
+ # the input queue parameter may look like "scheduler/$tbox_group/..."
+ # we just need to make sure the "id" belongs to queue "scheduler"
+ # (queue "scheduler" is the queue for the scheduler service)
+ queue = service_name_of_queue(params[0])
+ id = params[1]
+
+ if delete_task_in_redis(queue, id)
+ env.response.status_code = 201
+ else
+ queue_respond_header_set(env, 409, "Can not find id <#{id}> in queue <#{params[0]}>")
+ end
+ end
+
+ # loop and retry: when there is no task, keep trying until we get one or time out
+ # default timeout is 0.015ms; we sleep (REDIS_POOL_TIMEOUT + 1) us between attempts
+ private def operate_with_timeout(timeout)
+ result = nil
+ time_span = Time::Span.new(nanoseconds: (REDIS_POOL_TIMEOUT + 1) * 1000)
+ time_start = Time.local.to_unix_f
+ timeout_seconds_f = timeout / 1000
+
+ loop do
+ result = yield
+ break if result
+
+ break if (Time.local.to_unix_f - time_start) > timeout_seconds_f
+ sleep(time_span)
+ end
+
+ return result
+ end
+
+ private def get_matched_queue_name(queue_name)
+ test_queue_name = "#{queue_name}"
+ match = test_queue_name.match(/(.*)\/(.*)$/)
+ if !match.nil?
+ return "idle", match[1] if match[2] == "idle"
+
+ begin
+ uuid = UUID.new(match[2])
+ return uuid.to_s, match[1]
+ rescue
+ end
+ end
+
+ return "ready", test_queue_name
+ end
+
+ private def move_uuid_task2ready(queue_name)
+ # get uuid lists
+ # uuid_keys ["queues/sched/vm-hi1620-2p8g/ee44b164-90e3-49a7-9798-5e7cc9bc7451", ]
+ uuid_keys = get_uuid_keys("#{queue_name}")
+ return unless uuid_keys # nil
+ return unless uuid_keys.size > 0
+
+ # move tasks to ready, with rate control per uuid
+ uuid_keys.each do |key|
+ uuid, prefix = get_matched_queue_name(key)
+ if @@rate_limiter.rate_limited?(:l48pD, uuid) == false
+ # prefix starts with "queues"; strip it
+ s_name = prefix.sub("#{QUEUE_NAME_BASE}/", "")
+ move_first_task_in_redis_with_score("#{s_name}/#{uuid}",
+ "#{s_name}/ready")
+ end
+ end
+ end
+end
diff --git a/src/taskqueue/redis_client.cr b/src/taskqueue/redis_client.cr
new file mode 100644
index 0000000000000000000000000000000000000000..3bac92431a91c0bc2b31357edb28b5c5c9f5586d
--- /dev/null
+++ b/src/taskqueue/redis_client.cr
@@ -0,0 +1,288 @@
+# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
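+
+# Data layout sketch (illustrative queue/id values): one queued task occupies
+# two redis structures -- a sorted-set member scored by unix time, plus a
+# field in the id2content hash holding the task json:
+#
+#   zadd queues/sched/vm-2p8g/ready 1600782938.9 crystal.87230
+#   hset queues/id2content crystal.87230 '{"add_time":...,"queue":"sched/vm-2p8g/ready","data":{...}}'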
+ +require "redis" +require "json" + +# redis key like: queues/service/subkey/ready|in_process +# queue name like: service/[.../]subkey +enum TaskInQueueStatus + TooBigID # 0 + NotExists # 1 + SameQueue # 2, match with [queues/]service/[.../]queue + SameService # 3, match with [queues/]service + InTaskQueue # 4, match with [queues/] +end + +class TaskQueue + def initialize + redis_host = (ENV.has_key?("REDIS_HOST") ? ENV["REDIS_HOST"] : REDIS_HOST) + redis_port = (ENV.has_key?("REDIS_PORT") ? ENV["REDIS_PORT"].to_i32 : REDIS_PORT) + + redis_pool_num = (ENV.has_key?("REDIS_POOL_NUM") ? ENV["REDIS_POOL_NUM"].to_i32 : REDIS_POOL_NUM) + + redis_pool_timeout = (ENV.has_key?("REDIS_POOL_TIMEOUT") ? ENV["REDIS_POOL_TIMEOUT"].to_i32 : REDIS_POOL_TIMEOUT) + + @redis = Redis::PooledClient.new(host: redis_host, + port: redis_port, pool_size: redis_pool_num, + pool_timeout: redis_pool_timeout / 1000) + end + + private def get_new_seqno + return @redis.incr("#{QUEUE_NAME_BASE}/seqno") + end + + private def task_in_queue_status(id : String, queue_name : String) + current_seqno = @redis.get("#{QUEUE_NAME_BASE}/seqno") + current_seqno = "0" if current_seqno.nil? + current_seqno = current_seqno.to_i64 + return TaskInQueueStatus::TooBigID if id.split('.')[-1].to_i64 > current_seqno + + data_f = Redis::Future.new + loop_till_done() { + @redis.watch("#{QUEUE_NAME_BASE}/id2content") + op_result = @redis.multi do |multi| + data_f = multi.hget("#{QUEUE_NAME_BASE}/id2content", id) + end + op_result + } + data = data_f.value.as(String?) + return TaskInQueueStatus::NotExists if data.nil? + + data_hash = JSON.parse(data) + data_queue = data_hash["queue"].to_s + return TaskInQueueStatus::SameQueue if data_queue == queue_name + + if service_name_of_queue(data_queue) == service_name_of_queue(queue_name) + return TaskInQueueStatus::SameService + else + return TaskInQueueStatus::InTaskQueue + end + end + + private def add2redis(queue_name : String, content : Hash) + operate_time = Time.local.to_unix_f # do prepare thing early + + if content["id"]? + # this means we'll add like duplicate id + # will operate to same redis key (queues/id2content) + task_id = content["id"] + else + task_id = "#{content["lab"]}.#{get_new_seqno()}" + content = content.merge({:id => task_id}) + end + data = { + :add_time => operate_time, + :queue => queue_name, + :data => content, + } + + loop_till_done() { + @redis.watch("#{QUEUE_NAME_BASE}/id2content") + op_result = @redis.multi do |multi| + multi.zadd("#{QUEUE_NAME_BASE}/#{queue_name}", operate_time, task_id) + multi.hset("#{QUEUE_NAME_BASE}/id2content", task_id, data.to_json) + end + op_result + } + + return task_id + end + + # need loop_till_done ? + private def find_first_task_in_redis(queue_name) + first_task = @redis.zrange("#{QUEUE_NAME_BASE}/#{queue_name}", 0, 0) + if first_task.size == 0 + return nil + else + return first_task[0].to_s + end + end + + # need loop_till_done ? + private def find_task(id : String) + task_content_raw = @redis.hget("#{QUEUE_NAME_BASE}/id2content", id) + if task_content_raw.nil? + return nil + else + return JSON.parse(task_content_raw).as_h + end + end + + private def move_task_in_redis(from : String, to : String, id : String) + content = find_task(id) + return nil if content.nil? 
+ return nil if (content["queue"] != from)
+
+ operate_time = Time.local.to_unix_f
+ content = content.merge({"queue" => to})
+ content = content.merge({"move_time" => operate_time})
+
+ # if another client zrems first, the result will be []
+ # or result will be [1, 1, 1|0]
+ result = loop_till_done() {
+ @redis.watch("#{QUEUE_NAME_BASE}/#{from}")
+ op_result = @redis.multi do |multi|
+ multi.zadd("#{QUEUE_NAME_BASE}/#{to}", operate_time, id)
+ multi.zrem("#{QUEUE_NAME_BASE}/#{from}", id)
+ multi.hset("#{QUEUE_NAME_BASE}/id2content", id, content.to_json)
+ end
+ op_result
+ }
+ if (result.not_nil![0] != 1) || (result.not_nil![1] != 1)
+ puts "#{Time.utc} WARN -- operate error in move task."
+ end
+
+ return content["data"].to_json
+ end
+
+ private def move_first_task_in_redis(from : String, to : String)
+ first_task_id = Redis::Future.new
+ result = loop_till_done() {
+ @redis.watch("#{QUEUE_NAME_BASE}/#{from}")
+ op_result = @redis.multi do |multi|
+ first_task_id = multi.zrange("#{QUEUE_NAME_BASE}/#{from}", 0, 0)
+ multi.zremrangebyrank("#{QUEUE_NAME_BASE}/#{from}", 0, 0)
+ end
+ op_result
+ }
+ return nil if result.not_nil![1].as(Int) == 0 # 0 means no delete == no id
+
+ # result was [[id], 1]
+ id = first_task_id.value.as(Array)[0].to_s
+ content = find_task(id)
+ return nil if content.nil?
+
+ operate_time = Time.local.to_unix_f
+ content = content.merge({"queue" => to})
+ content = content.merge({"move_time" => operate_time})
+
+ loop_till_done() {
+ @redis.multi do |multi|
+ multi.zadd("#{QUEUE_NAME_BASE}/#{to}", operate_time, id)
+ multi.hset("#{QUEUE_NAME_BASE}/id2content", id, content.to_json)
+ end
+ }
+
+ return content["data"].to_json
+ end
+
+ private def delete_task_in_redis(queue : String, id : String)
+ content = find_task(id)
+ return nil if content.nil?
+ return nil if (service_name_of_queue(content["queue"].to_s) != queue)
+
+ # if another client hdels first, the result will be []
+ # or result will be [1, 1]
+ loop_till_done() {
+ @redis.watch("#{QUEUE_NAME_BASE}/#{content["queue"]}")
+ op_result = @redis.multi do |multi|
+ multi.zrem("#{QUEUE_NAME_BASE}/#{content["queue"]}", id)
+ multi.hdel("#{QUEUE_NAME_BASE}/id2content", id)
+ end
+ op_result
+ }
+
+ return content["data"].to_json
+ end
+
+ # when using redis PooledClient commands,
+ # there may be a conflict we need to handle:
+ # 1) thread-1 tries to read something that thread-2 is about to write
+ # 2) thread-3 may also write, which can preempt thread-2's write
+ # so we need to let thread-2's command retry as soon as possible.
+ #
+ # loop until there is no operate conflict:
+ # with the redis.watch(keys) command, if another
+ # thread is modifying the key, none of the redis.multi
+ # commands run, and [] is returned.
+ # when there is no conflict, all redis.multi commands
+ # are done, returning [result, ...], one per command.
+ #
+ # yield block like this
+ # {
+ # redis.watch
+ # op_result = redis.multi do |multi|
+ # end
+ # op_result <- this value will be returned
+ # }
+ #
+ # connection pool timeout is 0.01 second (10 ms)
+ # here we keep trying for 30 ms
+ private def loop_till_done
+ result = nil
+ time_start = Time.local.to_unix_ms
+
+ # i = 0
+ loop do
+ result = yield
+ break if result.size > 0
+ # i = i + 1
+ if (Time.local.to_unix_ms - time_start) > 30
+ # normally this retries only 1-2 times
+ puts "#{Time.utc} WARN -- should not retry so long."
+ break
+ end
+ end
+
+ # call record: 5208 commands,
+ # 115 retries,
+ # max 7 retries (occurred 1 time)
+ # p "retry #{i} times"
+ return result
+ end
+
+ private def get_uuid_keys(queue_name)
+ return nil unless queue_name[0..5] == "sched/"
+
+ # search = "queues/sched/vm-hi1620-2p8g/ee44b164-90e3-49a7-9798-5e7cc9bc7451"
+ # redis KEYS patterns only support 3 wildcards: * [] ?
+ search = "#{QUEUE_NAME_BASE}/#{queue_name}/[0-9a-fA-F\-]*"
+ lua_script = "return redis.call('keys', KEYS[1])"
+ keys = @redis.eval(lua_script, [search])
+
+ case keys
+ when Array(Redis::RedisValue)
+ return nil unless keys.size > 0
+ else
+ return nil
+ end
+
+ keys.each do |key|
+ # keep a key only when it ends with a uuid;
+ # a queue name with a uuid in the middle (but not at the end) is dropped too
+ uuid, _ = get_matched_queue_name(key)
+ case uuid
+ when "idle", "ready"
+ keys.delete(key)
+ end
+ end
+ return keys
+ end
+
+ private def move_first_task_in_redis_with_score(from : String, to : String)
+ # result was ["crystal.87230", "1600782938.9017849"]
+ result = @redis.zrange("#{QUEUE_NAME_BASE}/#{from}", 0, 0, with_scores: true)
+ case result
+ when Array(Redis::RedisValue)
+ # an empty queue is auto-deleted by redis
+ return if result.size != 2
+ else
+ return
+ end
+
+ @redis.zremrangebyrank("#{QUEUE_NAME_BASE}/#{from}", 0, 0)
+ content = find_task(result[0].to_s)
+ return if content.nil?
+
+ operate_time = Time.local.to_unix_f
+ content = content.merge({"queue" => to})
+ content = content.merge({"move_with_score_time" => operate_time})
+
+ @redis.zadd("#{QUEUE_NAME_BASE}/#{to}", result[1], result[0])
+ @redis.hset("#{QUEUE_NAME_BASE}/id2content", result[0], content.to_json)
+
+ # return content["data"].to_json
+ end
+end
diff --git a/src/taskqueue/taskqueue.cr b/src/taskqueue/taskqueue.cr
new file mode 100644
index 0000000000000000000000000000000000000000..c5c202d03911b784d2b8ba99577fb1e22865303c
--- /dev/null
+++ b/src/taskqueue/taskqueue.cr
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+require "kemal"
+require "rate_limiter"
+
+require "./constants"
+require "./queue"
+
+class TaskQueue
+ VERSION = "0.0.2"
+
+ # l2pH: limit 2 / hour
+ # l100pms: limit 100 / ms
+ @@rate_limiter = RateLimiter(String).new
+ @@rate_limiter.bucket(:l48pD, 48_u32, 1.days)
+ @@rate_limiter.bucket(:l2pH, 1_u32, 30.minutes)
+ @@rate_limiter.bucket(:l1pms, 1_u32, 0.001.seconds)
+ @@rate_limiter.bucket(:l100pms, 1_u32, 0.00001.seconds)
+
+ def debug_message(env, response, time_in)
+ puts("\n")
+
+ from_message = "#{time_in} --> #{env.request.remote_address}"
+ if env.request.body != nil
+ from_message += " #{env.request.body}"
+ end
+ puts(from_message)
+
+ puts("#{Time.utc} <-- #{response}")
+ end
+
+ def run
+ # -------------------
+ # request: curl http://localhost:3060
+ #
+ # response: TaskQueue@v0.0.2 is alive.
+ get "/" do |env|
+ response = "TaskQueue@v#{VERSION} is alive."
+ debug_message(env, response, Time.utc) + + "#{response.to_json}\n" + end + + # ------------------- + # request: curl -X POST http://localhost:3060/add?queue=scheduler/$tbox_group + # -H "Content-Type: application/json" + # --data '{"suite":"test01", "tbox_group":"host"}' + # | --data '{"suite":"test01", "id":$id, "tbox_group":"host"}' + # + # response: 200 {id: 1}.to_json + # 409 "Queue already has id <$id>" + # 409 "Add with error id <65536>" + # 400 "Missing parameter " + # 400 "Missing http body" + post "/add" do |env| + response = queue_respond_add(env) + debug_message(env, response, Time.utc) + response if env.response.status_code == 200 + end + + # ------------------- + # request: curl -X PUT http://localhost:3060/consume?queue=scheduler/$tbox_group + # option parameter timeout=XXXX (default as 3000ms, max 57000ms) + # + # response: 200 {"suite":"test01", "tbox_group":"host", "id":1}.to_json + # 201 ## when there has no task in queue (scheduler/$tbox_group) + # 400 "Missing parameter " + put "/consume" do |env| + response = queue_respond_consume(env) + debug_message(env, response, Time.utc) + response if env.response.status_code == 200 + end + + # ------------------- + # request: curl -X PUT http://localhost:3060/hand_over? + # from=scheduler/$tbox_group&to=extract_stats&id=$id + # + # response: 201 ## when succeed hand over + # 400 "Missing parameter " + # 409 "Can not find id <$id> in queue " + put "/hand_over" do |env| + response = queue_respond_hand_over(env) + debug_message(env, response, Time.utc) + nil + end + + # ------------------- + # request: curl -X PUT http://localhost:3060/delete? + # from=scheduler/$tbox_group&id=$id + # + # response: 201 ## when succeed delete + # 400 "Missing parameter " + # 409 "Can not find id <$id> in queue " + put "/delete" do |env| + response = queue_respond_delete(env) + debug_message(env, response, Time.utc) + nil + end + + @port = (ENV.has_key?("TASKQUEUE_PORT") ? 
ENV["TASKQUEUE_PORT"].to_i32 : TASKQUEUE_PORT) + Kemal.run(@port) + end +end diff --git a/user-client/README.md b/user-client/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1e1437f84c54a978bbace3e78341c38a17e87894 --- /dev/null +++ b/user-client/README.md @@ -0,0 +1,33 @@ +--- +# client dependence + +## system env +yum install ruby-devel + +gem install cucumber +gem install rest-client + +## how to use +ruby ./client/src/lkp.rb queue ./iperf.yaml + + +--- +# helper shell command + +## empty hostname regist +curl -X DELETE http://localhost:9200/report +## run qemu will "get no job" : because of no host regist + +## regist a hostname +curl -X PUT "http://localhost:3000/report?hostname=wfg-e595&mac=52-54-00-12-34-56" + +## add job +0_addjob.sh iperf.yaml + +## run qemu +2_runqemu.sh +## rerun qemu got "no job now" : because the only job has been done + +## other +1_showjob.sh job_id # show job_id's content in es +3_dispcgz.sh job_id # list job_id's file diff --git a/user-client/developer/mail_bisect_result.rb b/user-client/developer/mail_bisect_result.rb new file mode 100755 index 0000000000000000000000000000000000000000..b9149ac8608c172c59735febea8c6a63e2620f49 --- /dev/null +++ b/user-client/developer/mail_bisect_result.rb @@ -0,0 +1,19 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require_relative '../../lib/mail_bisect_result' + +# defaults +bisect_hash = { + 'repo' => 'pixz/pixz', + 'commit' => 'b0e2e5b8efc4c7f1994805797f65d77573d9649c', + 'job_id' => '59037', + 'error_id' => 'build-pkg.gcc:internal-compiler-error:Segmentation-fault(program-as)' +} + +ARGV.each do |arg| + k, v = arg.split '=' + bisect_hash[k] = v +end + +MailBisectResult.new(bisect_hash).create_send_email diff --git a/user-client/features/queue.feature b/user-client/features/queue.feature new file mode 100644 index 0000000000000000000000000000000000000000..c936c4300c796adaedd6c166ce8a1d3a22dd8fc0 --- /dev/null +++ b/user-client/features/queue.feature @@ -0,0 +1,14 @@ +Feature: queue client + + Background: default user has logged in server + Given lkp server is ready + And user "centos" has logged in + + Scenario: add job to queue + When user "centos" use "lkp queue jobs/myjobs.yaml" to add job + Then the lkp server echo add job status + + Scenario: user queue job result + When user "centos" use "lkp queue jobs/myjobs.yaml" to add job + And user "centos" use "lkp result jobs/myjobs.yaml" to queue job result + Then the lkp server echo queue job result diff --git a/user-client/features/step_definitions/queue_steps.rb b/user-client/features/step_definitions/queue_steps.rb new file mode 100644 index 0000000000000000000000000000000000000000..f9a9edd8eb8822ec25ee25bcfd74564e2aa96736 --- /dev/null +++ b/user-client/features/step_definitions/queue_steps.rb @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: MulanPSL-2.0+ +# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved. 
diff --git a/user-client/features/queue.feature b/user-client/features/queue.feature
new file mode 100644
index 0000000000000000000000000000000000000000..c936c4300c796adaedd6c166ce8a1d3a22dd8fc0
--- /dev/null
+++ b/user-client/features/queue.feature
@@ -0,0 +1,14 @@
+Feature: queue client
+
+  Background: default user has logged in to the server
+    Given lkp server is ready
+    And user "centos" has logged in
+
+  Scenario: add job to queue
+    When user "centos" uses "lkp queue jobs/myjobs.yaml" to add job
+    Then the lkp server echoes add job status
+
+  Scenario: user queries job result
+    When user "centos" uses "lkp queue jobs/myjobs.yaml" to add job
+    And user "centos" uses "lkp result jobs/myjobs.yaml" to query job result
+    Then the lkp server echoes query job result
diff --git a/user-client/features/step_definitions/queue_steps.rb b/user-client/features/step_definitions/queue_steps.rb
new file mode 100644
index 0000000000000000000000000000000000000000..f9a9edd8eb8822ec25ee25bcfd74564e2aa96736
--- /dev/null
+++ b/user-client/features/step_definitions/queue_steps.rb
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require "#{File.dirname(__FILE__)}/../../src/lkp_server_info"
+require "#{File.dirname(__FILE__)}/../../src/lkp_client"
+
+Given(/^lkp server is ready$/) do
+  @lkp_si = LkpServerInfo.new
+  raise "Cannot connect to server #{@lkp_si.host}" unless @lkp_si.connect_able
+end
+
+# task_description: {"add job status", "query job result"}
+Then(/^the lkp server echoes (.*?)$/) do |task_description|
+  puts @job_status
+  raise "Server #{@lkp_si.host} did not respond to #{task_description}" if @job_status == ''
+end
+
+# user: centos
+# cmd: {"lkp queue jobs/myjobs.yaml", "lkp result jobs/myjobs.yaml"}
+# cmd_desc: {"add job", "query job result"}
+When(/^user "([^"]*)" uses "([^"]*)" to (.*?)$/) do |_user, cmd, _cmd_desc|
+  @lkp_client.cmd(cmd)
+  response = @lkp_client.run
+
+  @job_status = ''
+  raise "Post to server #{@lkp_si.host} error, code = #{response.code}" if response.code != 200
+
+  @job_status = response.body
+end
+
+And(/^user "([^"]*)" has logged in$/) do |_user|
+  @lkp_client = LkpClient.new(@lkp_si)
+  @lkp_client.basic_authorization
+end
diff --git a/user-client/features/support/env.rb b/user-client/features/support/env.rb
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/user-client/helper/0_addjob.sh b/user-client/helper/0_addjob.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1b2a3c97c1c15b6f24c0278a4ee5fe9aacb55c47
--- /dev/null
+++ b/user-client/helper/0_addjob.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+DIR=$(dirname "$(realpath "$0")")
+ruby "$DIR/../src/lkp.rb" queue "$1"
diff --git a/user-client/helper/1_showjob.sh b/user-client/helper/1_showjob.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2da269b5ff3c9615c113874ddef0f7595b539c3f
--- /dev/null
+++ b/user-client/helper/1_showjob.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+curl "http://localhost:9200/jobs/_doc/$1"
diff --git a/user-client/helper/2_runqemu.sh b/user-client/helper/2_runqemu.sh
new file mode 100755
index 0000000000000000000000000000000000000000..63c451f43e81f35b2898bbf01067f9e847a294ff
--- /dev/null
+++ b/user-client/helper/2_runqemu.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+: ${CCI_SRC:=/c/cci}
+: ${LKP_SRC:=/c/lkp-tests}
+
+$CCI_SRC/providers/my-qemu.sh
diff --git a/user-client/helper/3_dispcgz.sh b/user-client/helper/3_dispcgz.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b2c22edc780578b342293cd4bf2676df15700c20
--- /dev/null
+++ b/user-client/helper/3_dispcgz.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+sudo ls /srv/cci/scheduler/alpine/public/$1
diff --git a/user-client/helper/batch_submit_job b/user-client/helper/batch_submit_job
new file mode 100755
index 0000000000000000000000000000000000000000..581bc49bcc39cd75aba077a53a63e3d549a795bb
--- /dev/null
+++ b/user-client/helper/batch_submit_job
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+# split job.yaml into per-job files and submit each of them
+
+function message_and_exit {
+    cat tmp.txt
+    exit $?
+}
+
+function batch_submit {
+    sed -i 's/^.* => //' tmp.txt
+
+    for line in $(cat tmp.txt)
+    do
+        ruby ./client/src/lkp.rb queue $line
+        rm $line
+    done
+
+    rm tmp.txt
+    exit 0
+}
+
+export LANG=
+
+: ${LKP_SRC:=/c/lkp-tests}
+
+${LKP_SRC}/sbin/split-job "$1" > tmp.txt
+
+first_line=$(sed -n '1p' tmp.txt)
+
+if [ "$1" != "" ]; then
+    if [[ $first_line =~ $1 ]]; then
+        batch_submit
+    fi
+fi
+
+message_and_exit
diff --git a/user-client/helper/iperf.yaml b/user-client/helper/iperf.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..06980c3f4c13e8118c1b5572e8cc7b7e71f1879f
--- /dev/null
+++ b/user-client/helper/iperf.yaml
@@ -0,0 +1,36 @@
+---
+
+#! jobs/iperf.yaml
+suite: iperf
+testcase: iperf
+category: benchmark
+runtime: 300
+cluster: cs-localhost
+if role server:
+  iperf-server:
+if role client:
+  iperf:
+    protocol: tcp
+job_origin: jobs/iperf.yaml
+testbox: vm-pxe-hi1620-1p1g-1
+arch: x86_64
+node_roles: server client
+
+#! include/category/benchmark
+kmsg:
+boot-time:
+uptime:
+iostat:
+heartbeat:
+vmstat:
+numa-numastat:
+numa-vmstat:
+numa-meminfo:
+proc-vmstat:
+proc-stat:
+meminfo:
+
+LKP_SERVER: 172.168.131.113
+LKP_CGI_PORT: 3000
+result_root: /result/iperf
+LKP_DEBUG_PREFIX: bash -x
diff --git a/user-client/helper/redis_op/0_redis_trace_id b/user-client/helper/redis_op/0_redis_trace_id
new file mode 100755
index 0000000000000000000000000000000000000000..45166412ea73ca01d31c531f5e0e5628d07f1689
--- /dev/null
+++ b/user-client/helper/redis_op/0_redis_trace_id
@@ -0,0 +1,41 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# != 1 ]; then
+    echo "USAGE: $0 id    # trace the task"
+    exit 1
+fi
+
+echo
+echo -e "--> \033[34m[TaskQueue]\033[0m trace start"
+
+# get the task description
+result=$(redis-cli --eval key_cmd_params.lua queues/id2content , hget $1)
+if [ "$result" == "" ]; then
+    echo -e "\033[31mNo task\033[0m id=$1"
+    exit 1
+fi
+
+# show the task information
+echo $result
+
+# find the queue that owns the id
+queue_info=${result##*queue\":\"}
+queue_name=${queue_info%%\",*}
+
+# check that the id still exists in that queue
+result=$(redis-cli --eval key_cmd_params.lua queues/${queue_name} , zrank $1)
+if [ "$result" == "" ]; then
+    echo -e "\033[31mNo task\033[0m id=$1 at queues/$queue_name"
+    exit 1
+fi
+
+# show the task's rank in the queue
+echo "task id=$1 is at rank $result of queues/$queue_name"
+
+echo -e "<-- \033[32m[TaskQueue]\033[0m trace end"
+echo
+
+service_name=${queue_name%%/*}
+. ./${service_name}_trace_id $1
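0_redis_trace_id shows the two-step data model: `queues/id2content` is a hash mapping a task id to its current queue, and each queue is a sorted set ranked by enqueue time. The same trace, sketched with the Ruby redis gem (an assumption; the helpers themselves only use redis-cli, and the task id is a hypothetical example):

```ruby
require 'json'
require 'redis'

redis = Redis.new(host: 'localhost', port: 6379)
id = '25536' # hypothetical task id

# step 1: look up the task's description, which names its current queue
content = redis.hget('queues/id2content', id)
abort "No task id=#{id}" unless content

# step 2: confirm the id still ranks in that queue's sorted set
queue = JSON.parse(content)['queue']
rank = redis.zrank("queues/#{queue}", id)
abort "No task id=#{id} at queues/#{queue}" unless rank

puts "task id=#{id} is at rank #{rank} of queues/#{queue}"
```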
diff --git a/user-client/helper/redis_op/key_cmd_params.lua b/user-client/helper/redis_op/key_cmd_params.lua
new file mode 100644
index 0000000000000000000000000000000000000000..a507bb5773b6d6da48868ee1d770a3e775f4ee71
--- /dev/null
+++ b/user-client/helper/redis_op/key_cmd_params.lua
@@ -0,0 +1,34 @@
+-- this script adapts redis commands of the form: command key param1 param2 ...
+-- ARGV[1] is the redis command name
+-- #ARGV is the number of params plus 1
+
+local result
+
+for i = #ARGV, 1, -1 do
+  if i == 1 then
+    result = redis.call(ARGV[1], KEYS[1])
+    break
+  end
+
+  if i == 2 then
+    result = redis.call(ARGV[1], KEYS[1], ARGV[2])
+    break
+  end
+
+  if i == 3 then
+    result = redis.call(ARGV[1], KEYS[1], ARGV[2], ARGV[3])
+    break
+  end
+
+  if i == 4 then
+    result = redis.call(ARGV[1], KEYS[1], ARGV[2], ARGV[3], ARGV[4])
+    break
+  end
+
+  if i == 5 then
+    result = redis.call(ARGV[1], KEYS[1], ARGV[2], ARGV[3], ARGV[4], ARGV[5])
+    break
+  end
+end
+
+return result
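Every redis_op_* wrapper below drives this script the same way: with `redis-cli --eval`, everything before the lone `,` becomes KEYS and everything after it becomes ARGV. A small Ruby helper capturing that convention (a sketch; it assumes CCI_SRC is exported as in INSTALL.md, and mirrors the CMD_BASE constant defined later in basic_env.rb):

```ruby
# build and run: redis-cli --eval key_cmd_params.lua <key> , <command> <args...>
def redis_op(key, command, *args)
  lua = "#{ENV['CCI_SRC']}/user-client/helper/redis_op/key_cmd_params.lua"
  `redis-cli --eval #{lua} #{key} , #{command} #{args.join(' ')}`.chomp
end

redis_op('queues/id2content', 'hget', '25536') # hypothetical task id
```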
diff --git a/user-client/helper/redis_op/redis_op_hdel b/user-client/helper/redis_op/redis_op_hdel
new file mode 100755
index 0000000000000000000000000000000000000000..8fa4624256578ac8c330b8bc6ecc84a4f573e905
--- /dev/null
+++ b/user-client/helper/redis_op/redis_op_hdel
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# -lt 1 ] || [ $# -gt 2 ]; then
+    echo "USAGE: $0 id                  # hdel for id2content"
+    echo "     : $0 queues/your_key id  # hdel for your key"
+    exit 1
+fi
+
+if [[ $2 ]]; then
+    key_name=$1
+    id=$2
+else
+    key_name=queues/id2content
+    id=$1
+fi
+
+# the redis command is the filename suffix after the last '_'
+cmd_lua=${0##*_}
+
+redis-cli --eval key_cmd_params.lua $key_name , $cmd_lua $id
diff --git a/user-client/helper/redis_op/redis_op_hget b/user-client/helper/redis_op/redis_op_hget
new file mode 100755
index 0000000000000000000000000000000000000000..4afff6331a6d901b1f8b92bb82feb82f37f451c3
--- /dev/null
+++ b/user-client/helper/redis_op/redis_op_hget
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# -gt 2 ] || [ $# -lt 1 ]; then
+    echo "USAGE: $0 id                  # hget for id2content"
+    echo "     : $0 queues/your_key id  # hget for your key"
+    exit 1
+fi
+
+if [[ $2 ]]; then
+    key_name=$1
+    id=$2
+else
+    key_name=queues/id2content
+    id=$1
+fi
+
+# the redis command is the filename suffix after the last '_'
+cmd_lua=${0##*_}
+
+redis-cli --eval key_cmd_params.lua $key_name , $cmd_lua $id
diff --git a/user-client/helper/redis_op/redis_op_hgetall b/user-client/helper/redis_op/redis_op_hgetall
new file mode 100755
index 0000000000000000000000000000000000000000..6f03740c1e809a55a8e5c9a45bc39f9ee7168b64
--- /dev/null
+++ b/user-client/helper/redis_op/redis_op_hgetall
@@ -0,0 +1,20 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# -gt 1 ]; then
+    echo "USAGE: $0                  # hgetall for id2content"
+    echo "     : $0 queues/your_key  # hgetall for your key"
+    exit 1
+fi
+
+if [[ $1 ]]; then
+    key_name=$1
+else
+    key_name=queues/id2content
+fi
+
+# the redis command is the filename suffix after the last '_'
+cmd_lua=${0##*_}
+
+redis-cli --eval key_cmd_params.lua $key_name , $cmd_lua
diff --git a/user-client/helper/redis_op/redis_op_hkeys b/user-client/helper/redis_op/redis_op_hkeys
new file mode 100755
index 0000000000000000000000000000000000000000..766cbdcae951b6c112f45eac1833b25cb26f40c9
--- /dev/null
+++ b/user-client/helper/redis_op/redis_op_hkeys
@@ -0,0 +1,20 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# -gt 1 ]; then
+    echo "USAGE: $0                 # hkeys for id2content"
+    echo "e.g.: $0 queues/your_key  # hkeys for your_key"
+    exit 1
+fi
+
+if [[ $1 ]]; then
+    key_name=$1
+else
+    key_name=queues/id2content
+fi
+
+# the redis command is the filename suffix after the last '_'
+cmd_lua=${0##*_}
+
+redis-cli --eval key_cmd_params.lua $key_name , $cmd_lua
diff --git a/user-client/helper/redis_op/redis_op_zrange b/user-client/helper/redis_op/redis_op_zrange
new file mode 100755
index 0000000000000000000000000000000000000000..ce4d6e7f6e75fe24386a1bcb4198fdc6c9bcfc72
--- /dev/null
+++ b/user-client/helper/redis_op/redis_op_zrange
@@ -0,0 +1,21 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# -gt 1 ]; then
+    echo "USAGE: $0    # find all your jobs"
+    echo "e.g.: $0 queues/sched/vm-hi1620-2p8g/in_process    # find jobs in a queue"
+    exit 1
+fi
+
+if [[ $1 ]]; then
+    key_name=$1
+else
+    [[ $tbox_group ]] || tbox_group=vm-hi1620-2p8g
+    key_name=queues/sched/$tbox_group-$USER/ready
+fi
+
+# the redis command is the filename suffix after the last '_'
+cmd_lua=${0##*_}
+
+redis-cli --eval key_cmd_params.lua $key_name , $cmd_lua 0 -1
diff --git a/user-client/helper/redis_op/redis_op_zremrangebyrank b/user-client/helper/redis_op/redis_op_zremrangebyrank
new file mode 100755
index 0000000000000000000000000000000000000000..6b6365bb6d46219ff5b68d9da1ad241079bad605
--- /dev/null
+++ b/user-client/helper/redis_op/redis_op_zremrangebyrank
@@ -0,0 +1,14 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# != 3 ]; then
+    echo "USAGE: $0 key min max"
+    echo "e.g.: $0 queues/sched/vm-hi1620-2p8g/in_process 10 20"
+    exit 1
+fi
+
+# the redis command is the filename suffix after the last '_'
+cmd_lua=${0##*_}
+
+redis-cli --eval key_cmd_params.lua $1 , $cmd_lua $2 $3
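redis_op_zrange and redis_op_zremrangebyrank are the rank-based views of a queue: one lists members oldest-first, the other trims a rank window. The equivalent calls with the Ruby redis gem (an assumption; the helpers themselves shell out to redis-cli, and the key name is a hypothetical example):

```ruby
require 'redis'

redis = Redis.new
key = 'queues/sched/vm-hi1620-2p8g/in_process' # hypothetical queue key

redis.zrange(key, 0, -1)           # list every job id, oldest first
redis.zremrangebyrank(key, 10, 20) # drop the jobs ranked 10 through 20
```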
diff --git a/user-client/helper/redis_op/sched_trace_id b/user-client/helper/redis_op/sched_trace_id
new file mode 100755
index 0000000000000000000000000000000000000000..68aa277f1cf801b06945fce4300f1d01bc90bc1f
--- /dev/null
+++ b/user-client/helper/redis_op/sched_trace_id
@@ -0,0 +1,24 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+if [ $# != 1 ]; then
+    echo "USAGE: $0 id    # trace the job"
+    exit 1
+fi
+
+echo
+echo -e "--> \033[34m[Scheduler]\033[0m trace start"
+
+# get the job description
+result=$(redis-cli --eval key_cmd_params.lua sched/id2job , hget $1)
+if [ "$result" == "" ]; then
+    echo -e "\033[31mNo job\033[0m id=$1"
+    exit 1
+fi
+
+# show the job information
+echo $result
+
+echo -e "<-- \033[32m[Scheduler]\033[0m trace end"
+echo
diff --git a/user-client/helper/redis_op_gc/abnormal_id_detect.rb b/user-client/helper/redis_op_gc/abnormal_id_detect.rb
new file mode 100644
index 0000000000000000000000000000000000000000..a3bfba767f8013f56cdd7044e99bc68e3d6b595a
--- /dev/null
+++ b/user-client/helper/redis_op_gc/abnormal_id_detect.rb
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'json'
+require_relative './basic_env'
+
+# detect a task's status; possible return values:
+# - "Manual deleted id=#{@task_id}"
+# - "Alive too long id=#{@task_id}"
+# - "Normal task id=#{@task_id}"
+class AbnormalIdDetect
+  def initialize(id, days = 3, content = nil)
+    @task_id = id
+    @day_number = days
+
+    @content = get_taskqueue_content4id(content, id)
+  end
+
+  def check_alive_time_too_long(queue, rank, days)
+    time = Time.now.to_f - days * 86_400 # 1 day = 24*60*60 seconds
+    cmd = "#{CMD_BASE} queues/#{queue} , zrange #{rank} #{rank} withscores"
+    result = `#{cmd}`.chomp
+
+    # results[0]: task_id, the member
+    # results[1]: enqueue time, the score
+    results = result.split("\n")
+    task_enqueue_time = results[1].to_f
+
+    task_enqueue_time < time
+  end
+
+  def check
+    return NO_DATA + "=#{@task_id}" if @content.nil?
+
+    queue = @content['queue']
+
+    # task_id no longer ranks in its queue: it was deleted by hand
+    cmd = "#{CMD_BASE} queues/#{queue} , zrank #{@task_id}"
+    result = `#{cmd}`.chomp
+    return MANUAL_DELETED + "=#{@task_id}" if result.length.zero?
+
+    # enqueued longer ago than the given number of days
+    result = check_alive_time_too_long(queue, result, @day_number)
+    return ALIVE_TOO_LONG + "=#{@task_id}" if result
+
+    "Normal task id=#{@task_id}"
+  end
+
+  def add_old_test_data(days_ago)
+    queue = @content['queue']
+
+    time = Time.now.to_f - days_ago * 86_400
+    cmd = "#{CMD_BASE} queues/#{queue} , zadd #{time} #{@task_id}"
+    `#{cmd}`.chomp
+  end
+end
diff --git a/user-client/helper/redis_op_gc/basic_env.rb b/user-client/helper/redis_op_gc/basic_env.rb
new file mode 100644
index 0000000000000000000000000000000000000000..7682b9458f50fe837693d9bdaca51f70743c999c
--- /dev/null
+++ b/user-client/helper/redis_op_gc/basic_env.rb
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+raise 'Environment variable CCI_SRC must be set' if ENV['CCI_SRC'].to_s.empty?
+
+CCI_REDIS_OP_DIR ||= "#{ENV['CCI_SRC']}/user-client/helper/redis_op"
+CMD_BASE ||= "redis-cli --eval #{CCI_REDIS_OP_DIR}/key_cmd_params.lua "
+
+GC4ID = 'Garbage collection for id'
+GCN4ID = 'Doing nothing for id'
+NO_DATA = 'No data id'
+MANUAL_DELETED = 'Manual deleted id'
+ALIVE_TOO_LONG = 'Alive too long id'
+
+def set_progress(index, max, char = '#')
+  percent = index * 100 / max
+  print((char * (percent / 2.5).floor).ljust(40, ' '), " #{percent}%\r")
+  $stdout.flush
+end
+
+def get_taskqueue_content4id(content, id)
+  if content.nil?
+    cmd = "#{CMD_BASE} queues/id2content , hget #{id}"
+    content = `#{cmd}`.chomp
+  end
+
+  return nil if content.nil?
+
+  return nil if content.length.zero?
+
+  return JSON.parse(content)
+end
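The zset score is the enqueue timestamp (consistency.rb below shows an add_time example), so check_alive_time_too_long reduces to a one-line predicate once the score is in hand. A minimal sketch of that rule, with a hypothetical helper name:

```ruby
# a task is "alive too long" when its enqueue timestamp (the zset score)
# is older than N days
def alive_too_long?(enqueue_time, days = 3)
  enqueue_time < Time.now.to_f - days * 86_400
end

alive_too_long?(Time.now.to_f - 4 * 86_400) # => true with the default 3 days
```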
diff --git a/user-client/helper/redis_op_gc/consistency.rb b/user-client/helper/redis_op_gc/consistency.rb
new file mode 100755
index 0000000000000000000000000000000000000000..f81c68f16288e59cfe85e109b63ed992a3a744e1
--- /dev/null
+++ b/user-client/helper/redis_op_gc/consistency.rb
@@ -0,0 +1,95 @@
+#!/usr/bin/env ruby
+
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'optparse'
+require_relative './garbage_collection'
+
+options = { 'output' => false, 'remove' => false, 'days' => 3 }
+
+optparse = OptionParser.new do |opts|
+  opts.banner = 'Usage: consistency [options]'
+
+  help = "flag tasks alive for more than this many days, default is #{options['days']}"
+  opts.on('-dN', '--days=number', Integer, help) do |n|
+    options['days'] = n
+  end
+
+  help = "write abnormal task ids to man_del.id and too_long.id, default is #{options['output']}"
+  opts.on('-o', false, help) do
+    options['output'] = true
+  end
+
+  opts.on('-r', false,
+          "remove manually deleted tasks, default is #{options['remove']}") do
+    options['remove'] = true
+  end
+end
+
+optparse.parse!
+puts "options: #{options.inspect}"
+
+# the redis hash key "queues/id2content" is used by taskqueue;
+# it records every task id together with the task's current queue,
+# like:
+#    25536
+#    {"add_time":1596876735.944146, "queue":"sched/vm-hi1620-2p8g--$USER/ready"}
+cmd = "#{CMD_BASE} queues/id2content , hgetall"
+result = `#{cmd}`.chomp
+results = result.split("\n")
+
+normal = 0
+manual_deleted = []
+alive_too_long = []
+
+# use AbnormalIdDetect to classify each task:
+#   [MANUAL_DELETED, ALIVE_TOO_LONG, else]
+# with the remove option set, GarbageCollection drops manually deleted tasks;
+# no handling is defined for alive-too-long tasks yet
+i = 0
+task_num = results.size / 2
+while i < results.size
+  task_id = results[i]
+  task_content = results[i + 1]
+  abn = AbnormalIdDetect.new(task_id, options['days'], task_content)
+  status = abn.check
+
+  case status.gsub(/=.*/, '')
+  when MANUAL_DELETED
+    manual_deleted << task_id
+    if options['remove']
+      gc = GarbageCollection.new(task_id, options['days'], task_content)
+      gc.run
+    end
+  when ALIVE_TOO_LONG
+    alive_too_long << task_id
+  else
+    normal += 1
+  end
+
+  i += 2
+  set_progress(i, results.size)
+end
+
+# print task statistics
+puts ''
+puts "Total #{task_num} tasks"
+puts " - #{manual_deleted.size} manually deleted"
+puts " - #{alive_too_long.size} alive more than #{options['days']} days"
+puts " - #{normal} others"
+
+# write task ids to files:
+#   man_del.id for manually deleted tasks
+#   too_long.id for alive-too-long tasks
+if options['output']
+  File.open('man_del.id', 'w') do |f|
+    f.write(manual_deleted.join("\n"))
+  end
+
+  File.open('too_long.id', 'w') do |f|
+    f.write(alive_too_long.join("\n"))
+  end
+end
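With -o, consistency.rb leaves two id lists behind. A follow-up sketch that replays man_del.id through GarbageCollection (assuming the file sits in the current directory and Ruby >= 2.4 for the chomp: keyword):

```ruby
#!/usr/bin/env ruby
# replay the ids that consistency.rb wrote with -o through the collector
require_relative './garbage_collection'

File.readlines('man_del.id', chomp: true).each do |id|
  next if id.empty?

  puts GarbageCollection.new(id).run
end
```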
diff --git a/user-client/helper/redis_op_gc/garbage_collection.rb b/user-client/helper/redis_op_gc/garbage_collection.rb
new file mode 100644
index 0000000000000000000000000000000000000000..f8806ed5e0dc61227680ba80586aa01d9ea607e2
--- /dev/null
+++ b/user-client/helper/redis_op_gc/garbage_collection.rb
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require_relative './abnormal_id_detect'
+
+# garbage collection for stale fields in the redis hash keys:
+# - taskqueue hash key: "queues/id2content"
+# - scheduler hash key: "sched/id2job"
+class GarbageCollection
+  def initialize(id, days = 3, content = nil)
+    @task_id = id
+    @day_number = days
+
+    @content = get_taskqueue_content4id(content, id)
+  end
+
+  def run
+    abn = AbnormalIdDetect.new(@task_id, @day_number, @content.to_json)
+    result = abn.check
+    case result.gsub(/=.*/, '')
+    when MANUAL_DELETED
+      gc4taskqueue
+      gc4scheduler
+      GC4ID + "=#{@task_id}"
+    when ALIVE_TOO_LONG
+      'No process defined yet'
+    else
+      GCN4ID + "=#{@task_id}"
+    end
+  end
+
+  def gc4taskqueue
+    cmd = "#{CMD_BASE} queues/id2content , hdel #{@task_id}"
+    `#{cmd}`.chomp
+  end
+
+  def gc4scheduler
+    cmd = "#{CMD_BASE} sched/id2job , hdel #{@task_id}"
+    `#{cmd}`.chomp
+  end
+end
diff --git a/user-client/jobs/iperf-pxe.yaml b/user-client/jobs/iperf-pxe.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ccf5c9de6cac5767047a060c5aa07987b75daf0d
--- /dev/null
+++ b/user-client/jobs/iperf-pxe.yaml
@@ -0,0 +1,40 @@
+---
+
+#! jobs/iperf.yaml
+
+# this job yaml runs a qemu test via PXE; see compass-ci/sparrow/6-test/qemu
+# usage: 0_addjob iperf-pxe.yaml
+
+suite: iperf
+testcase: iperf
+category: benchmark
+runtime: 300
+cluster: cs-localhost
+if role server:
+  iperf-server:
+if role client:
+  iperf:
+    protocol: tcp
+job_origin: jobs/iperf.yaml
+testbox: vm-pxe-hi1620-2p8g-1
+arch: x86_64
+node_roles: server client
+
+#! include/category/benchmark
+kmsg:
+boot-time:
+uptime:
+iostat:
+heartbeat:
+vmstat:
+numa-numastat:
+numa-vmstat:
+numa-meminfo:
+proc-vmstat:
+proc-stat:
+meminfo:
+
+LKP_SERVER: 172.168.131.113
+LKP_CGI_PORT: 3000
+result_root: /result/iperf
+LKP_DEBUG_PREFIX: bash -x
diff --git a/user-client/jobs/iperf-sparrow.yaml b/user-client/jobs/iperf-sparrow.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ab521eabd8f92f8044285e7620c1b112c38d389
--- /dev/null
+++ b/user-client/jobs/iperf-sparrow.yaml
@@ -0,0 +1,10 @@
+suite: iperf
+category: benchmark
+runtime: 300
+cluster: cs-localhost
+if role server:
+  iperf-server:
+if role client:
+  iperf:
+    protocol: tcp
+node_roles: server client
diff --git a/user-client/jobs/iperf-vm.yaml b/user-client/jobs/iperf-vm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..31c5a3a43f9139ddb4844ece77e264f8b957372a
--- /dev/null
+++ b/user-client/jobs/iperf-vm.yaml
@@ -0,0 +1,40 @@
+---
+
+#! jobs/iperf.yaml
+
+# this job yaml runs a qemu test via KVM; see compass-ci/sparrow/6-test/qemu
+# usage: 0_addjob iperf-vm.yaml
+
+suite: iperf
+testcase: iperf
+category: benchmark
+runtime: 300
+cluster: cs-localhost
+if role server:
+  iperf-server:
+if role client:
+  iperf:
+    protocol: tcp
+job_origin: jobs/iperf.yaml
+testbox: vm-hi1620-2p8g-1
+arch: x86_64
+node_roles: server client
+
+#! include/category/benchmark
+kmsg:
+boot-time:
+uptime:
+iostat:
+heartbeat:
+vmstat:
+numa-numastat:
+numa-vmstat:
+numa-meminfo:
+proc-vmstat:
+proc-stat:
+meminfo:
+
+LKP_SERVER: 172.168.131.113
+LKP_CGI_PORT: 3000
+result_root: /result/iperf
+LKP_DEBUG_PREFIX: bash -x
diff --git a/user-client/jobs/myjobs.yaml b/user-client/jobs/myjobs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73bbb632a77e832c9b0c3c3b044fb83adcf8a082
--- /dev/null
+++ b/user-client/jobs/myjobs.yaml
@@ -0,0 +1,2 @@
+#! jobs/myjobs.yaml
+test:
diff --git a/user-client/maintain/iBMC/README b/user-client/maintain/iBMC/README
new file mode 100644
index 0000000000000000000000000000000000000000..e1ae32792983e60b278cb6d959d40bc49ebfdc59
--- /dev/null
+++ b/user-client/maintain/iBMC/README
@@ -0,0 +1,20 @@
+1. The hostname, IP and type of every server in the LAB shall be recorded in
+   the file /etc/servers.info, e.g.:
+       p1-1 1.1.1.1 2288hv5
+       p1-2 1.1.1.2 thaishan
+       ...
+
+2. Management users' accounts and passwords and the servers' network configuration
+   shall be recorded in the file /etc/iBMC/.config/accnt.info, e.g.:
+       VAR1=VALUE1
+       VAR2=VALUE2
+       ...
+
+3. Important servers' IPs or hostnames shall be recorded in the file
+   /etc/iBMC/.config/forbidden.list, e.g.:
+       p1-1 providing some service
+       1.1.2.1 PXE Server
+       ...
+
+4. Copy the script ibmcOper to /usr/bin/, then use the ibmcOper command to manage
+   all the servers in the LAB once basic iBMC configuration is ready.
diff --git a/user-client/maintain/iBMC/ibmcOper b/user-client/maintain/iBMC/ibmcOper
new file mode 100755
index 0000000000000000000000000000000000000000..27058427348952180f306367ec7006a2551c04d5
--- /dev/null
+++ b/user-client/maintain/iBMC/ibmcOper
@@ -0,0 +1,249 @@
+#!/bin/bash
+
+source "/etc/iBMC/.config/accnt.info"
+servers_info_file='/etc/servers.info'
+forbidden_list_file='/etc/iBMC/.config/forbidden.list'
+operations=(
+    'ibmcMac'
+    'busiMac'
+    'unlink'
+    'relink'
+    'power'
+    'reboot'
+    'users'
+)
+
+NUMBER_SERVERS=0
+declare -A SERVERS IP_SERVERS HOST_SERVERS TYPE_SERVERS
+
+read_servers_file() {
+    while read line
+    do
+        SERVERS[$NUMBER_SERVERS]="$line"
+        IP_SERVERS[$NUMBER_SERVERS]="$(echo $line |awk '{print $1}')"
+        HOST_SERVERS[$NUMBER_SERVERS]="$(echo $line |awk '{print $2}')"
+        TYPE_SERVERS[$NUMBER_SERVERS]="$(echo $line |awk '{print $3}')"
+        NUMBER_SERVERS=$((NUMBER_SERVERS + 1))
+    done < "$servers_info_file"
+    # NUMBER_SERVERS now holds the index of the last server
+    NUMBER_SERVERS=$((NUMBER_SERVERS - 1))
+}
+
+read_servers_cmd() {
+    ([ 'on' == "$1" ] || [ 'off' == "$1" ]) && shift
+    [ -z "$1" ] && return 1
+
+    local server idx
+    idx=0
+    for server in $@
+    do
+        line=$(grep -w $server "$servers_info_file")
+        [ -z "$line" ] && continue
+
+        SERVERS[$NUMBER_SERVERS]="$line"
+        IP_SERVERS[$NUMBER_SERVERS]="$(echo $line |awk '{print $1}')"
+        HOST_SERVERS[$NUMBER_SERVERS]="$(echo $line |awk '{print $2}')"
+        TYPE_SERVERS[$NUMBER_SERVERS]="$(echo $line |awk '{print $3}')"
+        NUMBER_SERVERS=$((NUMBER_SERVERS + 1))
+    done
+    NUMBER_SERVERS=$((NUMBER_SERVERS - 1))
+}
+
+check_forbidden_server() {
+    grep -wq -E "$cur_ip|$cur_host" $forbidden_list_file && {
+        echo "Forbidden $cur_host/$cur_ip Operation! Thanks!"
+        return 1
+    }
+    return 0
+}
+
+print_servers() {
+    for idx in $(seq 0 $NUMBER_SERVERS)
+    do
+        echo "
+        Index: $idx
+        IP:    ${IP_SERVERS[$idx]}
+        HOST:  ${HOST_SERVERS[$idx]}
+        TYPE:  ${TYPE_SERVERS[$idx]}"
+    done
+}
+
+get_cur_ibmc_NIC_mac() {
+    [ 'thaishan' != $cur_type ] && [ '2288hv5' != $cur_type ] && return
+
+    local mac
+    [ 'thaishan' == $cur_type ] && {
+        mac=$(ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd lan print |grep 'MAC Address' |awk '{print $NF}')
+        echo "ThaiShan: $cur_host $mac"
+    }
+    [ '2288hv5' == $cur_type ] && {
+        mac=$(ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd lan print |grep 'MAC Address' |awk '{print $NF}')
+        echo "2288HV5: $cur_host $mac"
+    }
+}
+
+get_cur_busi_NIC_mac() {
+    [ 'thaishan' != $cur_type ] && [ '2288hv5' != $cur_type ] && return
+
+    local mac mac_list nic_idx
+    [ 'thaishan' == $cur_type ] && {
+        for nic_idx in {1..4} ; do
+            mac=$(ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd raw 0x30 0x90 0x01 0x03 0x0${nic_idx} 0x00\
+                  |awk '{print $5,$6,$7,$8,$9,$10}' |sed 's/\s/\-/g')
+            mac_list="$mac_list $mac"
+        done
+        echo "ThaiShan: $cur_host $mac_list"
+    }
+    [ '2288hv5' == $cur_type ] && {
+        for nic_idx in 1 2 ; do
+            mac=$(ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd raw 0x30 0x90 0x01 0x00 0x0${nic_idx} 0x00\
+                  |awk '{print $5,$6,$7,$8,$9,$10}' |sed 's/\s/\-/g')
+            mac_list="$mac_list $mac"
+        done
+        echo "2288HV5: $cur_host $mac_list"
+    }
+}
+
+get_power_status() {
+    local status
+
+    status=$(ipmitool -H $cur_ip -I lanplus -U $iBMC_user -P $iBMC_passwd power status |awk '{print $NF}')
+    [ 'on' != "$status" ] && [ 'off' != "$status" ] && {
+        echo "Get $cur_host Power Status Failed!"
+        return 1
+    }
+    echo "$cur_host Power Status: $status"
+    [ 'on' == "$status" ] && return 0
+    [ 'off' == "$status" ] && return 1
+}
+
+set_power_status() {
+    check_forbidden_server || return
+
+    local new_status
+    new_status="$1"
+    [ -n "$new_status" ] || return
+
+    ipmitool -H $cur_ip -I lanplus -U $iBMC_user -P $iBMC_passwd power $new_status
+}
+
+check_power_status_on() {
+    get_power_status && return
+    set_power_status on
+}
+
+link_ibmc() {
+    check_forbidden_server || return
+    check_power_status_on
+
+    ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd sol deactivate || echo "reconnecting..."
+    [ -d "$HOME/log" ] || mkdir -p $HOME/log
+    ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd sol activate |tee -a $HOME/log/${cur_host}_$(date +'%d-%m-%Y').log
+}
+
+unlink_ibmc() {
+    check_power_status_on
+    ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd sol deactivate
+}
+
+reboot_server() {
+    check_forbidden_server || return
+    check_power_status_on
+
+    ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd power reset
+}
+
+check_ibmc_network() {
+    ping "$cur_host" -c 1 -W 1 >/dev/null 2>&1
+}
+
+get_users() {
+    echo "$cur_host Users List Below:"
+    ipmitool -I lanplus -H $cur_ip -U $iBMC_user -P $iBMC_passwd user list
+}
+
+walk_servers_do() {
+    [ -z "$1" ] && return
+
+    for idx in $(seq 0 $NUMBER_SERVERS)
+    do
+        cur_ip=${IP_SERVERS[$idx]}
+        cur_type=${TYPE_SERVERS[$idx]}
+        cur_host=${HOST_SERVERS[$idx]}
+        check_ibmc_network || {
+            echo "Unreachable $cur_host/$cur_ip!"
+            continue
+        }
+        eval "$@"
+    done
+}
+
+[ -f "$servers_info_file" ] || exit
+[ -f "$forbidden_list_file" ] || exit
+
+trap 'echo && echo "User Cancelled!" && exit' SIGINT
+
+oper=$1
+echo "operation: $oper"
+( [ -z "$oper" ] || [ -z "$(echo ${operations[*]} |grep -w "$oper")" ] ) && {
+    echo -e "Unexpected iBMC operation!\nMaybe you can do: ${operations[*]}"
+    exit 1
+}
+read_servers_cmd "${@:2}" || read_servers_file
+
+# uncomment for debugging
+# print_servers
+
+[ 'ibmcMac' == "$oper" ] && {
+    echo
+    echo "iBMC NIC mac address:"
+    walk_servers_do get_cur_ibmc_NIC_mac
+    exit
+}
+
+[ 'busiMac' == "$oper" ] && {
+    echo
+    echo "business NIC mac address:"
+    walk_servers_do get_cur_busi_NIC_mac
+    exit
+}
+
+[ "power" == "$oper" ] && {
+    new_status=$2
+    [ -z "$new_status" ] && {
+        walk_servers_do get_power_status
+        exit
+    }
+    [ 'on' != "$new_status" ] && [ 'off' != "$new_status" ] && exit
+    [ 'off' == "$new_status" ] && {
+        [ 0 -eq $NUMBER_SERVERS ] && walk_servers_do set_power_status $new_status
+        [ 0 -ne $NUMBER_SERVERS ] && echo "Failed! Please specify only ONE server!"
+    }
+    [ 'on' == "$new_status" ] && walk_servers_do set_power_status $new_status
+    exit
+}
+
+[ 'relink' == "$oper" ] && {
+    echo
+    [ 0 -eq $NUMBER_SERVERS ] && walk_servers_do link_ibmc
+    [ 0 -ne $NUMBER_SERVERS ] && echo "Failed! Please specify only ONE server!"
+    exit
+}
+
+[ 'unlink' == "$oper" ] && {
+    echo
+    walk_servers_do unlink_ibmc
+    exit
+}
+
+[ 'reboot' == "$oper" ] && {
+    echo
+    [ 0 -eq $NUMBER_SERVERS ] && walk_servers_do reboot_server
+    [ 0 -ne $NUMBER_SERVERS ] && echo "Failed! Please specify only ONE server!"
+    exit
+}
+
+[ 'users' == "$oper" ] && {
+    echo
+    walk_servers_do get_users
+    exit
+}
diff --git a/user-client/maintain/walk-os-test/iperf-walk-os.yaml b/user-client/maintain/walk-os-test/iperf-walk-os.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2991743f140062b53cff09919a524ab0146ef1ca
--- /dev/null
+++ b/user-client/maintain/walk-os-test/iperf-walk-os.yaml
@@ -0,0 +1,15 @@
+suite: iperf-walk-os-test
+category: benchmark
+runtime: 300
+cluster: cs-localhost
+if role server:
+  iperf-server:
+if role client:
+  iperf:
+    protocol: tcp
+testbox: vm-hi1620-2p8g--USER
+os: OS
+os_arch: OS_ARCH
+os_version: OS_VER
+node_roles: server client
+LKP_DEBUG_PREFIX: bash -x
diff --git a/user-client/maintain/walk-os-test/my-qemu.sh b/user-client/maintain/walk-os-test/my-qemu.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2f20f67dcc242fef85d717a76f9b4e2919e61c49
--- /dev/null
+++ b/user-client/maintain/walk-os-test/my-qemu.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+[[ $tbox_group ]] || tbox_group=vm-hi1620-2p8g
+export hostname=$tbox_group--$USER-$$
+
+$CCI_SRC/providers/qemu.sh
diff --git a/user-client/maintain/walk-os-test/walk-os-iperf-test b/user-client/maintain/walk-os-test/walk-os-iperf-test
new file mode 100755
index 0000000000000000000000000000000000000000..4a3677e512539b132a723fc7b25f16c049beed86
--- /dev/null
+++ b/user-client/maintain/walk-os-test/walk-os-iperf-test
@@ -0,0 +1,62 @@
+#!/bin/bash -e
+
+# os test rounds
+test_times=1
+
+test_logs=walk-test.report
+test_yaml=iperf-walk-os.yaml
+test_os=(
+    'openeuler aarch64 20'
+    'openeuler aarch64 20.03'
+    'centos aarch64 7.6'
+    'centos aarch64 7.8'
+    'centos aarch64 8.1'
+    'debian aarch64 sid'
+    'debian aarch64 10'
+)
+
+trap 'echo "User Cancelled!" && exit' SIGINT
+
+sec2date() {
+    date -d@$1 +"%Y%m%d_%H:%M:%S"
+}
+
+logging() {
+    if [ 'new' == "$1" ] ; then
+        printf "%-5s %-8s %-25s %-20s %-20s %-8s %-10s\n" ${@:2} > $test_logs
+    else
+        printf "%-5s %-8s %-25s %-20s %-20s %-8s %-10s\n" $@ >> $test_logs
+    fi
+}
+
+create_test_yaml() {
+    local hwarch os os_arch os_ver cur_os
+    hwarch=$(arch)
+    cur_os="$@"
+    os=$(echo $cur_os |cut -d' ' -f1)
+    os_arch=$(echo $cur_os |cut -d' ' -f2)
+    os_ver=$(echo $cur_os |cut -d' ' -f3)
+    # replace OS last: it is a prefix of OS_ARCH and OS_VER
+    sed "s/USER/$USER/" $test_yaml > test.yaml
+    sed -i "s/OS_ARCH/$os_arch/" test.yaml
+    sed -i "s/OS_VER/$os_ver/" test.yaml
+    sed -i "s/OS/$os/" test.yaml
+    echo "$hwarch $os/$os_arch/$os_ver"
+}
+
+job_test() {
+    local job_id bef aft
+    job_id=$(submit test.yaml |awk '{print $NF}')
+    bef=$(date +%s)
+    ./my-qemu.sh
+    aft=$(date +%s)
+    logging "$1" "$2" "$3" "$(sec2date $bef)" "$(sec2date $aft)" "$((aft - bef))" "$job_id"
+}
+
+logging new "Index" "hwArch" "OS" "Begin" "End" "Cost/sec" "Job_ID"
+for round in $(seq $test_times)
+do
+    for idx in "${!test_os[@]}"
+    do
+        job_test $round $(create_test_yaml "${test_os[$idx]}")
+    done
+done
diff --git a/user-client/src/lkp.rb b/user-client/src/lkp.rb
new file mode 100644
index 0000000000000000000000000000000000000000..4eb5ef50ab4da7c654d17915459ed3de7dc5391c
--- /dev/null
+++ b/user-client/src/lkp.rb
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require "#{File.dirname(__FILE__)}/lkp_client"
+require "#{File.dirname(__FILE__)}/lkp_server_info"
+
+if ARGV.size != 2
+  puts 'usage: lkp queue myjobs.yaml'
+else
+  server = LkpServerInfo.new
+  client = LkpClient.new(server)
+
+  client.basic_authorization
+  client.cmd("lkp #{ARGV[0]} #{ARGV[1]}")
+  response = client.run
+
+  puts "add job as jobid = #{response}"
+end
diff --git a/user-client/src/lkp_client.rb b/user-client/src/lkp_client.rb
new file mode 100644
index 0000000000000000000000000000000000000000..7820dea4a5975ef9f5916ea9dcca67a208a5009e
--- /dev/null
+++ b/user-client/src/lkp_client.rb
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'yaml'
+require 'json'
+require 'base64'
+require 'rest-client'
+
+#:nodoc:
+class LkpClient
+  attr_accessor :server
+
+  def initialize(server)
+    @server = server
+  end
+
+  def cmd(cmd)
+    cmdlist = cmd.split(' ')
+    @operate = cmdlist[1]
+    @path = cmdlist[2]
+  end
+
+  def basic_authorization
+    # placeholder credentials; replace with a real account
+    user_name = 'username'
+    user_pass = 'password'
+
+    @auth = 'Basic ' + Base64.encode64("#{user_name}:#{user_pass}").chomp
+    resource = RestClient::Resource.new("http://#{@server.host}:#{@server.port}/",
+                                        { headers: { 'Authorization' => @auth } })
+    resource.get
+  end
+
+  # turn full-line comments into YAML keys so they survive the
+  # YAML -> JSON conversion, then emit the job as JSON
+  def trans(file_path)
+    all_lines = ''
+    File.open(file_path) do |file|
+      lines = file.readlines
+      lines.each do |line|
+        line.gsub!(/^#(.*)\n$/, ":#\\1: \n")
+      end
+      all_lines = lines.join
+    end
+
+    yaml = YAML.parse(all_lines)
+    yaml.to_ruby.to_json
+  end
+
+  def http_post_cmd
+    resource = RestClient::Resource.new("http://#{@server.host}:#{@server.port}/submit_job",
+                                        { headers: { 'Authorization' => @auth } })
+    resource.post(trans(@path))
+  end
+
+  def http_get_cmd
+    resource = RestClient::Resource.new("http://#{@server.host}:#{@server.port}/query_job",
+                                        { headers: { 'Authorization' => @auth,
+                                                     :jobid => @path } })
+    resource.get
+  end
+
+  def run
+    case @operate
+    when 'queue'
+      http_post_cmd
+    when 'result'
+      http_get_cmd
+    else
+      raise "Unknown operation: #{@operate}"
+    end
+  end
+end
diff --git a/user-client/src/lkp_server_info.rb b/user-client/src/lkp_server_info.rb
new file mode 100644
index 0000000000000000000000000000000000000000..6d330bccf24d1a9afd160c0eff964a748b3f5444
--- /dev/null
+++ b/user-client/src/lkp_server_info.rb
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+require 'net/http'
+#:nodoc:
+class LkpServerInfo
+  attr_accessor :host, :port
+
+  def initialize(host = '127.0.0.1', port = '3000')
+    @host = host
+    @port = port
+  end
+
+  def connect_able
+    url = URI("http://#{@host}:#{@port}")
+    http = Net::HTTP.new(url.host, url.port)
+
+    begin
+      response = http.get(url)
+      case response.code
+      when '200', '401'
+        true
+      else
+        false
+      end
+    rescue StandardError
+      false
+    end
+  end
+end
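LkpClient#trans is the subtle part of the client: a plain YAML load would drop full-line comments such as `#! jobs/iperf.yaml`, which the job files above use as markers, so the gsub first rewrites them into mapping keys. A worked example of that rewrite (a sketch; the resulting `:#...` key simply rides along into the JSON job):

```ruby
require 'yaml'
require 'json'

line = "#! jobs/iperf.yaml\n"
line = line.gsub(/^#(.*)\n$/, ":#\\1: \n")
# => ":#! jobs/iperf.yaml: \n" -- now a YAML mapping key with a nil value

puts YAML.safe_load("#{line}suite: iperf\n").to_json
# expected: {":#! jobs/iperf.yaml":null,"suite":"iperf"}
```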