diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a0165d8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +.DS_Store +config.json +testnet.json +ether-pool +ether-pool.exe +logs +tools diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..94a0453 --- /dev/null +++ b/LICENSE @@ -0,0 +1,621 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS diff --git a/README.md b/README.md new file mode 100644 index 0000000..7187ec2 --- /dev/null +++ b/README.md @@ -0,0 +1,291 @@ +# go-ethereum-pool +## High Performance Ethereum Mining Pool + +![Miner's stats page](https://15254b2dcaab7f5478ab-24461f391e20b7336331d5789078af53.ssl.cf1.rackcdn.com/ethereum.vanillaforums.com/editor/pe/cf77cki0pjpt.png) + +### Features + +* Highly available mining endpoint module +* Payouts and block unlocking (maturity) module +* Configurable payouts period and balance threshold +* PROP payouts (miners are simply paid out when a block is found) +* Detailed block stats with luck percentage and full reward +* Failover geth instances: geth high availability built in +* Strict policy module (banning strategies using ipset/iptables) +* Designed for 100% distributed setup of all modules +* Modern beautiful Ember.js frontend +* Separate stats for workers: can highlight timed-out workers so miners can perform maintenance of rigs +* JSON-API for stats, miners can use for rigs maintenance automation (rig rebooting for example ) +Also it's compatible with my *ether-proxy* solo/pool proxy solution. + +*Written in Go it's a rocket highly concurrent and low RAM consuming piece of code* + +### Building on Linux + +Dependencies: + + * go >= 1.4 + * geth + * redis-server + * nodejs + * nginx + +**I highly recommend to use Ubuntu 14.04 LTS.** + +First of all you must install [go-ethereum](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Ubuntu). + +Export GOPATH: + + export GOPATH=$HOME/go + +Install required packages: + + go get github.com/ethereum/ethash + go get github.com/ethereum/go-ethereum/common + go get github.com/gorilla/mux + go get github.com/yvasiyarov/gorelic + +Compile: + + go build -o ether-pool main.go + +Install redis-server and all software you need too. 
+Install nodejs, I suggest to use >= 4.x LTS version from https://github.com/nodesource/distributions or from your Linux distribution. + +### Building on Windows + +It's a little bit crazy to run production pool on this platform, but you can follow +[geth building instructions](https://github.com/ethereum/go-ethereum/wiki/Installation-instructions-for-Windows) and compile pool this way. +Use some cloud Redis provider or give a try to https://github.com/MSOpenTech/redis/releases. + +### Building Frontend + +Frontend is a single-page Ember.js application. It polls API of the pool +to render pool stats to miners. + + cd www + +Change ApiUrl: '//example.net/' to match your domain name. + + npm install -g ember-cli@2.4.3 + npm install -g bower + npm install + bower install + ./build.sh + +Configure nginx to serve API on /api subdirectory. +Configure your nginx instance to serve www/dist as static website. + +#### Serving API using nginx + +Create an upstream for API: + + upstream api { + server 127.0.0.1:8080; + } + +and add this setting after location /: + + location /api { + proxy_pass http://api; + } + +You can customize layout and other stuff using built-in web server with live reload: + + ember server --port 8082 --environment development + +#### Customization + +Check out www/app/templates directory and edit these templates +in order to add your own branding and contacts. + +### Configuration + +Configuration is actually simple, just read it twice and think twice before changing defaults. 
+ +**Don't copy config directly from this manual, use example config from package, +Otherwise you will get errors on start because JSON can't contain comments actually.** + +```javascript +{ + // Set to a number of CPU threads of your server + "threads": 2, + // Prefix for keys in redis store + "coin": "eth", + // Give unique name to each instance + "name": "main", + + "proxy": { + "enabled": true, + + // Bind mining endpoint to this IP:PORT + "listen": "0.0.0.0:8546", + + // Allow only this header and body size of HTTP request from miners + "limitHeadersSize": 1024, + "limitBodySize": 256, + + /* Use it if you are behind CloudFlare (bad idea) or behind http-reverse + proxy to enable IP detection from X-Forwarded-For header. + Advanced users only. It's tricky to make it right and secure. + */ + "behindReverseProxy": false, + + // Try to get new job from geth in this interval + "blockRefreshInterval": "120ms", + "stateUpdateInterval": "3s", + // Require this share difficulty from miners + "difficulty": 2000000000, + + "hashrateExpiration": "30m", + + /* Reply error to miner instead of job if redis is unavailable. + Should save electricity to miners if pool is sick and they didn't set up failovers. + */ + "healthCheck": true, + // Mark pool sick after this number of redis failures. + "maxFails": 100, + + "policy": { + "workers": 8, + "resetInterval": "60m", + "refreshInterval": "1m", + + "banning": { + "enabled": false, + /* Name of ipset for banning. + Check http://ipset.netfilter.org/ documentation. 
+ */ + "ipset": "blacklist", + // Remove ban after this amount of time + "timeout": 1800, + // Percent of invalid shares from all shares to ban miner + "invalidPercent": 30, + // Check after miner has submitted this number of shares + "checkThreshold": 30, + // Ban miner after this number of malformed requests + "malformedLimit": 5 + } + } + }, + + // Provides JSON data for the frontend, which is a static website + "api": { + "enabled": true, + + /* If you are running an API node in a distributed environment where this module + is reading data from a redis writeable slave, enable this option. + Only a redis writeable slave will work properly if you are distributing using redis slaves. + Don't distribute! + */ + "purgeOnly": false, + "listen": "0.0.0.0:8080", + // Collect miners stats (hashrate, ...) in this interval + "statsCollectInterval": "5s", + + // Fast hashrate estimation window for each miner from its shares + "hashrateWindow": "30m", + // Long and precise hashrate from shares, 3h is cool, keep it + "hashrateLargeWindow": "3h", + // Max number of payments to display in frontend + "payments": 50, + // Max number of blocks to display in frontend + "blocks": 50 + }, + + // Check health of each geth node in this interval + "upstreamCheckInterval": "5s", + + /* List of geth nodes to poll for new jobs. Pool will try to get work from + the first alive one and check failed ones in the background. + The current block template of the pool is always cached in RAM indeed. + */ + "upstream": [ + { + "name": "main", + "url": "http://127.0.0.1:8545", + "timeout": "10s" + }, + { + "name": "backup", + "url": "http://127.0.0.2:8545", + "timeout": "10s" + } + ], + + // These are standard redis connection options + "redis": { + // Where your redis instance is listening for commands + "endpoint": "127.0.0.1:6379", + "poolSize": 8, + "database": 0, + /* Generate a very strong password in your redis + configuration file and specify it here.
+ This is done using the requirepass directive in the configuration file. + */ + "password": "secret" + }, + + // This module periodically credits coins to miners + "unlocker": { + "enabled": false, + // Pool fee percentage + "poolFee": 1.0, + // Unlock only if this number of blocks mined back + "depth": 120, + // Simply don't touch this option + "immatureDepth": 20, + // Run unlocker in this interval + "interval": "10m", + // Geth instance node rpc endpoint for unlocking blocks + "daemon": "http://127.0.0.1:8545", + // Raise an error if geth can't be reached in this amount of time + "timeout": "10s" + }, + + // Paying out miners using this module + "payouts": { + "enabled": false, + // Run payouts in this interval + "interval": "12h", + // Geth instance node rpc endpoint for payouts processing + "daemon": "http://127.0.0.1:8545", + // Raise an error if geth can't be reached in this amount of time + "timeout": "10s", + // Address with pool balance + "address": "0x0", + // Gas amount and price for payout tx + "gas": "21000", + "gasPrice": "50000000000", + // Send payment only if miner's balance is >= 0.5 Ether + "threshold": 500000000 + }, +} +``` + +If you are distributing your pool deployment to several servers or processes, +create several configs and disable unneeded modules on each server. +This is very advanced; better not to distribute to several servers until you really need it. + +I recommend this deployment strategy: + +* Mining instance - 1x (it depends, you can run one node for EU, one for US, one for Asia) +* Unlocker and payouts instance - 1x each (strict!) +* API instance - 1x + +### Notes + +Unlocking and payouts are sequential: the 1st tx goes out, the 2nd waits for the 1st to confirm, and so on. +You can disable that in code. Also, keep in mind that unlocking and payouts will be stopped in case of any backend or geth failure. +You must restart the module if you see such errors with the *suspended* word, so I recommend running unlocker and payouts in separate processes.
+Don't run payouts and unlocker as part of mining node. + +### Credits + +Made by sammy007. + +### Donations + +* **ETH**: [0xb85150eb365e7df0941f0cf08235f987ba91506a](https://etherchain.org/account/0xb85150eb365e7df0941f0cf08235f987ba91506a) + +* **BTC**: [1PYqZATFuYAKS65dbzrGhkrvoN9au7WBj8](https://blockchain.info/address/1PYqZATFuYAKS65dbzrGhkrvoN9au7WBj8) diff --git a/api/server.go b/api/server.go new file mode 100644 index 0000000..94eb06c --- /dev/null +++ b/api/server.go @@ -0,0 +1,277 @@ +package api + +import ( + "encoding/json" + "github.com/gorilla/mux" + "log" + "net/http" + "sync" + "sync/atomic" + "time" + + "../storage" + "../util" +) + +type ApiConfig struct { + Enabled bool `json:"enabled"` + Listen string `json:"listen"` + StatsCollectInterval string `json:"statsCollectInterval"` + HashrateWindow string `json:"hashrateWindow"` + HashrateLargeWindow string `json:"hashrateLargeWindow"` + Payments int64 `json:"payments"` + Blocks int64 `json:"blocks"` + PurgeOnly bool `json:"purgeOnly"` +} + +type ApiServer struct { + config *ApiConfig + backend *storage.RedisClient + hashrateWindow time.Duration + hashrateLargeWindow time.Duration + stats atomic.Value + miners map[string]*Entry + minersMu sync.RWMutex +} + +type Entry struct { + stats map[string]interface{} + updatedAt int64 +} + +func NewApiServer(cfg *ApiConfig, backend *storage.RedisClient) *ApiServer { + hashrateWindow, _ := time.ParseDuration(cfg.HashrateWindow) + hashrateLargeWindow, _ := time.ParseDuration(cfg.HashrateLargeWindow) + return &ApiServer{ + config: cfg, + backend: backend, + hashrateWindow: hashrateWindow, + hashrateLargeWindow: hashrateLargeWindow, + miners: make(map[string]*Entry), + } +} + +func (s *ApiServer) Start() { + if s.config.PurgeOnly { + log.Printf("Starting API in purge-only mode") + } else { + log.Printf("Starting API on %v", s.config.Listen) + } + + statsIntv, _ := time.ParseDuration(s.config.StatsCollectInterval) + statsTimer := time.NewTimer(statsIntv) + 
log.Printf("Set stats collect interval to %v", statsIntv) + + // Running only to flush stale data + if s.config.PurgeOnly { + s.purgeStale() + } else { + // Immediately collect stats + s.collectStats() + } + + go func() { + for { + select { + case <-statsTimer.C: + if s.config.PurgeOnly { + s.purgeStale() + } else { + s.collectStats() + } + statsTimer.Reset(statsIntv) + } + } + }() + + if !s.config.PurgeOnly { + s.listen() + } +} + +func (s *ApiServer) listen() { + r := mux.NewRouter() + r.HandleFunc("/api/stats", s.StatsIndex) + r.HandleFunc("/api/miners", s.MinersIndex) + r.HandleFunc("/api/blocks", s.BlocksIndex) + r.HandleFunc("/api/payments", s.PaymentsIndex) + r.HandleFunc("/api/accounts/{login:0x[0-9a-f]{40}}", s.AccountIndex) + r.NotFoundHandler = http.HandlerFunc(notFound) + err := http.ListenAndServe(s.config.Listen, r) + if err != nil { + log.Fatalf("Failed to start API: %v", err) + } +} + +func notFound(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusNotFound) +} + +func (s *ApiServer) purgeStale() { + total, err := s.backend.FlushStaleStats(s.hashrateLargeWindow) + if err != nil { + log.Printf("Failed to purge stale data from backend: ", err) + } else { + log.Printf("Purged stale stats from backend, %v shares affected", total) + } +} + +func (s *ApiServer) collectStats() { + now := util.MakeTimestamp() + stats, err := s.backend.CollectStats(s.hashrateWindow, s.config.Blocks, s.config.Payments) + if err != nil { + log.Printf("Failed to fetch stats from backend: %v", err) + } else { + log.Printf("Stats collection finished %v", util.MakeTimestamp()-now) + s.stats.Store(stats) + } +} + +func (s *ApiServer) StatsIndex(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.Header().Set("Access-Control-Allow-Origin", 
"*") + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + + reply := make(map[string]interface{}) + nodes, err := s.backend.GetNodeStates() + if err != nil { + log.Printf("Failed to get nodes stats from backend: %v", err) + } + reply["nodes"] = nodes + + stats := s.getStats() + if stats != nil { + reply["now"] = util.MakeTimestamp() + reply["stats"] = stats["stats"] + reply["hashrate"] = stats["hashrate"] + reply["minersTotal"] = stats["minersTotal"] + reply["maturedTotal"] = stats["maturedTotal"] + reply["immatureTotal"] = stats["immatureTotal"] + reply["candidatesTotal"] = stats["candidatesTotal"] + } + + err = json.NewEncoder(w).Encode(reply) + if err != nil { + log.Println("Error serializing API response: ", err) + } +} + +func (s *ApiServer) MinersIndex(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + + reply := make(map[string]interface{}) + stats := s.getStats() + if stats != nil { + reply["now"] = util.MakeTimestamp() + reply["miners"] = stats["miners"] + reply["hashrate"] = stats["hashrate"] + reply["minersTotal"] = stats["minersTotal"] + } + + err := json.NewEncoder(w).Encode(reply) + if err != nil { + log.Println("Error serializing API response: ", err) + } +} + +func (s *ApiServer) BlocksIndex(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + + reply := make(map[string]interface{}) + stats := s.getStats() + if stats != nil { + reply["matured"] = stats["matured"] + reply["maturedTotal"] = stats["maturedTotal"] + reply["immature"] = stats["immature"] + reply["immatureTotal"] = stats["immatureTotal"] + reply["candidates"] = stats["candidates"] + 
reply["candidatesTotal"] = stats["candidatesTotal"] + } + + err := json.NewEncoder(w).Encode(reply) + if err != nil { + log.Println("Error serializing API response: ", err) + } +} + +func (s *ApiServer) PaymentsIndex(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + + reply := make(map[string]interface{}) + stats := s.getStats() + if stats != nil { + reply["payments"] = stats["payments"] + reply["paymentsTotal"] = stats["paymentsTotal"] + } + + err := json.NewEncoder(w).Encode(reply) + if err != nil { + log.Println("Error serializing API response: ", err) + } +} + +func (s *ApiServer) AccountIndex(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Cache-Control", "no-cache") + + login := mux.Vars(r)["login"] + reply, err := s.backend.GetMinerStats(login, s.config.Payments) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + log.Printf("Failed to fetch stats from backend: %v", err) + return + } + + s.minersMu.Lock() + defer s.minersMu.Unlock() + + entry, ok := s.miners[login] + now := util.MakeTimestamp() + // Refresh stats if stale + if !ok || entry.updatedAt < now-5000 { + stats, err := s.backend.CollectWorkersStats(s.hashrateWindow, s.hashrateLargeWindow, login) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + log.Printf("Failed to fetch stats from backend: %v", err) + return + } + entry = &Entry{stats: stats, updatedAt: now} + s.miners[login] = entry + } + + reply["workers"] = entry.stats["workers"] + reply["workersTotal"] = entry.stats["workersTotal"] + reply["workersOnline"] = entry.stats["workersOnline"] + reply["workersOffline"] = entry.stats["workersOffline"] + reply["hashrate"] = entry.stats["hashrate"] + 
reply["currentHashrate"] = entry.stats["currentHashrate"] + reply["pageSize"] = s.config.Payments + + w.WriteHeader(http.StatusOK) + err = json.NewEncoder(w).Encode(reply) + if err != nil { + log.Println("Error serializing API response: ", err) + } +} + +func (s *ApiServer) getStats() map[string]interface{} { + stats := s.stats.Load() + if stats != nil { + return stats.(map[string]interface{}) + } + return nil +} diff --git a/build.sh b/build.sh new file mode 100755 index 0000000..bf3b63b --- /dev/null +++ b/build.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +go build -o ether-pool main.go diff --git a/config.example.json b/config.example.json new file mode 100644 index 0000000..9aa985d --- /dev/null +++ b/config.example.json @@ -0,0 +1,93 @@ +{ + "threads": 2, + "coin": "eth", + "name": "main", + + "proxy": { + "enabled": true, + "listen": "0.0.0.0:8546", + "limitHeadersSize": 1024, + "limitBodySize": 256, + "behindReverseProxy": false, + "blockRefreshInterval": "120ms", + "stateUpdateInterval": "3s", + "difficulty": 2000000000, + "hashrateExpiration": "30m", + + "healthCheck": true, + "maxFails": 100, + + "policy": { + "workers": 8, + "resetInterval": "60m", + "refreshInterval": "1m", + + "banning": { + "enabled": true, + "ipset": "blacklist", + "timeout": 1800, + "invalidPercent": 30, + "checkThreshold": 30, + "malformedLimit": 5 + } + } + }, + + "api": { + "enabled": true, + "purgeOnly": false, + "listen": "0.0.0.0:8080", + "statsCollectInterval": "5s", + "hashrateWindow": "30m", + "hashrateLargeWindow": "3h", + "payments": 30, + "blocks": 50 + }, + + "upstreamCheckInterval": "5s", + "upstream": [ + { + "name": "main", + "url": "http://127.0.0.1:8545", + "timeout": "10s" + }, + { + "name": "backup", + "url": "http://127.0.0.2:8545", + "timeout": "10s" + } + ], + + "redis": { + "endpoint": "127.0.0.1:6379", + "poolSize": 5, + "database": 0, + "password": "secret" + }, + + "unlocker": { + "enabled": false, + "poolFee": 1.0, + "depth": 120, + "immatureDepth": 20, + 
"interval": "10m", + "daemon": "http://127.0.0.1:8545", + "timeout": "10s" + }, + + "payouts": { + "enabled": false, + "interval": "120m", + "daemon": "http://127.0.0.1:8545", + "timeout": "10s", + "address": "0x0", + "gas": "21000", + "gasPrice": "50000000000", + "threshold": 500000000 + }, + + "newrelicEnabled": false, + "newrelicName": "MyEtherProxy", + "newrelicKey": "SECRET_KEY", + "newrelicVerbose": false +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..7801c72 --- /dev/null +++ b/main.go @@ -0,0 +1,102 @@ +package main + +import ( + "encoding/json" + "log" + "os" + "path/filepath" + "runtime" + + "./api" + "./payouts" + "./proxy" + "./storage" + + "github.com/yvasiyarov/gorelic" +) + +var cfg proxy.Config +var backend *storage.RedisClient + +func startProxy() { + s := proxy.NewProxy(&cfg, backend) + s.Start() +} + +func startApi() { + s := api.NewApiServer(&cfg.Api, backend) + s.Start() +} + +func startBlockUnlocker() { + u := payouts.NewBlockUnlocker(&cfg.BlockUnlocker, backend) + u.Start() +} + +func startPayoutsProcessor() { + u := payouts.NewPayoutsProcessor(&cfg.Payouts, backend) + u.Start() +} + +func startNewrelic() { + if cfg.NewrelicEnabled { + nr := gorelic.NewAgent() + nr.Verbose = cfg.NewrelicVerbose + nr.NewrelicLicense = cfg.NewrelicKey + nr.NewrelicName = cfg.NewrelicName + nr.Run() + } +} + +func readConfig(cfg *proxy.Config) { + configFileName := "config.json" + if len(os.Args) > 1 { + configFileName = os.Args[1] + } + configFileName, _ = filepath.Abs(configFileName) + log.Printf("Loading config: %v", configFileName) + + configFile, err := os.Open(configFileName) + if err != nil { + log.Fatal("File error: ", err.Error()) + } + defer configFile.Close() + jsonParser := json.NewDecoder(configFile) + if err := jsonParser.Decode(&cfg); err != nil { + log.Fatal("Config error: ", err.Error()) + } +} + +func main() { + readConfig(&cfg) + + if cfg.Threads > 0 { + runtime.GOMAXPROCS(cfg.Threads) + log.Printf("Running with %v 
threads", cfg.Threads) + } + + startNewrelic() + + backend = storage.NewRedisClient(&cfg.Redis, cfg.Coin) + pong, err := backend.Check() + if err != nil { + log.Printf("Can't establish connection to backend: %v", err) + } else { + log.Printf("Backend check reply: %v", pong) + } + + if cfg.Proxy.Enabled { + go startProxy() + } + if cfg.Api.Enabled { + go startApi() + } + if cfg.BlockUnlocker.Enabled { + go startBlockUnlocker() + } + if cfg.Payouts.Enabled { + go startPayoutsProcessor() + } + quit := make(chan bool) + <-quit +} diff --git a/payouts/payer.go b/payouts/payer.go new file mode 100644 index 0000000..af66de1 --- /dev/null +++ b/payouts/payer.go @@ -0,0 +1,135 @@ +package payouts + +import ( + "log" + "math/big" + "time" + + "../rpc" + "../storage" + + "github.com/ethereum/go-ethereum/common" +) + +type PayoutsConfig struct { + Enabled bool `json:"enabled"` + Interval string `json:"interval"` + Daemon string `json:"daemon"` + Timeout string `json:"timeout"` + Address string `json:"address"` + Gas string `json:"gas"` + GasPrice string `json:"gasPrice"` + // In Shannon + Threshold int64 `json:"threshold"` +} + +func (self PayoutsConfig) GasHex() string { + x := common.String2Big(self.Gas) + return common.BigToHash(x).Hex() +} + +func (self PayoutsConfig) GasPriceHex() string { + x := common.String2Big(self.GasPrice) + return common.BigToHash(x).Hex() +} + +type PayoutsProcessor struct { + config *PayoutsConfig + backend *storage.RedisClient + rpc *rpc.RPCClient + halt bool +} + +func NewPayoutsProcessor(cfg *PayoutsConfig, backend *storage.RedisClient) *PayoutsProcessor { + u := &PayoutsProcessor{config: cfg, backend: backend} + u.rpc = rpc.NewRPCClient("PayoutsProcessor", cfg.Daemon, cfg.Timeout) + return u +} + +func (u *PayoutsProcessor) Start() { + log.Println("Starting payouts processor") + intv, _ := time.ParseDuration(u.config.Interval) + timer := time.NewTimer(intv) + log.Printf("Set block payout interval to %v", intv) + + // Immediately process 
payouts after start + u.process() + timer.Reset(intv) + + go func() { + for { + select { + case <-timer.C: + u.process() + timer.Reset(intv) + } + } + }() +} + +func (u *PayoutsProcessor) process() { + if u.halt { + log.Println("Payments suspended due to last critical error") + return + } + mustPay := 0 + minersPaid := 0 + totalAmount := big.NewInt(0) + payees, err := u.backend.GetPayees() + if err != nil { + log.Printf("Error while retrieving payees from backend: %v", err) + return + } + + for _, login := range payees { + amount, _ := u.backend.GetBalance(login) + if amount <= 0 { + continue + } + + gweiAmount := big.NewInt(amount) + if !u.reachedThreshold(gweiAmount) { + continue + } + mustPay++ + + // Gwei^2 = Wei + weiAmount := gweiAmount.Mul(gweiAmount, common.Shannon) + value := common.BigToHash(weiAmount).Hex() + txHash, err := u.rpc.SendTransaction(u.config.Address, login, u.config.GasHex(), u.config.GasPriceHex(), value) + if err != nil { + log.Printf("Failed to send payment: %v", err) + u.halt = true + break + } + minersPaid++ + totalAmount.Add(totalAmount, big.NewInt(amount)) + log.Printf("Paid %v Shannon to %v, TxHash: %v", amount, login, txHash) + + err = u.backend.UpdateBalance(login, txHash, amount) + if err != nil { + log.Printf("DANGER: Failed to update balance for %v with %v. TX: %v. 
Error is: %v", login, amount, txHash, err) + u.halt = true + return + } + // Wait for TX confirmation before further payouts + for { + log.Printf("Waiting for TX to get confirmed: %v", txHash) + time.Sleep(15 * time.Second) + receipt, err := u.rpc.GetTxReceipt(txHash) + if err != nil { + log.Printf("Failed to get tx receipt for %v: %v", txHash, err) + } + if receipt != nil { + break + } + } + log.Printf("Payout TX confirmed: %v", txHash) + } + log.Printf("Paid total %v Shannon to %v of %v payees", totalAmount, minersPaid, mustPay) +} + +func (self PayoutsProcessor) reachedThreshold(amount *big.Int) bool { + x := big.NewInt(self.config.Threshold).Cmp(amount) + return x < 0 // Threshold is less than amount +} diff --git a/payouts/unlocker.go b/payouts/unlocker.go new file mode 100644 index 0000000..b34f818 --- /dev/null +++ b/payouts/unlocker.go @@ -0,0 +1,460 @@ +package payouts + +import ( + "fmt" + "log" + "math/big" + "strconv" + "strings" + "time" + + "../rpc" + "../storage" + "../util" + + "github.com/ethereum/go-ethereum/common" +) + +type UnlockerConfig struct { + Enabled bool `json:"enabled"` + PoolFee float64 `json:"poolFee"` + Depth int64 `json:"depth"` + ImmatureDepth int64 `json:"immatureDepth"` + Interval string `json:"interval"` + Daemon string `json:"daemon"` + Timeout string `json:"timeout"` +} + +var constRewardInEther = new(big.Int).SetInt64(5) +var constReward = new(big.Int).Mul(constRewardInEther, common.Ether) +var uncleReward = new(big.Int).Div(constReward, new(big.Int).SetInt64(32)) + +type BlockUnlocker struct { + config *UnlockerConfig + backend *storage.RedisClient + rpc *rpc.RPCClient + halt bool +} + +func NewBlockUnlocker(cfg *UnlockerConfig, backend *storage.RedisClient) *BlockUnlocker { + if cfg.Depth < 10 { + log.Fatalf("Block maturity depth can't be < 10, your depth is %v", cfg.Depth) + } + if cfg.ImmatureDepth < 10 { + log.Fatalf("Immature depth can't be < 10, your depth is %v", cfg.ImmatureDepth) + } + u := &BlockUnlocker{config: 
cfg, backend: backend} + u.rpc = rpc.NewRPCClient("BlockUnlocker", cfg.Daemon, cfg.Timeout) + return u +} + +func (u *BlockUnlocker) Start() { + log.Println("Starting block unlocker") + intv, _ := time.ParseDuration(u.config.Interval) + timer := time.NewTimer(intv) + log.Printf("Set block unlock interval to %v", intv) + + // Immediately unlock after start + u.unlockPendingBlocks() + u.unlockAndCreditMiners() + timer.Reset(intv) + + go func() { + for { + select { + case <-timer.C: + u.unlockPendingBlocks() + u.unlockAndCreditMiners() + timer.Reset(intv) + } + } + }() +} + +type UnlockResult struct { + maturedBlocks []*storage.BlockData + orphanedBlocks []*storage.BlockData + orphans int + uncles int + blocks int +} + +/* FIXME: Geth does not provide consistent state when you need both new height and new job, + * so in redis I am logging just what I have in a pool state on the moment when block found. + * Having very likely incorrect height in database results in a weird block unlocking scheme, + * when I have to check what the hell we actually found and traversing all the blocks with height-N and htight+N + * to make sure we will find it. We can't rely on block height here, it's just a reference point. + * You can say I can search with block hash, but we don't know block hash of submitted block until we actually found + * it traversing all the blocks around our height. 
+ * ISSUE: https://github.com/ethereum/go-ethereum/issues/2333 + */ +func (u *BlockUnlocker) unlockCandidates(candidates []*storage.BlockData) (*UnlockResult, error) { + var maturedBlocks []*storage.BlockData + var orphanedBlocks []*storage.BlockData + blocksUnlocked := 0 + unclesUnlocked := 0 + orphans := 0 + + // Data row is: "height:nonce:powHash:mixDigest:timestamp:diff:totalShares" + for _, candidate := range candidates { + block, err := u.rpc.GetBlockByHeight(candidate.Height) + if err != nil { + return nil, fmt.Errorf("Error while retrieving block %v from node: %v", candidate.Height, err) + } + if block == nil { + return nil, fmt.Errorf("Error while retrieving block %v from node, wrong node height", candidate.Height) + } + + if block.Nonce == candidate.Nonce { + blocksUnlocked++ + err = u.handleCandidate(block, candidate) + if err != nil { + return nil, err + } + maturedBlocks = append(maturedBlocks, candidate) + log.Printf("Mature block %v with %v tx, hash: %v", candidate.Height, len(block.Transactions), block.Hash[0:8]) + } else { + // Temporarily mark as lost + orphan := true + log.Printf("Probably uncle block %v with nonce: %v", candidate.Height, candidate.Nonce) + + /* Search for block that can include this one as uncle. + * Also we are searching for a normal block with wrong height here by traversing 16 blocks back and forward. 
+ */ + for i := int64(-16); i < 16; i++ { + nephewHeight := candidate.Height + i + nephewBlock, err := u.rpc.GetBlockByHeight(nephewHeight) + if err != nil { + log.Printf("Error while retrieving block %v from node: %v", nephewHeight, err) + return nil, err + } + if nephewBlock == nil { + return nil, fmt.Errorf("Error while retrieving block %v from node, wrong node height", nephewHeight) + } + + // Check incorrect block height + if candidate.Nonce == nephewBlock.Nonce { + orphan = false + blocksUnlocked++ + err = u.handleCandidate(nephewBlock, candidate) + if err != nil { + return nil, err + } + rightHeight, err := strconv.ParseInt(strings.Replace(nephewBlock.Number, "0x", "", -1), 16, 64) + if err != nil { + u.halt = true + log.Printf("Can't parse block number: %v", err) + return nil, err + } + log.Printf("Block %v has incorrect height, correct height is %v", candidate.Height, rightHeight) + maturedBlocks = append(maturedBlocks, candidate) + log.Printf("Mature block %v with %v tx, hash: %v", candidate.Height, len(block.Transactions), block.Hash[0:8]) + break + } + + if len(nephewBlock.Uncles) == 0 { + continue + } + + // Trying to find uncle in current block during our forward check + for uncleIndex, uncleHash := range nephewBlock.Uncles { + reply, err := u.rpc.GetUncleByBlockNumberAndIndex(nephewHeight, uncleIndex) + if err != nil { + return nil, fmt.Errorf("Error while retrieving block %v from node: %v", uncleHash, err) + } + if reply == nil { + return nil, fmt.Errorf("Error while retrieving block %v from node, wrong node height", nephewHeight) + } + + // Found uncle + if reply.Nonce == candidate.Nonce { + orphan = false + unclesUnlocked++ + uncleHeight, err := strconv.ParseInt(strings.Replace(reply.Number, "0x", "", -1), 16, 64) + if err != nil { + u.halt = true + log.Printf("Can't parse uncle block number: %v", err) + return nil, err + } + reward := getUncleReward(uncleHeight, nephewHeight) + candidate.Uncle = true + candidate.Orphan = false + candidate.Hash = 
reply.Hash + candidate.Reward = reward + maturedBlocks = append(maturedBlocks, candidate) + log.Printf("Mature uncle block %v/%v of reward %v with hash: %v", candidate.Height, nephewHeight, util.FormatReward(reward), reply.Hash[0:8]) + break + } + } + + if !orphan { + break + } + } + + // Block is lost, we didn't find any valid block or uncle matching our data in a blockchain + if orphan { + orphans++ + candidate.Uncle = false + candidate.Orphan = true + orphanedBlocks = append(orphanedBlocks, candidate) + log.Printf("Rejected block %v", candidate) + } + } + } + return &UnlockResult{ + maturedBlocks: maturedBlocks, + orphanedBlocks: orphanedBlocks, + orphans: orphans, + blocks: blocksUnlocked, + uncles: unclesUnlocked, + }, nil +} + +func (u *BlockUnlocker) handleCandidate(block *rpc.GetBlockReply, candidate *storage.BlockData) error { + // Initial 5 Ether static reward + reward := big.NewInt(0) + reward.Add(reward, constReward) + + // Add TX fees + extraTxReward, err := u.getExtraRewardForTx(block) + if err != nil { + return fmt.Errorf("Error while fetching TX receipt: %v", err) + } + reward.Add(reward, extraTxReward) + + // Add reward for including uncles + rewardForUncles := big.NewInt(0).Mul(uncleReward, big.NewInt(int64(len(block.Uncles)))) + reward.Add(reward, rewardForUncles) + + candidate.Uncle = false + candidate.Orphan = false + candidate.Hash = block.Hash + candidate.Reward = reward + return nil +} + +func (u *BlockUnlocker) unlockPendingBlocks() { + if u.halt { + log.Println("Unlocking suspended due to last critical error") + return + } + + current, err := u.rpc.GetPendingBlock() + if err != nil { + u.halt = true + log.Printf("Unable to get current blockchain height from node: %v", err) + return + } + currentHeight, err := strconv.ParseInt(strings.Replace(current.Number, "0x", "", -1), 16, 64) + if err != nil { + u.halt = true + log.Printf("Can't parse pending block number: %v", err) + return + } + + candidates, err := 
u.backend.GetCandidates(currentHeight - u.config.ImmatureDepth) + if err != nil { + u.halt = true + log.Printf("Failed to get block candidates from backend: %v", err) + return + } + + result, err := u.unlockCandidates(candidates) + if err != nil { + u.halt = true + log.Printf("Failed to unlock blocks: %v", err) + return + } + log.Printf("Immature %v blocks, %v uncles, %v orphans", result.blocks, result.uncles, result.orphans) + + err = u.backend.WritePendingOrphans(result.orphanedBlocks) + if err != nil { + u.halt = true + log.Printf("Failed to insert orphaned blocks into backend: %v", err) + return + } else { + log.Printf("Inserted %v orphaned blocks to backend", result.orphans) + } + + totalRevenue := new(big.Rat) + totalMinersProfit := new(big.Rat) + totalPoolProfit := new(big.Rat) + + for _, block := range result.maturedBlocks { + revenue, minersProfit, poolProfit, roundRewards, err := u.calculateRewards(block) + if err != nil { + u.halt = true + log.Printf("Failed to calculate rewards for round %v: %v", block.RoundKey(), err) + return + } + err = u.backend.WriteImmatureBlock(block, roundRewards) + if err != nil { + u.halt = true + log.Printf("Failed to credit rewards for round %v: %v", block.RoundKey(), err) + return + } + totalRevenue.Add(totalRevenue, revenue) + totalMinersProfit.Add(totalMinersProfit, minersProfit) + totalPoolProfit.Add(totalPoolProfit, poolProfit) + + logEntry := fmt.Sprintf( + "IMMATURE %v: revenue %v, miners profit %v, pool profit: %v", + block.RoundKey(), + util.FormatRatReward(revenue), + util.FormatRatReward(minersProfit), + util.FormatRatReward(poolProfit), + ) + entries := []string{logEntry} + for login, reward := range roundRewards { + entries = append(entries, fmt.Sprintf("\tREWARD %v: %v : %v", block.RoundKey(), login, reward)) + } + log.Println(strings.Join(entries, "\n")) + } + + log.Printf( + "IMMATURE SESSION: revenue %v, miners profit %v, pool profit: %v", + util.FormatRatReward(totalRevenue), + 
util.FormatRatReward(totalMinersProfit), + util.FormatRatReward(totalPoolProfit), + ) +} + +func (u *BlockUnlocker) unlockAndCreditMiners() { + if u.halt { + log.Println("Unlocking suspended due to last critical error") + return + } + + current, err := u.rpc.GetPendingBlock() + if err != nil { + u.halt = true + log.Printf("Unable to get current blockchain height from node: %v", err) + return + } + currentHeight, err := strconv.ParseInt(strings.Replace(current.Number, "0x", "", -1), 16, 64) + if err != nil { + u.halt = true + log.Printf("Can't parse pending block number: %v", err) + return + } + + immature, err := u.backend.GetImmatureBlocks(currentHeight - u.config.Depth) + if err != nil { + u.halt = true + log.Printf("Failed to get block candidates from backend: %v", err) + return + } + + result, err := u.unlockCandidates(immature) + if err != nil { + u.halt = true + log.Printf("Failed to unlock blocks: %v", err) + return + } + log.Printf("Unlocked %v blocks, %v uncles, %v orphans", result.blocks, result.uncles, result.orphans) + + for _, block := range result.orphanedBlocks { + err = u.backend.WriteOrphan(block) + if err != nil { + u.halt = true + log.Printf("Failed to insert orphaned block into backend: %v", err) + return + } + } + log.Printf("Inserted %v orphaned blocks to backend", result.orphans) + + totalRevenue := new(big.Rat) + totalMinersProfit := new(big.Rat) + totalPoolProfit := new(big.Rat) + + for _, block := range result.maturedBlocks { + revenue, minersProfit, poolProfit, roundRewards, err := u.calculateRewards(block) + if err != nil { + u.halt = true + log.Printf("Failed to calculate rewards for round %v: %v", block.RoundKey(), err) + return + } + err = u.backend.WriteMaturedBlock(block, roundRewards) + if err != nil { + u.halt = true + log.Printf("Failed to credit rewards for round %v: %v", block.RoundKey(), err) + return + } + totalRevenue.Add(totalRevenue, revenue) + totalMinersProfit.Add(totalMinersProfit, minersProfit) + 
totalPoolProfit.Add(totalPoolProfit, poolProfit) + + logEntry := fmt.Sprintf( + "MATURED %v: revenue %v, miners profit %v, pool profit: %v", + block.RoundKey(), + util.FormatRatReward(revenue), + util.FormatRatReward(minersProfit), + util.FormatRatReward(poolProfit), + ) + entries := []string{logEntry} + for login, reward := range roundRewards { + entries = append(entries, fmt.Sprintf("\tREWARD %v: %v : %v", block.RoundKey(), login, reward)) + } + log.Println(strings.Join(entries, "\n")) + } + + log.Printf( + "MATURE SESSION: revenue %v, miners profit %v, pool profit: %v", + util.FormatRatReward(totalRevenue), + util.FormatRatReward(totalMinersProfit), + util.FormatRatReward(totalPoolProfit), + ) +} + +func (u *BlockUnlocker) calculateRewards(block *storage.BlockData) (*big.Rat, *big.Rat, *big.Rat, map[string]int64, error) { + rewards := make(map[string]int64) + revenue := new(big.Rat).SetInt(block.Reward) + + feePercent := new(big.Rat).SetFloat64(u.config.PoolFee / 100) + poolProfit := new(big.Rat).Mul(revenue, feePercent) + + minersProfit := new(big.Rat).Sub(revenue, poolProfit) + + shares, err := u.backend.GetRoundShares(uint64(block.Height), block.Nonce) + if err != nil { + return nil, nil, nil, nil, err + } + + for login, n := range shares { + percent := big.NewRat(n, block.TotalShares) + workerReward := new(big.Rat).Mul(minersProfit, percent) + + shannon := new(big.Rat).SetInt(common.Shannon) + workerReward = workerReward.Quo(workerReward, shannon) + amount, _ := strconv.ParseInt(workerReward.FloatString(0), 10, 64) + rewards[login] += amount + } + + return revenue, minersProfit, poolProfit, rewards, nil +} + +func getUncleReward(uHeight, height int64) *big.Int { + reward := new(big.Int).Set(constReward) + reward.Mul(big.NewInt(uHeight+8-height), reward) + reward.Div(reward, big.NewInt(8)) + return reward +} + +func (u *BlockUnlocker) getExtraRewardForTx(block *rpc.GetBlockReply) (*big.Int, error) { + amount := new(big.Int) + + for _, tx := range 
block.Transactions { + receipt, err := u.rpc.GetTxReceipt(tx.Hash) + if err != nil { + return nil, err + } + if receipt != nil { + gasUsed := common.String2Big(receipt.GasUsed) + gasPrice := common.String2Big(tx.GasPrice) + fee := new(big.Int).Mul(gasUsed, gasPrice) + amount.Add(amount, fee) + } + } + return amount, nil +} diff --git a/policy/policy.go b/policy/policy.go new file mode 100644 index 0000000..64727bf --- /dev/null +++ b/policy/policy.go @@ -0,0 +1,308 @@ +package policy + +import ( + "fmt" + "log" + "os/exec" + "strings" + "sync" + "sync/atomic" + "time" + + "../storage" + "../util" +) + +type Config struct { + Workers int `json:"workers"` + Banning Banning `json:"banning"` + Limits Limits `json:"limits"` + ResetInterval string `json:"resetInterval"` + RefreshInterval string `json:"refreshInterval"` +} + +type Limits struct { + Enabled bool `json:"enabled"` + Limit int32 `json:"limit"` + Grace string `json:"grace"` + LimitJump int32 `json:"limitJump"` +} + +type Banning struct { + Enabled bool `json:"enabled"` + IPSet string `json:"ipset"` + Timeout int64 `json:"timeout"` + InvalidPercent float32 `json:"invalidPercent"` + CheckThreshold int32 `json:"checkThreshold"` + MalformedLimit int32 `json:"malformedLimit"` +} + +type Stats struct { + sync.Mutex + // We are using atomic with LastBeat, + // so moving it before the rest in order to avoid alignment issue + LastBeat int64 + BannedAt int64 + ValidShares int32 + InvalidShares int32 + Malformed int32 + ConnLimit int32 + Banned int32 +} + +type PolicyServer struct { + sync.RWMutex + statsMu sync.RWMutex + config *Config + stats map[string]*Stats + banChannel chan string + startedAt int64 + grace int64 + timeout int64 + blacklist []string + whitelist []string + storage *storage.RedisClient +} + +func Start(cfg *Config, storage *storage.RedisClient) *PolicyServer { + s := &PolicyServer{config: cfg, startedAt: util.MakeTimestamp()} + grace, _ := time.ParseDuration(cfg.Limits.Grace) + s.grace = int64(grace / 
time.Millisecond) + s.banChannel = make(chan string, 64) + s.stats = make(map[string]*Stats) + s.storage = storage + s.refreshState() + + timeout, _ := time.ParseDuration(s.config.ResetInterval) + s.timeout = int64(timeout / time.Millisecond) + + resetIntv, _ := time.ParseDuration(s.config.ResetInterval) + resetTimer := time.NewTimer(resetIntv) + log.Printf("Set policy stats reset every %v", resetIntv) + + refreshIntv, _ := time.ParseDuration(s.config.RefreshInterval) + refreshTimer := time.NewTimer(refreshIntv) + log.Printf("Set policy state refresh every %v", refreshIntv) + + go func() { + for { + select { + case <-resetTimer.C: + s.resetStats() + resetTimer.Reset(resetIntv) + case <-refreshTimer.C: + s.refreshState() + refreshTimer.Reset(refreshIntv) + } + } + }() + + for i := 0; i < s.config.Workers; i++ { + s.startPolicyWorker() + } + log.Printf("Running with %v policy workers", s.config.Workers) + return s +} + +func (s *PolicyServer) startPolicyWorker() { + go func() { + for { + select { + case ip := <-s.banChannel: + s.doBan(ip) + } + } + }() +} + +func (s *PolicyServer) resetStats() { + now := util.MakeTimestamp() + banningTimeout := s.config.Banning.Timeout * 1000 + total := 0 + s.statsMu.Lock() + defer s.statsMu.Unlock() + + for key, m := range s.stats { + lastBeat := atomic.LoadInt64(&m.LastBeat) + bannedAt := atomic.LoadInt64(&m.BannedAt) + + if now-bannedAt >= banningTimeout { + atomic.StoreInt64(&m.BannedAt, 0) + if atomic.CompareAndSwapInt32(&m.Banned, 1, 0) { + log.Printf("Ban dropped for %v", key) + } + } + if now-lastBeat >= s.timeout { + delete(s.stats, key) + total++ + } + } + log.Printf("Flushed stats for %v IP addresses", total) +} + +func (s *PolicyServer) refreshState() { + s.Lock() + defer s.Unlock() + var err error + + s.blacklist, err = s.storage.GetBlacklist() + if err != nil { + log.Printf("Failed to get blacklist from backend: %v", err) + } + s.whitelist, err = s.storage.GetWhitelist() + if err != nil { + log.Printf("Failed to get 
whitelist from backend: %v", err) + } + log.Println("Policy state refresh complete") +} + +func (s *PolicyServer) NewStats() *Stats { + x := &Stats{ + ConnLimit: s.config.Limits.Limit, + } + x.heartbeat() + return x +} + +func (s *PolicyServer) Get(ip string) *Stats { + s.statsMu.RLock() + defer s.statsMu.RUnlock() + + if x, ok := s.stats[ip]; ok { + x.heartbeat() + return x + } + x := s.NewStats() + s.stats[ip] = x + return x +} + +func (s *PolicyServer) ApplyLimitPolicy(ip string) bool { + if !s.config.Limits.Enabled { + return true + } + now := util.MakeTimestamp() + if now-s.startedAt > s.grace { + return s.Get(ip).decrLimit() > 0 + } + return true +} + +func (s *PolicyServer) ApplyLoginPolicy(addy, ip string) bool { + if s.InBlackList(addy) { + x := s.Get(ip) + s.forceBan(x, ip) + return false + } + return true +} + +func (s *PolicyServer) ApplyMalformedPolicy(ip string) { + x := s.Get(ip) + n := x.incrMalformed() + if n >= s.config.Banning.MalformedLimit { + s.forceBan(x, ip) + } +} + +func (s *PolicyServer) ApplySharePolicy(ip string, validShare bool) bool { + x := s.Get(ip) + if validShare && s.config.Limits.Enabled { + s.Get(ip).incrLimit(s.config.Limits.LimitJump) + } + x.Lock() + + if validShare { + x.ValidShares++ + if s.config.Limits.Enabled { + x.incrLimit(s.config.Limits.LimitJump) + } + } else { + x.InvalidShares++ + } + + totalShares := x.ValidShares + x.InvalidShares + if totalShares < s.config.Banning.CheckThreshold { + x.Unlock() + return true + } + validShares := float32(x.ValidShares) + invalidShares := float32(x.InvalidShares) + x.resetShares() + x.Unlock() + + if invalidShares == 0 { + return true + } + + // Can be +Inf or value, previous check prevents NaN + ratio := invalidShares / validShares + + if ratio >= s.config.Banning.InvalidPercent/100.0 { + s.forceBan(x, ip) + return false + } + return true +} + +func (x *Stats) resetShares() { + x.ValidShares = 0 + x.InvalidShares = 0 +} + +func (s *PolicyServer) forceBan(x *Stats, ip string) { 
+ if !s.config.Banning.Enabled || s.InWhiteList(ip) { + return + } + atomic.StoreInt64(&x.BannedAt, util.MakeTimestamp()) + + if atomic.CompareAndSwapInt32(&x.Banned, 0, 1) { + if len(s.config.Banning.IPSet) > 0 { + s.banChannel <- ip + } + } +} + +func (x *Stats) incrLimit(n int32) { + atomic.AddInt32(&x.ConnLimit, n) +} + +func (x *Stats) incrMalformed() int32 { + return atomic.AddInt32(&x.Malformed, 1) +} + +func (x *Stats) decrLimit() int32 { + return atomic.AddInt32(&x.ConnLimit, -1) +} + +func (s *PolicyServer) InBlackList(addy string) bool { + s.RLock() + defer s.RUnlock() + return util.StringInSlice(addy, s.blacklist) +} + +func (s *PolicyServer) InWhiteList(ip string) bool { + s.RLock() + defer s.RUnlock() + return util.StringInSlice(ip, s.whitelist) +} + +func (s *PolicyServer) doBan(ip string) { + set, timeout := s.config.Banning.IPSet, s.config.Banning.Timeout + cmd := fmt.Sprintf("sudo ipset add %s %s timeout %v -!", set, ip, timeout) + args := strings.Fields(cmd) + head := args[0] + args = args[1:] + + log.Printf("Banned %v with timeout %v on ipset %s", ip, timeout, set) + + _, err := exec.Command(head, args...).Output() + if err != nil { + log.Printf("CMD Error: %s", err) + } +} + +func (x *Stats) heartbeat() { + now := util.MakeTimestamp() + atomic.StoreInt64(&x.LastBeat, now) +} diff --git a/proxy/api.go b/proxy/api.go new file mode 100644 index 0000000..73493e8 --- /dev/null +++ b/proxy/api.go @@ -0,0 +1,35 @@ +package proxy + +import ( + "encoding/json" + "log" + "net/http" + "sync/atomic" +) + +func (s *ProxyServer) StatusIndex(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + + reply := make(map[string]interface{}) + + var upstreams []interface{} + current := atomic.LoadInt32(&s.upstream) + + for i, u := range s.upstreams { + upstream := 
map[string]interface{}{ + "name": u.Name, + "sick": u.Sick(), + "current": current == int32(i), + } + upstreams = append(upstreams, upstream) + } + reply["upstreams"] = upstreams + + err := json.NewEncoder(w).Encode(reply) + if err != nil { + log.Println("Error serializing API response: ", err) + } +} diff --git a/proxy/blocks.go b/proxy/blocks.go new file mode 100644 index 0000000..86985a7 --- /dev/null +++ b/proxy/blocks.go @@ -0,0 +1,116 @@ +package proxy + +import ( + "log" + "math/big" + "strconv" + "strings" + "sync" + + "../rpc" + "../util" + + "github.com/ethereum/go-ethereum/common" +) + +const maxBacklog = 3 + +type BlockTemplate struct { + sync.RWMutex + Header string + Seed string + Target string + Difficulty *big.Int + Height uint64 + GetPendingBlockCache *rpc.GetBlockReplyPart + nonces map[string]bool + headers map[string]uint64 +} + +func (t *BlockTemplate) submit(nonce string) bool { + t.Lock() + defer t.Unlock() + _, exist := t.nonces[nonce] + if exist { + return true + } + t.nonces[nonce] = true + return false +} + +type Block struct { + difficulty *big.Int + hashNoNonce common.Hash + nonce uint64 + mixDigest common.Hash + number uint64 +} + +func (b Block) Difficulty() *big.Int { return b.difficulty } +func (b Block) HashNoNonce() common.Hash { return b.hashNoNonce } +func (b Block) Nonce() uint64 { return b.nonce } +func (b Block) MixDigest() common.Hash { return b.mixDigest } +func (b Block) NumberU64() uint64 { return b.number } + +func (s *ProxyServer) fetchBlockTemplate() { + rpc := s.rpc() + t := s.currentBlockTemplate() + pendingReply, height, diff, err := s.fetchPendingBlock() + if err != nil { + log.Printf("Error while refreshing pending block on %s: %s", rpc.Name, err) + return + } + reply, err := rpc.GetWork() + if err != nil { + log.Printf("Error while refreshing block template on %s: %s", rpc.Name, err) + return + } + // No need to update, we have fresh job + if t != nil && t.Header == reply[0] { + return + } + + 
pendingReply.Difficulty = util.ToHex(s.config.Proxy.Difficulty) + + newTemplate := BlockTemplate{ + Header: reply[0], + Seed: reply[1], + Target: reply[2], + Height: height, + Difficulty: big.NewInt(diff), + GetPendingBlockCache: pendingReply, + nonces: make(map[string]bool), + headers: make(map[string]uint64), + } + // Copy headers backlog and add current one + newTemplate.headers[reply[0]] = height + if t != nil { + for k, v := range t.headers { + if v >= height-maxBacklog { + newTemplate.headers[k] = v + } + } + } + s.blockTemplate.Store(&newTemplate) + log.Printf("New block to mine on %s at height: %d / %s", rpc.Name, height, reply[0][0:10]) +} + +func (s *ProxyServer) fetchPendingBlock() (*rpc.GetBlockReplyPart, uint64, int64, error) { + rpc := s.rpc() + reply, err := rpc.GetPendingBlock() + if err != nil { + log.Printf("Error while refreshing pending block on %s: %s", rpc.Name, err) + return nil, 0, 0, err + } + blockNumber, err := strconv.ParseUint(strings.Replace(reply.Number, "0x", "", -1), 16, 64) + if err != nil { + log.Println("Can't parse pending block number") + return nil, 0, 0, err + } + blockDiff, err := strconv.ParseInt(strings.Replace(reply.Difficulty, "0x", "", -1), 16, 64) + if err != nil { + log.Println("Can't parse pending block difficulty") + return nil, 0, 0, err + } + return reply, blockNumber, blockDiff, nil +} diff --git a/proxy/config.go b/proxy/config.go new file mode 100644 index 0000000..3721423 --- /dev/null +++ b/proxy/config.go @@ -0,0 +1,52 @@ +package proxy + +import ( + "../api" + "../payouts" + "../policy" + "../storage" +) + +type Config struct { + Name string `json:"name"` + Proxy Proxy `json:"proxy"` + Api api.ApiConfig `json:"api"` + Upstream []Upstream `json:"upstream"` + UpstreamCheckInterval string `json:"upstreamCheckInterval"` + + Threads int `json:"threads"` + + Coin string `json:"coin"` + Redis storage.Config `json:"redis"` + + BlockUnlocker payouts.UnlockerConfig `json:"unlocker"` + Payouts payouts.PayoutsConfig 
`json:"payouts"` + + NewrelicName string `json:"newrelicName"` + NewrelicKey string `json:"newrelicKey"` + NewrelicVerbose bool `json:"newrelicVerbose"` + NewrelicEnabled bool `json:"newrelicEnabled"` +} + +type Proxy struct { + Enabled bool `json:"enabled"` + Listen string `json:"listen"` + LimitHeadersSize int `json:"limitHeadersSize"` + LimitBodySize int64 `json:"limitBodySize"` + BehindReverseProxy bool `json:"behindReverseProxy"` + BlockRefreshInterval string `json:"blockRefreshInterval"` + Difficulty int64 `json:"difficulty"` + StateUpdateInterval string `json:"stateUpdateInterval"` + HashrateExpiration string `json:"hashrateExpiration"` + + Policy policy.Config `json:"policy"` + + MaxFails int64 `json:"maxFails"` + HealthCheck bool `json:"healthCheck"` +} + +type Upstream struct { + Name string `json:"name"` + Url string `json:"url"` + Timeout string `json:"timeout"` +} diff --git a/proxy/handlers.go b/proxy/handlers.go new file mode 100644 index 0000000..f2bc68d --- /dev/null +++ b/proxy/handlers.go @@ -0,0 +1,68 @@ +package proxy + +import ( + "log" + "regexp" + + "../rpc" +) + +var noncePattern *regexp.Regexp + +func init() { + noncePattern, _ = regexp.Compile("^0x[0-9a-f]{16}$") +} + +func (s *ProxyServer) handleGetWorkRPC(cs *Session, login, id string) ([]string, *ErrorReply) { + t := s.currentBlockTemplate() + if t == nil || len(t.Header) == 0 || s.isSick() { + return nil, &ErrorReply{Code: -1, Message: "Work not ready"} + } + return []string{t.Header, t.Seed, s.diff}, nil +} + +func (s *ProxyServer) handleSubmitRPC(cs *Session, login string, id string, params []string) (bool, *ErrorReply) { + m := NewMiner(login, id, cs.ip) + + if len(params) != 3 { + s.policy.ApplyMalformedPolicy(cs.ip) + log.Printf("Malformed params from %s@%s", m.Login, m.IP) + return false, &ErrorReply{Code: -1, Message: "Malformed params", close: true} + } + + if !noncePattern.MatchString(params[0]) { + s.policy.ApplyMalformedPolicy(cs.ip) + log.Printf("Malformed nonce from 
%s@%s", m.Login, m.IP) + return false, &ErrorReply{Code: -1, Message: "Malformed nonce", close: true} + } + t := s.currentBlockTemplate() + exist, validShare := m.processShare(s, t, params) + s.policy.ApplySharePolicy(m.IP, !exist && validShare) + + if exist { + log.Printf("Duplicate share %s from %s@%s params: %v", params[0], m.Login, m.IP, params) + return false, &ErrorReply{Code: -1, Message: "Duplicate share", close: true} + } + + if !validShare { + log.Printf("Invalid share from %s@%s with %v nonce", m.Login, m.IP, params[0]) + return false, nil + } + + log.Printf("Valid share from %s@%s", m.Login, m.IP) + return true, nil +} + +func (s *ProxyServer) handleGetBlockByNumberRPC() *rpc.GetBlockReplyPart { + t := s.currentBlockTemplate() + var reply *rpc.GetBlockReplyPart + if t != nil { + reply = t.GetPendingBlockCache + } + return reply +} + +func (s *ProxyServer) handleUnknownRPC(cs *Session, req *JSONRpcReq) *ErrorReply { + log.Printf("Unknown RPC method: %v", req) + return &ErrorReply{Code: -1, Message: "Invalid method"} +} diff --git a/proxy/miner.go b/proxy/miner.go new file mode 100644 index 0000000..aa70b24 --- /dev/null +++ b/proxy/miner.go @@ -0,0 +1,95 @@ +package proxy + +import ( + "log" + "math/big" + "strconv" + "strings" + + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/common" +) + +var hasher = ethash.New() + +type Miner struct { + Id string + Login string + IP string +} + +func NewMiner(login, id, ip string) Miner { + if len(id) == 0 { + id = "0" + } + return Miner{Login: login, Id: id, IP: ip} +} + +func (m Miner) key() string { + return strings.Join([]string{m.Login, m.Id}, ":") +} + +func (m Miner) processShare(s *ProxyServer, t *BlockTemplate, params []string) (bool, bool) { + paramsOrig := params[:] + + nonceHex := params[0] + hashNoNonce := params[1] + nonce, _ := strconv.ParseUint(strings.Replace(nonceHex, "0x", "", -1), 16, 64) + mixDigest := strings.ToLower(params[2]) + shareDiff := s.config.Proxy.Difficulty + + if _, 
ok := t.headers[hashNoNonce]; !ok { + log.Printf("Stale share from %v@%v", m.Login, m.IP) + return false, false + } + + share := Block{ + number: t.Height, + hashNoNonce: common.HexToHash(hashNoNonce), + difficulty: big.NewInt(shareDiff), + nonce: nonce, + mixDigest: common.HexToHash(mixDigest), + } + + block := Block{ + number: t.Height, + hashNoNonce: common.HexToHash(hashNoNonce), + difficulty: t.Difficulty, + nonce: nonce, + mixDigest: common.HexToHash(mixDigest), + } + + if !hasher.Verify(share) { + return false, false + } + + // In-Ram check for duplicate share + if t.submit(params[0]) { + return true, false + } + + if hasher.Verify(block) { + _, err := s.rpc().SubmitBlock(paramsOrig) + if err != nil { + log.Printf("Block submission failure on height: %v for %v: %v", t.Height, t.Header, err) + } else { + s.fetchBlockTemplate() + err = s.backend.WriteBlock(m.Login, m.Id, shareDiff, t.Difficulty.Int64(), t.Height, nonceHex, hashNoNonce, mixDigest, s.hashrateExpiration) + if err != nil { + log.Printf("Failed to insert block candidate into backend: %v", err) + } else { + log.Printf("Inserted block %v to backend", t.Height) + } + log.Printf("Block with nonce: %v found by miner %v@%v at height: %d", nonceHex, m.Login, m.IP, t.Height) + } + } else { + exist, err := s.backend.WriteShare(m.Login, m.Id, nonceHex, mixDigest, t.Height, shareDiff, s.hashrateExpiration) + if exist { + return true, false + } + if err != nil { + log.Printf("Failed to insert share data into backend: %v", err) + } + } + return false, true +} diff --git a/proxy/proto.go b/proxy/proto.go new file mode 100644 index 0000000..b72bc7d --- /dev/null +++ b/proxy/proto.go @@ -0,0 +1,26 @@ +package proxy + +import "encoding/json" + +type JSONRpcReq struct { + Id *json.RawMessage `json:"id"` + Method string `json:"method"` + Params *json.RawMessage `json:"params"` +} + +type JSONRpcResp struct { + Id *json.RawMessage `json:"id"` + Version string `json:"jsonrpc"` + Result interface{} `json:"result"` + 
Error interface{} `json:"error,omitempty"` +} + +type SubmitReply struct { + Status string `json:"status"` +} + +type ErrorReply struct { + Code int `json:"code"` + Message string `json:"message"` + close bool +} diff --git a/proxy/proxy.go b/proxy/proxy.go new file mode 100644 index 0000000..de77381 --- /dev/null +++ b/proxy/proxy.go @@ -0,0 +1,291 @@ +package proxy + +import ( + "encoding/json" + "github.com/gorilla/mux" + "io" + "log" + "net" + "net/http" + "sync/atomic" + "time" + + "../policy" + "../rpc" + "../storage" + "../util" +) + +type ProxyServer struct { + config *Config + blockTemplate atomic.Value + upstream int32 + upstreams []*rpc.RPCClient + backend *storage.RedisClient + diff string + policy *policy.PolicyServer + hashrateExpiration time.Duration + failsCount int64 +} + +type Session struct { + ip string + enc *json.Encoder +} + +func NewProxy(cfg *Config, backend *storage.RedisClient) *ProxyServer { + if len(cfg.Name) == 0 { + log.Fatal("You must set instance name") + } + policy := policy.Start(&cfg.Proxy.Policy, backend) + + proxy := &ProxyServer{config: cfg, backend: backend, policy: policy} + proxy.diff = util.GetTargetHex(cfg.Proxy.Difficulty) + + proxy.upstreams = make([]*rpc.RPCClient, len(cfg.Upstream)) + for i, v := range cfg.Upstream { + proxy.upstreams[i] = rpc.NewRPCClient(v.Name, v.Url, v.Timeout) + log.Printf("Upstream: %s => %s", v.Name, v.Url) + } + log.Printf("Default upstream: %s => %s", proxy.rpc().Name, proxy.rpc().Url) + + proxy.fetchBlockTemplate() + + proxy.hashrateExpiration, _ = time.ParseDuration(cfg.Proxy.HashrateExpiration) + + refreshIntv, _ := time.ParseDuration(cfg.Proxy.BlockRefreshInterval) + refreshTimer := time.NewTimer(refreshIntv) + log.Printf("Set block refresh every %v", refreshIntv) + + checkIntv, _ := time.ParseDuration(cfg.UpstreamCheckInterval) + checkTimer := time.NewTimer(checkIntv) + + stateUpdateIntv, _ := time.ParseDuration(cfg.Proxy.StateUpdateInterval) + stateUpdateTimer := 
time.NewTimer(stateUpdateIntv) + + go func() { + for { + select { + case <-refreshTimer.C: + proxy.fetchBlockTemplate() + refreshTimer.Reset(refreshIntv) + } + } + }() + + go func() { + for { + select { + case <-checkTimer.C: + proxy.checkUpstreams() + checkTimer.Reset(checkIntv) + } + } + }() + + go func() { + for { + select { + case <-stateUpdateTimer.C: + t := proxy.currentBlockTemplate() + if t != nil { + err := backend.WriteNodeState(cfg.Name, t.Height, t.Difficulty) + if err != nil { + log.Printf("Failed to write node state to backend: %v", err) + proxy.markSick() + } else { + proxy.markOk() + } + } + stateUpdateTimer.Reset(stateUpdateIntv) + } + } + }() + + return proxy +} + +func (s *ProxyServer) Start() { + log.Printf("Starting proxy on %v", s.config.Proxy.Listen) + r := mux.NewRouter() + r.Handle("/miner/{login:0x[0-9a-f]{40}}/{id:[0-9a-zA-Z\\-\\_]{1,8}}", s) + r.Handle("/miner/{login:0x[0-9a-f]{40}}", s) + srv := &http.Server{ + Addr: s.config.Proxy.Listen, + Handler: r, + MaxHeaderBytes: s.config.Proxy.LimitHeadersSize, + } + err := srv.ListenAndServe() + if err != nil { + log.Fatalf("Failed to start proxy: %v", err) + } +} + +func (s *ProxyServer) rpc() *rpc.RPCClient { + i := atomic.LoadInt32(&s.upstream) + return s.upstreams[i] +} + +func (s *ProxyServer) checkUpstreams() { + candidate := int32(0) + backup := false + + for i, v := range s.upstreams { + if v.Check() && !backup { + candidate = int32(i) + backup = true + } + } + + if s.upstream != candidate { + log.Printf("Switching to %v upstream", s.upstreams[candidate].Name) + atomic.StoreInt32(&s.upstream, candidate) + } +} + +func (s *ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + s.writeError(w, 405, "rpc: POST method required, received "+r.Method) + return + } + ip := s.remoteAddr(r) + s.handleClient(w, r, ip) +} + +func (s *ProxyServer) remoteAddr(r *http.Request) string { + if s.config.Proxy.BehindReverseProxy { + ip := 
r.Header.Get("X-Forwarded-For") + if len(ip) > 0 && net.ParseIP(ip) != nil { + return ip + } + } + ip, _, _ := net.SplitHostPort(r.RemoteAddr) + return ip +} + +func (s *ProxyServer) handleClient(w http.ResponseWriter, r *http.Request, ip string) { + if r.ContentLength > s.config.Proxy.LimitBodySize { + log.Printf("Socket flood from %s", ip) + s.policy.ApplyMalformedPolicy(ip) + r.Close = true + http.Error(w, "Request too large", http.StatusExpectationFailed) + return + } + r.Body = http.MaxBytesReader(w, r.Body, s.config.Proxy.LimitBodySize) + defer r.Body.Close() + + cs := &Session{ip: ip, enc: json.NewEncoder(w)} + dec := json.NewDecoder(r.Body) + for { + var req JSONRpcReq + if err := dec.Decode(&req); err == io.EOF { + break + } else if err != nil { + log.Printf("Malformed request from %v: %v", ip, err) + s.policy.ApplyMalformedPolicy(ip) + r.Close = true + return + } + cs.handleMessage(s, r, &req) + } +} + +func (cs *Session) handleMessage(s *ProxyServer, r *http.Request, req *JSONRpcReq) { + if req.Id == nil { + log.Printf("Missing RPC id from %s", cs.ip) + s.policy.ApplyMalformedPolicy(cs.ip) + r.Close = true + return + } + + vars := mux.Vars(r) + + if !s.policy.ApplyLoginPolicy(vars["login"], cs.ip) { + errReply := &ErrorReply{Code: -1, Message: "You are blacklisted", close: true} + cs.sendError(req.Id, errReply) + return + } + + // Handle RPC methods + switch req.Method { + case "eth_getWork": + reply, errReply := s.handleGetWorkRPC(cs, vars["login"], vars["id"]) + if errReply != nil { + r.Close = errReply.close + cs.sendError(req.Id, errReply) + break + } + cs.sendResult(req.Id, &reply) + case "eth_submitWork": + if req.Params != nil { + var params []string + err := json.Unmarshal(*req.Params, ¶ms) + if err != nil { + log.Printf("Unable to parse params from %v", cs.ip) + s.policy.ApplyMalformedPolicy(cs.ip) + r.Close = true + break + } + reply, errReply := s.handleSubmitRPC(cs, vars["login"], vars["id"], params) + if errReply != nil { + r.Close = 
errReply.close + err = cs.sendError(req.Id, errReply) + break + } + cs.sendResult(req.Id, &reply) + } else { + r.Close = true + errReply := &ErrorReply{Code: -1, Message: "Malformed request"} + cs.sendError(req.Id, errReply) + } + case "eth_getBlockByNumber": + reply := s.handleGetBlockByNumberRPC() + cs.sendResult(req.Id, reply) + case "eth_submitHashrate": + cs.sendResult(req.Id, true) + default: + r.Close = true + errReply := s.handleUnknownRPC(cs, req) + cs.sendError(req.Id, errReply) + } +} + +func (cs *Session) sendResult(id *json.RawMessage, result interface{}) error { + message := JSONRpcResp{Id: id, Version: "2.0", Error: nil, Result: result} + return cs.enc.Encode(&message) +} + +func (cs *Session) sendError(id *json.RawMessage, reply *ErrorReply) error { + message := JSONRpcResp{Id: id, Version: "2.0", Error: reply} + return cs.enc.Encode(&message) +} + +func (s *ProxyServer) writeError(w http.ResponseWriter, status int, msg string) { + w.WriteHeader(status) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") +} + +func (s *ProxyServer) currentBlockTemplate() *BlockTemplate { + t := s.blockTemplate.Load() + if t != nil { + return t.(*BlockTemplate) + } else { + return nil + } +} + +func (s *ProxyServer) markSick() { + atomic.AddInt64(&s.failsCount, 1) +} + +func (s *ProxyServer) isSick() bool { + x := atomic.LoadInt64(&s.failsCount) + if s.config.Proxy.HealthCheck && x >= s.config.Proxy.MaxFails { + return true + } + return false +} + +func (s *ProxyServer) markOk() { + atomic.StoreInt64(&s.failsCount, 0) +} diff --git a/rpc/rpc.go b/rpc/rpc.go new file mode 100644 index 0000000..e3f7ee3 --- /dev/null +++ b/rpc/rpc.go @@ -0,0 +1,251 @@ +package rpc + +import ( + "bytes" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "sync" + "time" +) + +type RPCClient struct { + sync.RWMutex + Url string + Name string + sick bool + sickRate int + successRate int + client *http.Client +} + +type GetBlockReply struct { + Number string `json:"number"` 
+ Hash string `json:"hash"` + ParentHash string `json:"parentHash"` + Nonce string `json:"nonce"` + Sha3Uncles string `json:"sha3Uncles"` + LogsBloom string `json:"logsBloom"` + TransactionsRoot string `json:"transactionsRoot"` + StateRoot string `json:"stateRoot"` + Miner string `json:"miner"` + Difficulty string `json:"difficulty"` + TotalDifficulty string `json:"totalDifficulty"` + Size string `json:"size"` + ExtraData string `json:"extraData"` + GasLimit string `json:"gasLimit"` + GasUsed string `json:"gasUsed"` + Timestamp string `json:"timestamp"` + Transactions []Tx `json:"transactions"` + Uncles []string `json:"uncles"` +} + +type GetBlockReplyPart struct { + Number string `json:"number"` + Difficulty string `json:"difficulty"` +} + +type TxReceipt struct { + TxHash string `json:"transactionHash"` + GasUsed string `json:"gasUsed"` +} + +type Tx struct { + Gas string `json:"gas"` + GasPrice string `json:"gasPrice"` + Hash string `json:"hash"` +} + +type JSONRpcResp struct { + Id *json.RawMessage `json:"id"` + Result *json.RawMessage `json:"result"` + Error map[string]interface{} `json:"error"` +} + +func NewRPCClient(name, url, timeout string) *RPCClient { + rpcClient := &RPCClient{Name: name, Url: url} + timeoutIntv, _ := time.ParseDuration(timeout) + rpcClient.client = &http.Client{ + Timeout: timeoutIntv, + } + return rpcClient +} + +func (r *RPCClient) GetWork() ([]string, error) { + rpcResp, err := r.doPost(r.Url, "eth_getWork", []string{}) + var reply []string + if err != nil { + return reply, err + } + if rpcResp.Error != nil { + return reply, errors.New(rpcResp.Error["message"].(string)) + } + + err = json.Unmarshal(*rpcResp.Result, &reply) + // Handle empty result, daemon is catching up (geth bug!!!) 
+ if len(reply) != 3 || len(reply[0]) == 0 { + return reply, errors.New("Daemon is not ready") + } + return reply, err +} + +func (r *RPCClient) GetPendingBlock() (*GetBlockReplyPart, error) { + rpcResp, err := r.doPost(r.Url, "eth_getBlockByNumber", []interface{}{"pending", false}) + var reply *GetBlockReplyPart + if err != nil { + return reply, err + } + if rpcResp.Error != nil { + return reply, errors.New(rpcResp.Error["message"].(string)) + } + if rpcResp.Result != nil { + err = json.Unmarshal(*rpcResp.Result, &reply) + } + return reply, err +} + +func (r *RPCClient) GetBlockByHeight(height int64) (*GetBlockReply, error) { + params := []interface{}{height, true} + return r.getBlockBy("eth_getBlockByNumber", params) +} + +func (r *RPCClient) GetBlockByHash(hash string) (*GetBlockReply, error) { + params := []interface{}{hash, true} + return r.getBlockBy("eth_getBlockByHash", params) +} + +func (r *RPCClient) getBlockByHeight(params []interface{}) (*GetBlockReply, error) { + return r.getBlockBy("eth_getBlockByNumber", params) +} + +func (r *RPCClient) GetUncleByBlockNumberAndIndex(height int64, index int) (*GetBlockReply, error) { + params := []interface{}{height, index} + return r.getBlockBy("eth_getUncleByBlockNumberAndIndex", params) +} + +func (r *RPCClient) getBlockBy(method string, params []interface{}) (*GetBlockReply, error) { + rpcResp, err := r.doPost(r.Url, method, params) + var reply *GetBlockReply + if err != nil { + return reply, err + } + if rpcResp.Error != nil { + return reply, errors.New(rpcResp.Error["message"].(string)) + } + if rpcResp.Result != nil { + err = json.Unmarshal(*rpcResp.Result, &reply) + } + return reply, err +} + +func (r *RPCClient) GetTxReceipt(hash string) (*TxReceipt, error) { + rpcResp, err := r.doPost(r.Url, "eth_getTransactionReceipt", []string{hash}) + var reply *TxReceipt + if err != nil { + return nil, err + } + if rpcResp.Error != nil { + return nil, errors.New(rpcResp.Error["message"].(string)) + } + if 
rpcResp.Result != nil { + err = json.Unmarshal(*rpcResp.Result, &reply) + } + return reply, err +} + +func (r *RPCClient) SubmitBlock(params []string) (bool, error) { + rpcResp, err := r.doPost(r.Url, "eth_submitWork", params) + var result bool + if err != nil { + return false, err + } + err = json.Unmarshal(*rpcResp.Result, &result) + if !result { + return false, errors.New("Block not accepted, result=false") + } + return result, nil +} + +func (r *RPCClient) SendTransaction(from, to, gas, gasPrice, value string) (string, error) { + params := map[string]string{ + "from": from, + "to": to, + "gas": gas, + "gasPrice": gasPrice, + "value": value, + } + rpcResp, err := r.doPost(r.Url, "eth_sendTransaction", []interface{}{params}) + var reply string + if err != nil { + return reply, err + } + if rpcResp.Error != nil { + return reply, errors.New(rpcResp.Error["message"].(string)) + } + err = json.Unmarshal(*rpcResp.Result, &reply) + return reply, err +} + +func (r *RPCClient) doPost(url string, method string, params interface{}) (JSONRpcResp, error) { + jsonReq := map[string]interface{}{"jsonrpc": "2.0", "method": method, "params": params, "id": 0} + data, _ := json.Marshal(jsonReq) + + req, err := http.NewRequest("POST", url, bytes.NewBuffer(data)) + req.Header.Set("Content-Length", (string)(len(data))) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + + resp, err := r.client.Do(req) + var rpcResp JSONRpcResp + + if err != nil { + r.markSick() + return rpcResp, err + } + defer resp.Body.Close() + + body, _ := ioutil.ReadAll(resp.Body) + err = json.Unmarshal(body, &rpcResp) + + if rpcResp.Error != nil { + r.markSick() + } + return rpcResp, err +} + +func (r *RPCClient) Check() bool { + _, err := r.GetWork() + if err != nil { + return false + } + r.markAlive() + return !r.Sick() +} + +func (r *RPCClient) Sick() bool { + r.RLock() + defer r.RUnlock() + return r.sick +} + +func (r *RPCClient) markSick() { + r.Lock() + 
r.sickRate++ + r.successRate = 0 + if r.sickRate >= 5 { + r.sick = true + } + r.Unlock() +} + +func (r *RPCClient) markAlive() { + r.Lock() + r.successRate++ + if r.successRate >= 5 { + r.sick = false + r.sickRate = 0 + r.successRate = 0 + } + r.Unlock() +} diff --git a/storage/redis.go b/storage/redis.go new file mode 100644 index 0000000..7ddc06b --- /dev/null +++ b/storage/redis.go @@ -0,0 +1,754 @@ +package storage + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "gopkg.in/redis.v3" + + "../util" +) + +type Config struct { + Endpoint string `json:"endpoint"` + Password string `json:"password"` + Database int64 `json:"database"` + PoolSize int `json:"poolSize"` +} + +type RedisClient struct { + client *redis.Client + prefix string +} + +type BlockData struct { + Height int64 `json:"height"` + Timestamp int64 `json:"timestamp"` + Difficulty string `json:"difficulty"` + TotalShares int64 `json:"shares"` + Uncle bool `json:"uncle"` + Orphan bool `json:"orphan"` + Hash string `json:"hash"` + Nonce string `json:"-"` + PowHash string `json:"-"` + MixDigest string `json:"-"` + Reward *big.Int `json:"-"` + ImmatureReward string `json:"-"` + RewardString string `json:"reward"` + candidateKey string `json:"-"` + immatureKey string `json:"-"` +} + +func (b *BlockData) RewardInShannon() int64 { + reward := new(big.Int).Div(b.Reward, common.Shannon) + return reward.Int64() +} + +func (b *BlockData) serializeHash() string { + if len(b.Hash) > 0 { + return b.Hash + } else { + return "0x0" + } +} + +func (b *BlockData) RoundKey() string { + return join(b.Height, b.Hash) +} + +func (b *BlockData) key() string { + return join(b.Uncle, b.Orphan, b.Nonce, b.serializeHash(), b.Timestamp, b.Difficulty, b.TotalShares, b.Reward) +} + +type Miner struct { + LastBeat int64 `json:"lastBeat"` + HR int64 `json:"hr"` + Offline bool `json:"offline"` + startedAt int64 +} + +type Worker struct { + Miner + TotalHR int64 `json:"hr2"` 
+} + +func NewRedisClient(cfg *Config, prefix string) *RedisClient { + client := redis.NewClient(&redis.Options{ + Addr: cfg.Endpoint, + Password: cfg.Password, + DB: cfg.Database, + PoolSize: cfg.PoolSize, + }) + return &RedisClient{client: client, prefix: prefix} +} + +func (r *RedisClient) Client() *redis.Client { + return r.client +} + +func (r *RedisClient) Check() (string, error) { + return r.client.Ping().Result() +} + +// Always returns list of addresses. If Redis fails it will return empty list. +func (r *RedisClient) GetBlacklist() ([]string, error) { + cmd := r.client.SMembers(r.formatKey("blacklist")) + if cmd.Err() != nil { + return []string{}, cmd.Err() + } + return cmd.Val(), nil +} + +// Always returns list of IPs. If Redis fails it will return empty list. +func (r *RedisClient) GetWhitelist() ([]string, error) { + cmd := r.client.SMembers(r.formatKey("whitelist")) + if cmd.Err() != nil { + return []string{}, cmd.Err() + } + return cmd.Val(), nil +} + +func (r *RedisClient) WriteNodeState(id string, height uint64, diff *big.Int) error { + tx := r.client.Multi() + defer tx.Close() + + now := util.MakeTimestamp() / 1000 + + _, err := tx.Exec(func() error { + tx.HSet(r.formatKey("nodes"), join(id, "name"), id) + tx.HSet(r.formatKey("nodes"), join(id, "height"), strconv.FormatUint(height, 10)) + tx.HSet(r.formatKey("nodes"), join(id, "difficulty"), diff.String()) + tx.HSet(r.formatKey("nodes"), join(id, "lastBeat"), strconv.FormatInt(now, 10)) + return nil + }) + return err +} + +func (r *RedisClient) GetNodeStates() ([]map[string]interface{}, error) { + cmd := r.client.HGetAllMap(r.formatKey("nodes")) + if cmd.Err() != nil { + return nil, cmd.Err() + } + m := make(map[string]map[string]interface{}) + for key, value := range cmd.Val() { + parts := strings.Split(key, ":") + if val, ok := m[parts[0]]; ok { + val[parts[1]] = value + } else { + node := make(map[string]interface{}) + node[parts[1]] = value + m[parts[0]] = node + } + } + v := 
make([]map[string]interface{}, len(m), len(m)) + i := 0 + for _, value := range m { + v[i] = value + i++ + } + return v, nil +} + +func (r *RedisClient) WriteShare(login, id, nonce, mixDigest string, height uint64, diff int64, window time.Duration) (bool, error) { + // Sweep PoW backlog for previous blocks + r.client.ZRemRangeByScore(r.formatKey("pow"), "-inf", fmt.Sprint("(", height-8)) + cmd := r.client.ZAdd(r.formatKey("pow"), redis.Z{Score: float64(height), Member: join(nonce, mixDigest)}) + if cmd.Err() != nil { + return false, cmd.Err() + } + // Duplicate nonce + if cmd.Val() == 0 { + return true, nil + } + tx := r.client.Multi() + defer tx.Close() + + ms := time.Now().UnixNano() / 1000000 + ts := ms / 1000 + + _, err := tx.Exec(func() error { + r.writeShare(tx, ms, ts, login, id, diff, window) + tx.HIncrBy(r.formatKey("stats"), "roundShares", diff) + return nil + }) + return false, err +} + +func (r *RedisClient) WriteBlock(login, id string, diff, roundDiff int64, height uint64, nonce, powHash, mixDigest string, window time.Duration) error { + tx := r.client.Multi() + defer tx.Close() + + ms := util.MakeTimestamp() + ts := ms / 1000 + + cmds, err := tx.Exec(func() error { + r.writeShare(tx, ms, ts, login, id, diff, window) + tx.HSet(r.formatKey("stats"), "lastBlockFound", strconv.FormatInt(ts, 10)) + tx.HDel(r.formatKey("stats"), "roundShares") + tx.ZIncrBy(r.formatKey("finders"), 1, login) + tx.HIncrBy(r.formatKey("miners", login), "blocksFound", 1) + tx.Rename(r.formatKey("shares", "roundCurrent"), r.formatKey("shares", formatRound(height), nonce)) + tx.HGetAllMap(r.formatKey("shares", formatRound(height), nonce)) + return nil + }) + if err != nil { + return err + } else { + sharesMap, _ := cmds[10].(*redis.StringStringMapCmd).Result() + totalShares := int64(0) + for _, v := range sharesMap { + n, _ := strconv.ParseInt(v, 10, 64) + totalShares += n + } + hashHex := join(nonce, powHash, mixDigest) + s := join(hashHex, ts, roundDiff, totalShares) + cmd := 
r.client.ZAdd(r.formatKey("blocks", "candidates"), redis.Z{Score: float64(height), Member: s}) + return cmd.Err() + } +} + +func (r *RedisClient) writeShare(tx *redis.Multi, ms, ts int64, login, id string, diff int64, expire time.Duration) { + tx.HIncrBy(r.formatKey("shares", "roundCurrent"), login, diff) + tx.ZAdd(r.formatKey("hashrate"), redis.Z{Score: float64(ts), Member: join(diff, login, id, ms)}) + tx.ZAdd(r.formatKey("hashrate", login), redis.Z{Score: float64(ts), Member: join(diff, id, ms)}) + tx.Expire(r.formatKey("hashrate", login), expire) // Will delete hashrates for miners that gone + tx.HSet(r.formatKey("miners", login), "lastShare", strconv.FormatInt(ts, 10)) +} + +func (r *RedisClient) formatKey(args ...interface{}) string { + return join(r.prefix, join(args...)) +} + +func formatRound(height uint64) string { + return "round" + strconv.FormatUint(height, 10) +} + +func join(args ...interface{}) string { + s := make([]string, len(args)) + for i, v := range args { + switch v.(type) { + case string: + s[i] = v.(string) + case int64: + s[i] = strconv.FormatInt(v.(int64), 10) + case uint64: + s[i] = strconv.FormatUint(v.(uint64), 10) + case float64: + s[i] = strconv.FormatFloat(v.(float64), 'f', 0, 64) + case bool: + if v.(bool) { + s[i] = "1" + } else { + s[i] = "0" + } + case *big.Int: + n := v.(*big.Int) + if n != nil { + s[i] = n.String() + } else { + s[i] = "0" + } + default: + panic("Invalid type specified for conversion") + } + } + return strings.Join(s, ":") +} + +func (r *RedisClient) GetCandidates(maxHeight int64) ([]*BlockData, error) { + option := redis.ZRangeByScore{Min: "0", Max: strconv.FormatInt(maxHeight, 10)} + cmd := r.client.ZRangeByScoreWithScores(r.formatKey("blocks", "candidates"), option) + if cmd.Err() != nil { + return nil, cmd.Err() + } + return convertCandidateResults(cmd), nil +} + +func (r *RedisClient) GetImmatureBlocks(maxHeight int64) ([]*BlockData, error) { + option := redis.ZRangeByScore{Min: "0", Max: 
strconv.FormatInt(maxHeight, 10)} + cmd := r.client.ZRangeByScoreWithScores(r.formatKey("blocks", "immature"), option) + if cmd.Err() != nil { + return nil, cmd.Err() + } + return convertBlockResults(cmd), nil +} + +func (r *RedisClient) GetRoundShares(height uint64, nonce string) (map[string]int64, error) { + result := make(map[string]int64) + cmd := r.client.HGetAllMap(r.formatKey("shares", formatRound(height), nonce)) + if cmd.Err() != nil { + return nil, cmd.Err() + } + sharesMap, _ := cmd.Result() + for login, v := range sharesMap { + n, _ := strconv.ParseInt(v, 10, 64) + result[login] = n + } + return result, nil +} + +func (r *RedisClient) GetPayees() ([]string, error) { + var result []string + payees := make(map[string]bool) + cmd := r.client.Keys(r.formatKey("miners", "*")) + if cmd.Err() != nil { + return nil, cmd.Err() + } + for _, worker := range cmd.Val() { + login := strings.Split(worker, ":")[2] + payees[login] = true + } + for login, _ := range payees { + result = append(result, login) + } + return result, nil +} + +func (r *RedisClient) GetBalance(login string) (int64, error) { + cmd := r.client.HGet(r.formatKey("miners", login), "balance") + if cmd.Err() != nil { + return 0, cmd.Err() + } + return cmd.Int64() +} + +// Update balance after TX sent +func (r *RedisClient) UpdateBalance(login, txHash string, amount int64) error { + tx := r.client.Multi() + defer tx.Close() + + ts := util.MakeTimestamp() / 1000 + + _, err := tx.Exec(func() error { + tx.HIncrBy(r.formatKey("miners", login), "balance", (amount * -1)) + tx.HIncrBy(r.formatKey("miners", login), "paid", amount) + tx.HIncrBy(r.formatKey("finances"), "balance", (amount * -1)) + tx.HIncrBy(r.formatKey("finances"), "paid", amount) + tx.ZAdd(r.formatKey("payments", "all"), redis.Z{Score: float64(ts), Member: join(txHash, login, amount)}) + tx.ZAdd(r.formatKey("payments", login), redis.Z{Score: float64(ts), Member: join(txHash, amount)}) + return nil + }) + return err +} + +func (r *RedisClient) 
WriteImmatureBlock(block *BlockData, roundRewards map[string]int64) error { + tx := r.client.Multi() + defer tx.Close() + + _, err := tx.Exec(func() error { + r.writeImmatureBlock(tx, block) + total := int64(0) + for login, amount := range roundRewards { + total += amount + tx.HIncrBy(r.formatKey("miners", login), "immature", amount) + tx.HSetNX(r.formatKey("credits:immature", block.Height, block.Hash), login, strconv.FormatInt(amount, 10)) + } + tx.HIncrBy(r.formatKey("finances"), "immature", total) + return nil + }) + return err +} + +func (r *RedisClient) WriteMaturedBlock(block *BlockData, roundRewards map[string]int64) error { + creditKey := r.formatKey("credits:immature", block.Height, block.Hash) + tx, err := r.client.Watch(creditKey) + // Must decrement immatures using existing log entry + immatureCredits := tx.HGetAllMap(creditKey) + if err != nil { + return err + } + defer tx.Close() + + ts := util.MakeTimestamp() / 1000 + value := join(block.Hash, ts, block.Reward) + + _, err = tx.Exec(func() error { + r.writeMaturedBlock(tx, block) + tx.ZAdd(r.formatKey("credits", "all"), redis.Z{Score: float64(block.Height), Member: value}) + + // Decrement immature balances + totalImmature := int64(0) + for login, amountString := range immatureCredits.Val() { + amount, _ := strconv.ParseInt(amountString, 10, 64) + totalImmature += amount + tx.HIncrBy(r.formatKey("miners", login), "immature", (amount * -1)) + } + + // Increment balances + total := int64(0) + for login, amount := range roundRewards { + total += amount + // NOTICE: Maybe expire round reward entry in 604800 (a week)? 
+ tx.HIncrBy(r.formatKey("miners", login), "balance", amount) + tx.HSetNX(r.formatKey("credits", block.Height, block.Hash), login, strconv.FormatInt(amount, 10)) + } + tx.Del(creditKey) + tx.HIncrBy(r.formatKey("finances"), "balance", total) + tx.HIncrBy(r.formatKey("finances"), "immature", (totalImmature * -1)) + tx.HSet(r.formatKey("finances"), "lastCreditHeight", strconv.FormatInt(block.Height, 10)) + tx.HSet(r.formatKey("finances"), "lastCreditHash", block.Hash) + tx.HIncrBy(r.formatKey("finances"), "totalMined", block.RewardInShannon()) + return nil + }) + return err +} + +func (r *RedisClient) WriteOrphan(block *BlockData) error { + creditKey := r.formatKey("credits:immature", block.Height, block.Hash) + tx, err := r.client.Watch(creditKey) + // Much decrement immatures using existing log entry + immatureCredits := tx.HGetAllMap(creditKey) + if err != nil { + return err + } + defer tx.Close() + + _, err = tx.Exec(func() error { + r.writeMaturedBlock(tx, block) + + // Decrement immature balances + totalImmature := int64(0) + for login, amountString := range immatureCredits.Val() { + amount, _ := strconv.ParseInt(amountString, 10, 64) + totalImmature += amount + tx.HIncrBy(r.formatKey("miners", login), "immature", (amount * -1)) + } + tx.Del(creditKey) + tx.HIncrBy(r.formatKey("finances"), "immature", (totalImmature * -1)) + return nil + }) + return err +} + +func (r *RedisClient) WritePendingOrphans(blocks []*BlockData) error { + tx := r.client.Multi() + defer tx.Close() + + _, err := tx.Exec(func() error { + for _, block := range blocks { + r.writeImmatureBlock(tx, block) + } + return nil + }) + return err +} + +func (r *RedisClient) writeImmatureBlock(tx *redis.Multi, block *BlockData) { + tx.ZRem(r.formatKey("blocks", "candidates"), block.candidateKey) + tx.ZAdd(r.formatKey("blocks", "immature"), redis.Z{Score: float64(block.Height), Member: block.key()}) +} + +func (r *RedisClient) writeMaturedBlock(tx *redis.Multi, block *BlockData) { + 
tx.Del(r.formatKey("shares", formatRound(uint64(block.Height)), block.Nonce)) + tx.ZRem(r.formatKey("blocks", "immature"), block.immatureKey) + tx.ZAdd(r.formatKey("blocks", "matured"), redis.Z{Score: float64(block.Height), Member: block.key()}) +} + +func (r *RedisClient) GetMinerStats(login string, maxPayments int64) (map[string]interface{}, error) { + stats := make(map[string]interface{}) + + tx := r.client.Multi() + defer tx.Close() + + cmds, err := tx.Exec(func() error { + tx.HGetAllMap(r.formatKey("miners", login)) + tx.ZRevRangeWithScores(r.formatKey("payments", login), 0, maxPayments-1) + tx.ZCard(r.formatKey("payments", login)) + tx.HGet(r.formatKey("shares", "roundCurrent"), login) + return nil + }) + + if err != nil && err != redis.Nil { + return nil, err + } else { + stats["stats"], _ = cmds[0].(*redis.StringStringMapCmd).Result() + payments := convertPaymentsResults(cmds[1].(*redis.ZSliceCmd)) + stats["payments"] = payments + stats["paymentsTotal"] = cmds[2].(*redis.IntCmd).Val() + roundShares, _ := cmds[3].(*redis.StringCmd).Int64() + stats["roundShares"] = roundShares + } + + return stats, nil +} + +// WARNING: Must run it periodically to flush out of window hashrate entries +func (r *RedisClient) FlushStaleStats(largeWindow time.Duration) (int64, error) { + now := util.MakeTimestamp() / 1000 + max := fmt.Sprint("(", now-int64(largeWindow/time.Second)) + total := int64(0) + n, err := r.client.ZRemRangeByScore(r.formatKey("hashrate"), "-inf", max).Result() + if err != nil { + return total, err + } + total += n + + keys, err := r.client.Keys(r.formatKey("hashrate", "*")).Result() + if err != nil { + return total, err + } + for _, worker := range keys { + login := strings.Split(worker, ":")[2] + n, err = r.client.ZRemRangeByScore(r.formatKey("hashrate", login), "-inf", max).Result() + if err != nil { + return total, err + } + total += n + } + return total, nil +} + +func (r *RedisClient) CollectStats(smallWindow time.Duration, maxBlocks, maxPayments 
int64) (map[string]interface{}, error) { + window := int64(smallWindow / time.Second) + stats := make(map[string]interface{}) + + tx := r.client.Multi() + defer tx.Close() + + now := util.MakeTimestamp() / 1000 + + cmds, err := tx.Exec(func() error { + tx.ZRemRangeByScore(r.formatKey("hashrate"), "-inf", fmt.Sprint("(", now-window)) + tx.ZRangeWithScores(r.formatKey("hashrate"), 0, -1) + tx.HGetAllMap(r.formatKey("stats")) + tx.ZRevRangeWithScores(r.formatKey("blocks", "candidates"), 0, -1) + tx.ZRevRangeWithScores(r.formatKey("blocks", "immature"), 0, -1) + tx.ZRevRangeWithScores(r.formatKey("blocks", "matured"), 0, maxBlocks-1) + tx.ZCard(r.formatKey("blocks", "candidates")) + tx.ZCard(r.formatKey("blocks", "immature")) + tx.ZCard(r.formatKey("blocks", "matured")) + tx.ZCard(r.formatKey("payments", "all")) + tx.ZRevRangeWithScores(r.formatKey("payments", "all"), 0, maxPayments-1) + return nil + }) + + if err != nil { + return nil, err + } + + stats["stats"], _ = cmds[2].(*redis.StringStringMapCmd).Result() + candidates := convertCandidateResults(cmds[3].(*redis.ZSliceCmd)) + stats["candidates"] = candidates + stats["candidatesTotal"] = cmds[6].(*redis.IntCmd).Val() + + immature := convertBlockResults(cmds[4].(*redis.ZSliceCmd)) + stats["immature"] = immature + stats["immatureTotal"] = cmds[7].(*redis.IntCmd).Val() + + matured := convertBlockResults(cmds[5].(*redis.ZSliceCmd)) + stats["matured"] = matured + stats["maturedTotal"] = cmds[8].(*redis.IntCmd).Val() + + payments := convertPaymentsResults(cmds[10].(*redis.ZSliceCmd)) + stats["payments"] = payments + stats["paymentsTotal"] = cmds[9].(*redis.IntCmd).Val() + + totalHashrate, miners := convertMinersStats(window, cmds[1].(*redis.ZSliceCmd)) + stats["miners"] = miners + stats["minersTotal"] = len(miners) + stats["hashrate"] = totalHashrate + return stats, nil +} + +func (r *RedisClient) CollectWorkersStats(sWindow, lWindow time.Duration, login string) (map[string]interface{}, error) { + smallWindow := 
int64(sWindow / time.Second) + largeWindow := int64(lWindow / time.Second) + stats := make(map[string]interface{}) + + tx := r.client.Multi() + defer tx.Close() + + now := util.MakeTimestamp() / 1000 + + cmds, err := tx.Exec(func() error { + tx.ZRemRangeByScore(r.formatKey("hashrate", login), "-inf", fmt.Sprint("(", now-largeWindow)) + tx.ZRangeWithScores(r.formatKey("hashrate", login), 0, -1) + return nil + }) + + if err != nil { + return nil, err + } + + totalHashrate := int64(0) + currentHashrate := int64(0) + online := int64(0) + offline := int64(0) + workers := convertWorkersStats(smallWindow, cmds[1].(*redis.ZSliceCmd)) + + for id, worker := range workers { + timeOnline := now - worker.startedAt + if timeOnline < 600 { + timeOnline = 600 + } + + boundary := timeOnline + if timeOnline >= smallWindow { + boundary = smallWindow + } + worker.HR = worker.HR / boundary + + boundary = timeOnline + if timeOnline >= largeWindow { + boundary = largeWindow + } + worker.TotalHR = worker.TotalHR / boundary + + if worker.LastBeat < (now - smallWindow/2) { + worker.Offline = true + offline++ + } else { + online++ + } + + currentHashrate += worker.HR + totalHashrate += worker.TotalHR + workers[id] = worker + } + stats["workers"] = workers + stats["workersTotal"] = len(workers) + stats["workersOnline"] = online + stats["workersOffline"] = offline + stats["hashrate"] = totalHashrate + stats["currentHashrate"] = currentHashrate + return stats, nil +} + +func convertCandidateResults(raw *redis.ZSliceCmd) []*BlockData { + var result []*BlockData + for _, v := range raw.Val() { + // "nonce:powHash:mixDigest:timestamp:diff:totalShares" + block := BlockData{} + block.Height = int64(v.Score) + fields := strings.Split(v.Member.(string), ":") + block.Nonce = fields[0] + block.PowHash = fields[1] + block.MixDigest = fields[2] + block.Timestamp, _ = strconv.ParseInt(fields[3], 10, 64) + block.Difficulty = fields[4] + block.TotalShares, _ = strconv.ParseInt(fields[5], 10, 64) + 
block.candidateKey = v.Member.(string) + result = append(result, &block) + } + return result +} + +func convertBlockResults(raw *redis.ZSliceCmd) []*BlockData { + var result []*BlockData + for _, v := range raw.Val() { + // "uncle:orphan:nonce:blockHash:timestamp:diff:totalShares:rewardInWei" + block := BlockData{} + block.Height = int64(v.Score) + fields := strings.Split(v.Member.(string), ":") + block.Uncle, _ = strconv.ParseBool(fields[0]) + block.Orphan, _ = strconv.ParseBool(fields[1]) + block.Nonce = fields[2] + block.Hash = fields[3] + block.Timestamp, _ = strconv.ParseInt(fields[4], 10, 64) + block.Difficulty = fields[5] + block.TotalShares, _ = strconv.ParseInt(fields[6], 10, 64) + block.RewardString = fields[7] + block.ImmatureReward = fields[7] + block.immatureKey = v.Member.(string) + result = append(result, &block) + } + return result +} + +// Build per login workers's total shares map {'rig-1': 12345, 'rig-2': 6789, ...} +// TS => diff, id, ms +func convertWorkersStats(window int64, raw *redis.ZSliceCmd) map[string]Worker { + now := util.MakeTimestamp() / 1000 + workers := make(map[string]Worker) + + for _, v := range raw.Val() { + parts := strings.Split(v.Member.(string), ":") + share, _ := strconv.ParseInt(parts[0], 10, 64) + id := parts[1] + score := int64(v.Score) + worker := workers[id] + + // Add for large window + worker.TotalHR += share + + // Add for small window if matches + if score >= now-window { + worker.HR += share + } + + if worker.LastBeat < score { + worker.LastBeat = score + } + if worker.startedAt > score || worker.startedAt == 0 { + worker.startedAt = score + } + workers[id] = worker + } + return workers +} + +func convertMinersStats(window int64, raw *redis.ZSliceCmd) (int64, map[string]Miner) { + now := util.MakeTimestamp() / 1000 + miners := make(map[string]Miner) + totalHashrate := int64(0) + + for _, v := range raw.Val() { + parts := strings.Split(v.Member.(string), ":") + share, _ := strconv.ParseInt(parts[0], 10, 64) + id 
:= parts[1] + score := int64(v.Score) + miner := miners[id] + miner.HR += share + + if miner.LastBeat < score { + miner.LastBeat = score + } + if miner.startedAt > score || miner.startedAt == 0 { + miner.startedAt = score + } + miners[id] = miner + } + + for id, miner := range miners { + timeOnline := now - miner.startedAt + if timeOnline < 600 { + timeOnline = 600 + } + + boundary := timeOnline + if timeOnline >= window { + boundary = window + } + miner.HR = miner.HR / boundary + + if miner.LastBeat < (now - window/2) { + miner.Offline = true + } + totalHashrate += miner.HR + miners[id] = miner + } + return totalHashrate, miners +} + +func convertPaymentsResults(raw *redis.ZSliceCmd) []map[string]interface{} { + var result []map[string]interface{} + for _, v := range raw.Val() { + tx := make(map[string]interface{}) + tx["timestamp"] = int64(v.Score) + fields := strings.Split(v.Member.(string), ":") + tx["tx"] = fields[0] + // Individual or whole payments row + if len(fields) < 3 { + tx["amount"], _ = strconv.ParseInt(fields[1], 10, 64) + } else { + tx["address"] = fields[1] + tx["amount"], _ = strconv.ParseInt(fields[2], 10, 64) + } + result = append(result, tx) + } + return result +} diff --git a/upstart.conf b/upstart.conf new file mode 100644 index 0000000..3059307 --- /dev/null +++ b/upstart.conf @@ -0,0 +1,28 @@ +# Ether-Pool +description "Ether-Pool" + +env DAEMON=/home/main/src/ether-pool/ether-pool +env CONFIG=/home/main/src/ether-pool/config.json +env NAME=ether-pool + +start on filesystem or runlevel [2345] +stop on runlevel [!2345] + +setuid main +setgid main + +kill signal INT + +respawn +respawn limit 10 5 +umask 022 + +pre-start script + test -x $DAEMON || { stop; exit 0; } +end script + +# Start +script + #test -f /etc/default/$NAME && . 
/etc/default/$NAME + exec $DAEMON $CONFIG +end script diff --git a/util/util.go b/util/util.go new file mode 100644 index 0000000..d5692df --- /dev/null +++ b/util/util.go @@ -0,0 +1,44 @@ +package util + +import ( + "math/big" + "time" + "strconv" + + "github.com/ethereum/go-ethereum/common" +) + +var pow256 = common.BigPow(2, 256) + +func MakeTimestamp() int64 { + return time.Now().UnixNano() / int64(time.Millisecond) +} + +func GetTargetHex(diff int64) string { + difficulty := big.NewInt(diff) + diff1 := new(big.Int).Div(pow256, difficulty) + return string(common.ToHex(diff1.Bytes())) +} + +func ToHex(n int64) string { + return "0x0" + strconv.FormatInt(n, 16) +} + +func FormatReward(reward *big.Int) string { + return reward.String() +} + +func FormatRatReward(reward *big.Rat) string { + wei := new(big.Rat).SetInt(common.Ether) + reward = reward.Quo(reward, wei) + return reward.FloatString(8) +} + +func StringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/www/.bowerrc b/www/.bowerrc new file mode 100644 index 0000000..959e169 --- /dev/null +++ b/www/.bowerrc @@ -0,0 +1,4 @@ +{ + "directory": "bower_components", + "analytics": false +} diff --git a/www/.editorconfig b/www/.editorconfig new file mode 100644 index 0000000..47c5438 --- /dev/null +++ b/www/.editorconfig @@ -0,0 +1,34 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# editorconfig.org + +root = true + + +[*] +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +indent_style = space +indent_size = 2 + +[*.js] +indent_style = space +indent_size = 2 + +[*.hbs] +insert_final_newline = false +indent_style = space +indent_size = 2 + +[*.css] +indent_style = space +indent_size = 2 + +[*.html] +indent_style = space +indent_size = 2 + +[*.{diff,md}] +trim_trailing_whitespace = false diff --git a/www/.ember-cli 
b/www/.ember-cli new file mode 100644 index 0000000..ee64cfe --- /dev/null +++ b/www/.ember-cli @@ -0,0 +1,9 @@ +{ + /** + Ember CLI sends analytics information by default. The data is completely + anonymous, but there are times when you might want to disable this behavior. + + Setting `disableAnalytics` to true will prevent any data from being sent. + */ + "disableAnalytics": false +} diff --git a/www/.gitignore b/www/.gitignore new file mode 100644 index 0000000..86fceae --- /dev/null +++ b/www/.gitignore @@ -0,0 +1,17 @@ +# See http://help.github.com/ignore-files/ for more about ignoring files. + +# compiled output +/dist +/tmp + +# dependencies +/node_modules +/bower_components + +# misc +/.sass-cache +/connect.lock +/coverage/* +/libpeerconnection.log +npm-debug.log +testem.log diff --git a/www/.jshintrc b/www/.jshintrc new file mode 100644 index 0000000..e75f719 --- /dev/null +++ b/www/.jshintrc @@ -0,0 +1,33 @@ +{ + "predef": [ + "document", + "window", + "-Promise", + "moment" + ], + "browser": true, + "boss": true, + "curly": true, + "debug": false, + "devel": true, + "eqeqeq": true, + "evil": true, + "forin": false, + "immed": false, + "laxbreak": false, + "newcap": true, + "noarg": true, + "noempty": false, + "nonew": false, + "nomen": false, + "onevar": false, + "plusplus": false, + "regexp": false, + "undef": true, + "sub": true, + "strict": false, + "white": false, + "eqnull": true, + "esnext": true, + "unused": true +} diff --git a/www/.travis.yml b/www/.travis.yml new file mode 100644 index 0000000..66dd107 --- /dev/null +++ b/www/.travis.yml @@ -0,0 +1,23 @@ +--- +language: node_js +node_js: + - "0.12" + +sudo: false + +cache: + directories: + - node_modules + +before_install: + - export PATH=/usr/local/phantomjs-2.0.0/bin:$PATH + - "npm config set spin false" + - "npm install -g npm@^2" + +install: + - npm install -g bower + - npm install + - bower install + +script: + - npm test diff --git a/www/.watchmanconfig b/www/.watchmanconfig new file mode 
100644 index 0000000..5e9462c --- /dev/null +++ b/www/.watchmanconfig @@ -0,0 +1,3 @@ +{ + "ignore_dirs": ["tmp"] +} diff --git a/www/README.md b/www/README.md new file mode 100644 index 0000000..5a3c03f --- /dev/null +++ b/www/README.md @@ -0,0 +1,53 @@ +# Pool + +This README outlines the details of collaborating on this Ember application. +A short introduction of this app could easily go here. + +## Prerequisites + +You will need the following things properly installed on your computer. + +* [Git](http://git-scm.com/) +* [Node.js](http://nodejs.org/) (with NPM) +* [Bower](http://bower.io/) +* [Ember CLI](http://www.ember-cli.com/) +* [PhantomJS](http://phantomjs.org/) + +## Installation + +* `git clone ` this repository +* change into the new directory +* `npm install` +* `bower install` + +## Running / Development + +* `ember server` +* Visit your app at [http://localhost:4200](http://localhost:4200). + +### Code Generators + +Make use of the many generators for code, try `ember help generate` for more details + +### Running Tests + +* `ember test` +* `ember test --server` + +### Building + +* `ember build` (development) +* `ember build --environment production` (production) + +### Deploying + +Specify what it takes to deploy your app. 
+ +## Further Reading / Useful Links + +* [ember.js](http://emberjs.com/) +* [ember-cli](http://www.ember-cli.com/) +* Development Browser Extensions + * [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi) + * [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/) + diff --git a/www/app/app.js b/www/app/app.js new file mode 100644 index 0000000..bf84f87 --- /dev/null +++ b/www/app/app.js @@ -0,0 +1,18 @@ +import Ember from 'ember'; +import Resolver from 'ember-resolver'; +import loadInitializers from 'ember/load-initializers'; +import config from './config/environment'; + +var App; + +Ember.MODEL_FACTORY_INJECTIONS = true; + +App = Ember.Application.extend({ + modulePrefix: config.modulePrefix, + podModulePrefix: config.podModulePrefix, + Resolver: Resolver +}); + +loadInitializers(App, config.modulePrefix); + +export default App; diff --git a/www/app/components/.gitkeep b/www/app/components/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/www/app/components/active-li.js b/www/app/components/active-li.js new file mode 100644 index 0000000..9a62502 --- /dev/null +++ b/www/app/components/active-li.js @@ -0,0 +1,15 @@ +import Ember from 'ember'; + +export default Ember.Component.extend({ + tagName: 'li', + classNameBindings: ['isActive:active:inactive'], + + router: function(){ + return this.container.lookup('router:main'); + }.property(), + + isActive: function(){ + var currentWhen = this.get('currentWhen'); + return this.get('router').isActive(currentWhen); + }.property('router.url', 'currentWhen') +}); diff --git a/www/app/controllers/.gitkeep b/www/app/controllers/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/www/app/controllers/account.js b/www/app/controllers/account.js new file mode 100644 index 0000000..79782f7 --- /dev/null +++ b/www/app/controllers/account.js @@ -0,0 +1,16 @@ +import Ember from 'ember'; + +export 
default Ember.Controller.extend({ + applicationController: Ember.inject.controller('application'), + stats: Ember.computed.reads('applicationController.model.stats'), + + roundPercent: Ember.computed('stats', 'model', { + get() { + var percent = this.get('model.roundShares') / this.get('stats.roundShares'); + if (!percent) { + return 0; + } + return percent; + } + }) +}); diff --git a/www/app/controllers/application.js b/www/app/controllers/application.js new file mode 100644 index 0000000..c546e3c --- /dev/null +++ b/www/app/controllers/application.js @@ -0,0 +1,66 @@ +import Ember from 'ember'; + +export default Ember.Controller.extend({ + height: Ember.computed('model.nodes', { + get() { + var node = this.get('bestNode'); + if (node) { + return node.height; + } + return 0; + } + }), + + roundShares: Ember.computed('model.stats', { + get() { + return parseInt(this.get('model.stats.roundShares')); + } + }), + + difficulty: Ember.computed('model.nodes', { + get() { + var node = this.get('bestNode'); + if (node) { + return node.difficulty; + } + return 0; + } + }), + + immatureTotal: Ember.computed('model', { + get() { + return this.getWithDefault('model.immatureTotal', 0) + this.getWithDefault('model.candidatesTotal', 0); + } + }), + + bestNode: Ember.computed('model.nodes', { + get() { + var node = null; + this.get('model.nodes').forEach(function (n) { + if (!node) { + node = n; + } + if (node.height < n.height) { + node = n; + } + }); + return node; + } + }), + + lastBlockFound: Ember.computed('model', { + get() { + return parseInt(this.get('model.lastBlockFound')) || 0; + } + }), + + roundVariance: Ember.computed('model', { + get() { + var percent = this.get('model.stats.roundShares') / this.get('difficulty'); + if (!percent) { + return 0; + } + return percent.toFixed(2); + } + }) +}); diff --git a/www/app/controllers/index.js b/www/app/controllers/index.js new file mode 100644 index 0000000..12191c2 --- /dev/null +++ b/www/app/controllers/index.js @@ -0,0 +1,17 
@@ +import Ember from 'ember'; + +export default Ember.Controller.extend({ + applicationController: Ember.inject.controller('application'), + stats: Ember.computed.reads('applicationController'), + + cachedLogin: Ember.computed('login', { + get() { + return this.get('login') || Ember.$.cookie('login'); + }, + set(key, value) { + Ember.$.cookie('login', value); + this.set('model.login', value); + return value; + } + }) +}); diff --git a/www/app/formats.js b/www/app/formats.js new file mode 100644 index 0000000..7078388 --- /dev/null +++ b/www/app/formats.js @@ -0,0 +1,18 @@ +var hhmmss = { + hour: 'numeric', + minute: 'numeric', + second: 'numeric' +}; + +export default { + time: { + hhmmss: hhmmss + }, + date: { + hhmmss: hhmmss + }, + number: { + EUR: { style: 'currency', currency: 'EUR', minimumFractionDigits: 2, maximumFractionDigits: 2 }, + USD: { style: 'currency', currency: 'USD', minimumFractionDigits: 2, maximumFractionDigits: 2 } + } +}; diff --git a/www/app/helpers/.gitkeep b/www/app/helpers/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/www/app/helpers/format-balance.js b/www/app/helpers/format-balance.js new file mode 100644 index 0000000..0f5d024 --- /dev/null +++ b/www/app/helpers/format-balance.js @@ -0,0 +1,8 @@ +import Ember from 'ember'; + +export function formatBalance(value) { + value = value * 0.000000001; + return value.toFixed(8); +} + +export default Ember.Helper.helper(formatBalance); diff --git a/www/app/helpers/format-date-locale.js b/www/app/helpers/format-date-locale.js new file mode 100644 index 0000000..db008b5 --- /dev/null +++ b/www/app/helpers/format-date-locale.js @@ -0,0 +1,8 @@ +import Ember from 'ember'; + +export function formatDateLocale(ts) { + var date = new Date(ts * 1000); + return date.toLocaleString(); +} + +export default Ember.Helper.helper(formatDateLocale); diff --git a/www/app/helpers/format-hashrate.js b/www/app/helpers/format-hashrate.js new file mode 100644 index 0000000..11e4524 --- /dev/null 
+++ b/www/app/helpers/format-hashrate.js @@ -0,0 +1,14 @@ +import Ember from 'ember'; + +export function formatHashrate(params/*, hash*/) { + var hashrate = params[0]; + var i = 0; + var units = ['H', 'KH', 'MH', 'GH', 'TH', 'PH']; + while (hashrate > 1000) { + hashrate = hashrate / 1000; + i++; + } + return hashrate.toFixed(2) + ' ' + units[i]; +} + +export default Ember.Helper.helper(formatHashrate); diff --git a/www/app/helpers/format-tx.js b/www/app/helpers/format-tx.js new file mode 100644 index 0000000..c65a3f6 --- /dev/null +++ b/www/app/helpers/format-tx.js @@ -0,0 +1,7 @@ +import Ember from 'ember'; + +export function formatTx(value) { + return value[0].substring(2, 26) + "..." + value[0].substring(42); +} + +export default Ember.Helper.helper(formatTx); diff --git a/www/app/helpers/seconds-to-ms.js b/www/app/helpers/seconds-to-ms.js new file mode 100644 index 0000000..b190261 --- /dev/null +++ b/www/app/helpers/seconds-to-ms.js @@ -0,0 +1,7 @@ +import Ember from 'ember'; + +export function secondsToMs(value) { + return value * 1000; +} + +export default Ember.Helper.helper(secondsToMs); diff --git a/www/app/helpers/string-to-int.js b/www/app/helpers/string-to-int.js new file mode 100644 index 0000000..da8b774 --- /dev/null +++ b/www/app/helpers/string-to-int.js @@ -0,0 +1,7 @@ +import Ember from 'ember'; + +export function stringToInt(value) { + return parseInt(value); +} + +export default Ember.Helper.helper(stringToInt); diff --git a/www/app/index.html b/www/app/index.html new file mode 100644 index 0000000..9f99f26 --- /dev/null +++ b/www/app/index.html @@ -0,0 +1,35 @@ + + + + + + + Ethereum Mining Pool + + + + {{content-for "head"}} + + + {{content-for "head-footer"}} + + + {{content-for "body"}} + + + + + {{content-for "body-footer"}} + + + + diff --git a/www/app/models/.gitkeep b/www/app/models/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/www/app/models/block.js b/www/app/models/block.js new file mode 100644 index 
0000000..7c7ee21 --- /dev/null +++ b/www/app/models/block.js @@ -0,0 +1,30 @@ +import Ember from 'ember'; + +var Block = Ember.Object.extend({ + variance: Ember.computed('difficulty', 'shares', function() { + var percent = this.get('shares') / this.get('difficulty'); + if (!percent) { + return 0; + } + return percent; + }), + + isLucky: Ember.computed('variance', function() { + return this.get('variance') <= 1.0; + }), + + isOk: Ember.computed('orphan', 'uncle', function() { + return !this.get('orphan'); + }), + + formatReward: Ember.computed('reward', function() { + if (!this.get('orphan')) { + var value = parseInt(this.get('reward')) * 0.000000000000000001; + return value.toFixed(6); + } else { + return 0; + } + }) +}); + +export default Block; diff --git a/www/app/models/payment.js b/www/app/models/payment.js new file mode 100644 index 0000000..f07c9a3 --- /dev/null +++ b/www/app/models/payment.js @@ -0,0 +1,10 @@ +import Ember from 'ember'; + +var Payment = Ember.Object.extend({ + formatAmount: Ember.computed('amount', function() { + var value = parseInt(this.get('amount')) * 0.000000001; + return value.toFixed(8); + }) +}); + +export default Payment; diff --git a/www/app/router.js b/www/app/router.js new file mode 100644 index 0000000..4afe192 --- /dev/null +++ b/www/app/router.js @@ -0,0 +1,24 @@ +import Ember from 'ember'; +import config from './config/environment'; + +var Router = Ember.Router.extend({ + location: config.locationType +}); + +Router.map(function() { + this.route('account', { path: '/account/:login' }, function() { + this.route('payouts'); + }); + this.route('not-found'); + + this.route('blocks', function() { + this.route('immature'); + this.route('pending'); + }); + + this.route('help'); + this.route('payments'); + this.route('about'); +}); + +export default Router; diff --git a/www/app/routes/.gitkeep b/www/app/routes/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/www/app/routes/account.js b/www/app/routes/account.js new 
file mode 100644 index 0000000..b5744f6 --- /dev/null +++ b/www/app/routes/account.js @@ -0,0 +1,25 @@ +import Ember from 'ember'; +import config from '../config/environment'; + +export default Ember.Route.extend({ + model: function(params) { + var url = config.APP.ApiUrl + 'api/accounts/' + params.login; + return Ember.$.getJSON(url).then(function(data) { + data.login = params.login; + return Ember.Object.create(data); + }); + }, + + setupController: function(controller, model) { + this._super(controller, model); + Ember.run.later(this, this.refresh, 5000); + }, + + actions: { + error(error) { + if (error.status === 404) { + return this.transitionTo('not-found'); + } + } + } +}); diff --git a/www/app/routes/application.js b/www/app/routes/application.js new file mode 100644 index 0000000..293d5ce --- /dev/null +++ b/www/app/routes/application.js @@ -0,0 +1,22 @@ +import Ember from 'ember'; +import config from '../config/environment'; + +export default Ember.Route.extend({ + intl: Ember.inject.service(), + + beforeModel() { + this.get('intl').setLocale('en-us'); + }, + + model: function() { + var url = config.APP.ApiUrl + 'api/stats'; + return Ember.$.getJSON(url).then(function(data) { + return Ember.Object.create(data); + }); + }, + + setupController: function(controller, model) { + this._super(controller, model); + Ember.run.later(this, this.refresh, 5000); + } +}); diff --git a/www/app/routes/blocks.js b/www/app/routes/blocks.js new file mode 100644 index 0000000..975d698 --- /dev/null +++ b/www/app/routes/blocks.js @@ -0,0 +1,32 @@ +import Ember from 'ember'; +import Block from "../models/block"; +import config from '../config/environment'; + +export default Ember.Route.extend({ + model: function() { + var url = config.APP.ApiUrl + 'api/blocks'; + return Ember.$.getJSON(url).then(function(data) { + if (data.candidates) { + data.candidates = data.candidates.map(function(b) { + return Block.create(b); + }); + } + if (data.immature) { + data.immature = 
data.immature.map(function(b) { + return Block.create(b); + }); + } + if (data.matured) { + data.matured = data.matured.map(function(b) { + return Block.create(b); + }); + } + return data; + }); + }, + + setupController: function(controller, model) { + this._super(controller, model); + Ember.run.later(this, this.refresh, 5000); + } +}); diff --git a/www/app/routes/index.js b/www/app/routes/index.js new file mode 100644 index 0000000..cbee966 --- /dev/null +++ b/www/app/routes/index.js @@ -0,0 +1,11 @@ +import Ember from 'ember'; + +export default Ember.Route.extend({ + actions: { + lookup(login) { + if (!Ember.isEmpty(login)) { + return this.transitionTo('account', login); + } + } + } +}); diff --git a/www/app/routes/payments.js b/www/app/routes/payments.js new file mode 100644 index 0000000..aa52f04 --- /dev/null +++ b/www/app/routes/payments.js @@ -0,0 +1,22 @@ +import Ember from 'ember'; +import Payment from "../models/payment"; +import config from '../config/environment'; + +export default Ember.Route.extend({ + model: function() { + var url = config.APP.ApiUrl + 'api/payments'; + return Ember.$.getJSON(url).then(function(data) { + if (data.payments) { + data.payments = data.payments.map(function(p) { + return Payment.create(p); + }); + } + return data; + }); + }, + + setupController: function(controller, model) { + this._super(controller, model); + Ember.run.later(this, this.refresh, 5000); + } +}); diff --git a/www/app/styles/app.css b/www/app/styles/app.css new file mode 100644 index 0000000..9384155 --- /dev/null +++ b/www/app/styles/app.css @@ -0,0 +1,174 @@ +/* Sticky footer styles +-------------------------------------------------- */ +html { + position: relative; + min-height: 100%; +} +body { + /* Margin bottom by footer height */ + margin-bottom: 60px; + background: url('/bg.png'); +} +.footer { + position: absolute; + bottom: 0; + width: 100%; + /* Set the fixed height of the footer here */ + height: 60px; + background-color: #f5f5f5; +} + + +/* 
Custom page CSS +-------------------------------------------------- */ +/* Not required for template or sticky footer method. */ + +body { + padding-top: 20px; + padding-bottom: 5px; +} + +body > .container { + padding: 0px 15px 0; +} +.container .text-muted { + margin: 20px 0; +} + +.footer > .container { + padding-right: 15px; + padding-left: 15px; +} + +.jumbotron { + margin: 0; + padding: 40px 0 15px 0; + margin-bottom: 15px; +} + +code { + font-size: 80%; +} + +.navbar-default { + background-color: #04191f; + border-color: #69102b; +} +.navbar-default .navbar-brand { + color: #dadada; +} +.navbar-default .navbar-brand:hover, .navbar-default .navbar-brand:focus { + color: #ffffff; +} +.navbar-default .navbar-text { + color: #dadada; +} +.navbar-default .navbar-nav > li > a { + color: #dadada; +} +.navbar-default .navbar-nav > li > a:hover, .navbar-default .navbar-nav > li > a:focus { + color: #ffffff; +} +.navbar-default .navbar-nav > .active > a, .navbar-default .navbar-nav > .active > a:hover, .navbar-default .navbar-nav > .active > a:focus { + color: #ffffff; + background-color: #0a6c9d; +} +.navbar-default .navbar-nav > .open > a, .navbar-default .navbar-nav > .open > a:hover, .navbar-default .navbar-nav > .open > a:focus { + color: #ffffff; + background-color: #69102b; +} +.navbar-default .navbar-toggle { + border-color: #0a6c9d; +} +.navbar-default .navbar-toggle:hover, .navbar-default .navbar-toggle:focus { + background-color: #0a6c9d; +} +.navbar-default .navbar-toggle .icon-bar { + background-color: #dadada; +} +.navbar-default .navbar-collapse, +.navbar-default .navbar-form { + border-color: #dadada; +} +.navbar-default .navbar-link { + color: #dadada; +} +.navbar-default .navbar-link:hover { + color: #ffffff; +} + +@media (max-width: 767px) { + .navbar-default .navbar-nav .open .dropdown-menu > li > a { + color: #dadada; + } + .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > li > 
a:focus { + color: #ffffff; + } + .navbar-default .navbar-nav .open .dropdown-menu > .active > a, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { + color: #ffffff; + background-color: #0a6c9d; + } +} + +span.logo-1 { + font-weight: 700; + color: #1994b8; +} + +span.logo-2 { + font-weight: 300; + color: #FFF; +} + +span.logo-3 { + color: #FFF; + font-weight: 100; +} + +.navbar-collapse { + font-size: 14px; + font-weight: 200; +} + +.note { + margin: 0 0 20px 0; + padding: 15px 30px 15px 15px; + border-left: 5px solid #eee; + border-radius: 5px; +} + +.note-info { + background-color: #E8F6FC; + border-color: #57b5e3; +} + +.note-danger { + background-color: #ff9999; + border-color: #ff0000; +} + +h4.note { + margin-top: 0; + font-weight: 300 !important; +} + +/* Stats */ +.stats { + margin-bottom: 10px; + margin-top: 5px; +} +.stats:last-child{ + width: auto; +} +.stats > h3 > i { + width: 21px; +} +.stats > div{ + padding: 5px 0; +} +.stats > div > .fa { + width: 25px; +} +.stats > div > span:first-of-type{ + font-weight: bold; +} diff --git a/www/app/templates/about.hbs b/www/app/templates/about.hbs new file mode 100644 index 0000000..f6f5979 --- /dev/null +++ b/www/app/templates/about.hbs @@ -0,0 +1,20 @@ +
+ +

Terms of Service

+

By using the pool you accept all possible risks related to experimental software usage.
+ Pool owner can't compensate any irreversible losses, but will do his best to prevent worst case. +

+

Details

+

+

    +
  • Written in Go it's a rocket highly concurrent and low RAM consuming piece of code
  • +
  • High performance proxy
  • +
  • Payouts and block unlocking module
  • +
  • Designed for 100% distributed setup
  • +
  • Strict policy module
  • +
  • Beautiful modern Ember.js frontend
  • +
+

+
diff --git a/www/app/templates/account.hbs b/www/app/templates/account.hbs new file mode 100644 index 0000000..3d5af61 --- /dev/null +++ b/www/app/templates/account.hbs @@ -0,0 +1,51 @@ +{{outlet 'error' }} +
+
+
+
+
+ Immature Balance: {{format-balance model.stats.immature}}
+ Preliminary balance awaiting blocks to mature. +
+
+ Pending Balance: {{format-balance model.stats.balance}}
+ Credited coins awaiting payout. +
+
Total Paid: {{format-balance model.stats.paid}}
+
+
+ {{#if model.stats.lastShare}} +
+ Last Share Submitted: {{format-relative (seconds-to-ms (string-to-int model.stats.lastShare))}} +
+ {{/if}} +
Workers Online: {{format-number model.workersOnline}}
+
Hashrate (30m): {{format-hashrate model.currentHashrate}}
+
Hashrate (3h): {{format-hashrate model.hashrate}}
+
+
+ {{#if model.stats.blocksFound}} +
Blocks Found: {{format-number model.stats.blocksFound}}
+ {{/if}} +
Total Payments: {{format-number model.paymentsTotal}}
+
+ Your Round Share: {{format-number roundPercent style='percent' maximumFractionDigits='6'}}
+ Percent of your contribution to current round. +
+
+
+
+
+ +
+ +
+ +{{outlet}} diff --git a/www/app/templates/account/index.hbs b/www/app/templates/account/index.hbs new file mode 100644 index 0000000..bd2616e --- /dev/null +++ b/www/app/templates/account/index.hbs @@ -0,0 +1,46 @@ +
+ {{#if model.workers}} +

Your Workers

+ + + + + + + + + + + {{#each-in model.workers as |k v|}} + {{#if v.offline}} + + + + + + + {{else}} + + + + + + + {{/if}} + {{/each-in}} + +
IDHashrate (rough, short average)Hashrate (accurate, long average)Last Share
{{k}}{{format-hashrate v.hr}}{{format-hashrate v.hr2}}{{format-relative (seconds-to-ms v.lastBeat)}}
{{k}}{{format-hashrate v.hr}}{{format-hashrate v.hr2}}{{format-relative (seconds-to-ms v.lastBeat)}}
+ {{else}} +

No workers online

+ {{/if}} + + +
diff --git a/www/app/templates/account/payouts.hbs b/www/app/templates/account/payouts.hbs new file mode 100644 index 0000000..a6d2433 --- /dev/null +++ b/www/app/templates/account/payouts.hbs @@ -0,0 +1,25 @@ +
+ {{#if model.payments}} +

Your Latest Payouts

+ + + + + + + + + + {{#each model.payments as |tx|}} + + + + + + {{/each}} + +
TimeTx IDAmount
{{format-date-locale tx.timestamp}}{{tx.tx}}{{format-balance tx.amount}}
+ {{else}} +

No payouts yet

+ {{/if}} +
diff --git a/www/app/templates/application-error.hbs b/www/app/templates/application-error.hbs new file mode 100644 index 0000000..940fc1d --- /dev/null +++ b/www/app/templates/application-error.hbs @@ -0,0 +1,6 @@ +
+ +
diff --git a/www/app/templates/application.hbs b/www/app/templates/application.hbs new file mode 100644 index 0000000..bcacef2 --- /dev/null +++ b/www/app/templates/application.hbs @@ -0,0 +1,48 @@ + + + +{{outlet}} diff --git a/www/app/templates/blocks.hbs b/www/app/templates/blocks.hbs new file mode 100644 index 0000000..d27d901 --- /dev/null +++ b/www/app/templates/blocks.hbs @@ -0,0 +1,23 @@ +
+
+

Pool always pay full block reward including TX fees and uncle rewards.

+ + Block maturity requires up to 520 blocks. + Usually it's less indeed. + +
+
+
+ + {{outlet}} +
diff --git a/www/app/templates/blocks/immature.hbs b/www/app/templates/blocks/immature.hbs new file mode 100644 index 0000000..d154da1 --- /dev/null +++ b/www/app/templates/blocks/immature.hbs @@ -0,0 +1,47 @@ +{{#if model.immature}} +

Immature Blocks

+ + + + + + + + + + + + {{#each model.immature as |block|}} + + + + + + + + {{/each}} + +
HeightBlock HashTime FoundVarianceReward
{{format-number block.height}} + {{#if block.isOk}} + {{block.hash}} + {{else}} + Lost + {{/if}} + {{format-date-locale block.timestamp}} + {{#if block.isLucky}} + {{format-number block.variance style='percent'}} + {{else}} + {{format-number block.variance style='percent'}} + {{/if}} + + {{#if block.uncle}} + {{block.formatReward}} + {{else}} + {{#if block.isOk}} + {{block.formatReward}} + {{/if}} + {{/if}} +
+{{else}} +

No immature blocks yet

+{{/if}} diff --git a/www/app/templates/blocks/index.hbs b/www/app/templates/blocks/index.hbs new file mode 100644 index 0000000..50876ee --- /dev/null +++ b/www/app/templates/blocks/index.hbs @@ -0,0 +1,47 @@ +{{#if model.matured}} +

Matured Blocks

+ + + + + + + + + + + + {{#each model.matured as |block|}} + + + + + + + + {{/each}} + +
HeightBlock HashTime FoundVarianceReward
{{format-number block.height}} + {{#if block.isOk}} + {{block.hash}} + {{else}} + Lost + {{/if}} + {{format-date-locale block.timestamp}} + {{#if block.isLucky}} + {{format-number block.variance style='percent'}} + {{else}} + {{format-number block.variance style='percent'}} + {{/if}} + + {{#if block.uncle}} + {{block.formatReward}} + {{else}} + {{#if block.isOk}} + {{block.formatReward}} + {{/if}} + {{/if}} +
+{{else}} +

No matured blocks yet

+{{/if}} diff --git a/www/app/templates/blocks/pending.hbs b/www/app/templates/blocks/pending.hbs new file mode 100644 index 0000000..0686d7f --- /dev/null +++ b/www/app/templates/blocks/pending.hbs @@ -0,0 +1,29 @@ +{{#if model.candidates}} +

Recently Found Blocks

+ + + + + + + + + + {{#each model.candidates as |block|}} + + + + + + {{/each}} + +
HeightTime FoundVariance
{{format-number block.height}}{{format-date-locale block.timestamp}} + {{#if block.isLucky}} + {{format-number block.variance style='percent'}} + {{else}} + {{format-number block.variance style='percent'}} + {{/if}} +
+{{else}} +

No new blocks yet

+{{/if}} diff --git a/www/app/templates/components/.gitkeep b/www/app/templates/components/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/www/app/templates/components/active-li.hbs b/www/app/templates/components/active-li.hbs new file mode 100644 index 0000000..889d9ee --- /dev/null +++ b/www/app/templates/components/active-li.hbs @@ -0,0 +1 @@ +{{yield}} diff --git a/www/app/templates/help.hbs b/www/app/templates/help.hbs new file mode 100644 index 0000000..e8bf29f --- /dev/null +++ b/www/app/templates/help.hbs @@ -0,0 +1,39 @@ +
+ +

In order to mine on this pool you need to have an + ethminer installation + pointed to
http://example.net:8888/miner/YOUR_ETH_ADDRESS/RIG_ID +

+
+
YOUR_ETH_ADDRESS
+
This is your address for payouts, generate one with geth, or mine directly to exchange like + Poloniex + or Bittrex.
+ Example: 0xb85150eb365e7df0941f0cf08235f987ba91506a. +
+
RIG_ID
+
+ ID of your farm to distinguish it from your other rig. If you have just one rig, feel free to omit this param. + This param must be short alphanumeric string with optional dashes and underscores.
+ Example: rig-1 +
+
+

+ Full example: + ethminer -F http://example.net:8888/miner/0xb85150eb365e7df0941f0cf08235f987ba91506a/myfarm -G --farm-recheck 200.
+ Hint: If you are compiling ethminer from latest source, please also use + extra --disable-submit-hashrate option. +

+ +

Mining with Ether-Proxy

+

Use stable (green) release of my Ethereum Solo/Pool Mining Proxy.

+ +

Advice

+

CPU mining is not recommended.

+

Terms of Service

+

By using the pool you accept all possible risks related to experimental software usage.
+ Pool owner can't compensate any irreversible losses, but will do his best to prevent worst case. +

+
diff --git a/www/app/templates/index.hbs b/www/app/templates/index.hbs new file mode 100644 index 0000000..e60cf41 --- /dev/null +++ b/www/app/templates/index.hbs @@ -0,0 +1,46 @@ +
+
+
+
+

+ Open Ethereum Pool +

+ Min. payout threshold: 0.5 Ether, Payouts run twice per day.
+ PROP Stable and profitable pool with regular payouts. +
+
+
Miners Online: {{format-number stats.model.minersTotal}}
+
Pool Hash Rate: {{format-hashrate stats.model.hashrate}}
+
Pool Fee: 1%
+
+
+
Network Difficulty: {{format-number stats.difficulty}}
+
Blockchain Height: {{format-number stats.height}}
+ {{#if stats.model.stats.lastBlockFound}} +
Last Block Found: {{format-relative (seconds-to-ms stats.model.stats.lastBlockFound)}}
+ {{/if}} +
Round Variance: {{format-number stats.roundVariance style='percent'}}
+
+
+
+
+ +
+
+

Your Stats & Payment History

+
+ {{input value=cachedLogin class="form-control" placeholder="Enter Your Ethereum Address"}} + + + +
+
+
+
+

+ ethminer.exe -F http://example.net:8888/miner/<address>/<worker> -G +

+
+
diff --git a/www/app/templates/not-found.hbs b/www/app/templates/not-found.hbs new file mode 100644 index 0000000..29844b1 --- /dev/null +++ b/www/app/templates/not-found.hbs @@ -0,0 +1,6 @@ +
+ +
diff --git a/www/app/templates/payments.hbs b/www/app/templates/payments.hbs new file mode 100644 index 0000000..59711f4 --- /dev/null +++ b/www/app/templates/payments.hbs @@ -0,0 +1,33 @@ +
+
+

Pool always pay tx fees from it's own pocket for now.

+ Total payments sent: {{model.paymentsTotal}} +
+
+
+ {{#if model.payments}} +

Latest Payouts

+ + + + + + + + + + + {{#each model.payments as |tx|}} + + + + + + + {{/each}} + +
TimeAmountAddressTx ID
{{format-date-locale tx.timestamp}}{{format-number tx.formatAmount}}{{tx.address}}{{format-tx tx.tx}}
+ {{else}} +

No payouts yet

+ {{/if}} +
diff --git a/www/bower.json b/www/bower.json new file mode 100644 index 0000000..7b4fd82 --- /dev/null +++ b/www/bower.json @@ -0,0 +1,16 @@ +{ + "name": "pool", + "dependencies": { + "ember": "~2.4.3", + "ember-cli-shims": "0.1.1", + "ember-cli-test-loader": "0.2.2", + "ember-qunit-notifications": "0.1.0" + }, + "resolutions": { + "ember": "2.2.0" + }, + "devDependencies": { + "bootstrap": "~3.3.6", + "font-awesome": "fontawesome#~4.5.0" + } +} diff --git a/www/build.sh b/www/build.sh new file mode 100755 index 0000000..32d575c --- /dev/null +++ b/www/build.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./node_modules/.bin/ember build --environment production diff --git a/www/config/environment.js b/www/config/environment.js new file mode 100644 index 0000000..7c12a7e --- /dev/null +++ b/www/config/environment.js @@ -0,0 +1,54 @@ +/* jshint node: true */ + +module.exports = function(environment) { + var ENV = { + modulePrefix: 'pool', + environment: environment, + baseURL: '/', + locationType: 'hash', + EmberENV: { + FEATURES: { + // Here you can enable experimental features on an ember canary build + // e.g. 'with-controller': true + } + }, + + APP: { + // Here you can pass flags/options to your application instance + // when it is created + ApiUrl: '//example.net/' + }, + + intl: { + defaultLocale: 'en-us', + locales: ['en-us'] + } + }; + + if (environment === 'development') { + ENV.APP.ApiUrl = 'http://localhost:8080/' + // ENV.APP.LOG_RESOLVER = true; + // ENV.APP.LOG_ACTIVE_GENERATION = true; + // ENV.APP.LOG_TRANSITIONS = true; + // ENV.APP.LOG_TRANSITIONS_INTERNAL = true; + // ENV.APP.LOG_VIEW_LOOKUPS = true; + } + + if (environment === 'test') { + // Testem prefers this... 
+ ENV.baseURL = '/'; + ENV.locationType = 'none'; + + // keep test console output quieter + ENV.APP.LOG_ACTIVE_GENERATION = false; + ENV.APP.LOG_VIEW_LOOKUPS = false; + + ENV.APP.rootElement = '#ember-testing'; + } + + if (environment === 'production') { + ENV.APP.API_URL = 'http://localhost:8080' + } + + return ENV; +}; diff --git a/www/ember-cli-build.js b/www/ember-cli-build.js new file mode 100644 index 0000000..7227213 --- /dev/null +++ b/www/ember-cli-build.js @@ -0,0 +1,33 @@ +/* global require, module */ +var EmberApp = require('ember-cli/lib/broccoli/ember-app'); +var Funnel = require('broccoli-funnel'); + +module.exports = function(defaults) { + var app = new EmberApp(defaults, { + // Add options here + }); + + // Use `app.import` to add additional libraries to the generated + // output files. + // + // If you need to use different assets in different + // environments, specify an object as the first parameter. That + // object's keys should be the environment name and the values + // should be the asset to use in that environment. + // + // If the library that you are including contains AMD or ES6 + // modules that you would like to import into your application + // please specify an object with the list of modules as keys + // along with the exports of each module as its value. 
+ app.import('bower_components/bootstrap/dist/css/bootstrap.min.css') + app.import('bower_components/bootstrap/dist/js/bootstrap.min.js'); + app.import('bower_components/font-awesome/css/font-awesome.min.css') + + var extraAssets = new Funnel('bower_components/font-awesome/fonts', { + srcDir: '/', + include: ['**/*.ttf', '**/*.woff', '**/*.woff2'], + destDir: '/fonts' + }); + + return app.toTree(extraAssets); +}; diff --git a/www/package.json b/www/package.json new file mode 100644 index 0000000..6f9146e --- /dev/null +++ b/www/package.json @@ -0,0 +1,43 @@ +{ + "name": "pool", + "version": "0.0.0", + "description": "Small description for pool goes here", + "private": true, + "directories": { + "doc": "doc", + "test": "tests" + }, + "scripts": { + "build": "ember build", + "start": "ember server", + "test": "ember test" + }, + "repository": "", + "engines": { + "node": ">= 0.10.0" + }, + "author": "", + "license": "MIT", + "devDependencies": { + "broccoli-asset-rev": "^2.4.2", + "broccoli-funnel": "^1.0.0", + "ember-ajax": "0.7.1", + "ember-cli": "2.4.3", + "ember-cli-app-version": "^1.0.0", + "ember-cli-babel": "^5.1.6", + "ember-cli-dependency-checker": "^1.2.0", + "ember-cli-htmlbars": "^1.0.3", + "ember-cli-htmlbars-inline-precompile": "^0.3.1", + "ember-cli-inject-live-reload": "^1.4.0", + "ember-cli-qunit": "^1.4.0", + "ember-cli-release": "0.2.8", + "ember-cli-sri": "^2.1.0", + "ember-cli-uglify": "^1.2.0", + "ember-export-application-global": "^1.0.5", + "ember-load-initializers": "^0.5.1", + "ember-resolver": "^2.0.3", + "loader.js": "^4.0.1", + "ember-intl": "2.0.0", + "ember-cli-cookie": "^0.1.1" + } +} diff --git a/www/public/bg.png b/www/public/bg.png new file mode 100644 index 0000000..6736f39 Binary files /dev/null and b/www/public/bg.png differ diff --git a/www/public/crossdomain.xml b/www/public/crossdomain.xml new file mode 100644 index 0000000..0c16a7a --- /dev/null +++ b/www/public/crossdomain.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + diff 
--git a/www/public/favicon.ico b/www/public/favicon.ico new file mode 100644 index 0000000..85a4d9f Binary files /dev/null and b/www/public/favicon.ico differ diff --git a/www/public/robots.txt b/www/public/robots.txt new file mode 100644 index 0000000..f591645 --- /dev/null +++ b/www/public/robots.txt @@ -0,0 +1,3 @@ +# http://www.robotstxt.org +User-agent: * +Disallow: diff --git a/www/testem.json b/www/testem.json new file mode 100644 index 0000000..0f35392 --- /dev/null +++ b/www/testem.json @@ -0,0 +1,12 @@ +{ + "framework": "qunit", + "test_page": "tests/index.html?hidepassed", + "disable_watching": true, + "launch_in_ci": [ + "PhantomJS" + ], + "launch_in_dev": [ + "PhantomJS", + "Chrome" + ] +} diff --git a/www/tests/.jshintrc b/www/tests/.jshintrc new file mode 100644 index 0000000..6ec0b7c --- /dev/null +++ b/www/tests/.jshintrc @@ -0,0 +1,52 @@ +{ + "predef": [ + "document", + "window", + "location", + "setTimeout", + "$", + "-Promise", + "define", + "console", + "visit", + "exists", + "fillIn", + "click", + "keyEvent", + "triggerEvent", + "find", + "findWithAssert", + "wait", + "DS", + "andThen", + "currentURL", + "currentPath", + "currentRouteName" + ], + "node": false, + "browser": false, + "boss": true, + "curly": true, + "debug": false, + "devel": false, + "eqeqeq": true, + "evil": true, + "forin": false, + "immed": false, + "laxbreak": false, + "newcap": true, + "noarg": true, + "noempty": false, + "nonew": false, + "nomen": false, + "onevar": false, + "plusplus": false, + "regexp": false, + "undef": true, + "sub": true, + "strict": false, + "white": false, + "eqnull": true, + "esnext": true, + "unused": true +} diff --git a/www/tests/helpers/resolver.js b/www/tests/helpers/resolver.js new file mode 100644 index 0000000..28f4ece --- /dev/null +++ b/www/tests/helpers/resolver.js @@ -0,0 +1,11 @@ +import Resolver from 'ember/resolver'; +import config from '../../config/environment'; + +var resolver = Resolver.create(); + +resolver.namespace = { + 
modulePrefix: config.modulePrefix, + podModulePrefix: config.podModulePrefix +}; + +export default resolver; diff --git a/www/tests/helpers/start-app.js b/www/tests/helpers/start-app.js new file mode 100644 index 0000000..0f7aab1 --- /dev/null +++ b/www/tests/helpers/start-app.js @@ -0,0 +1,18 @@ +import Ember from 'ember'; +import Application from '../../app'; +import config from '../../config/environment'; + +export default function startApp(attrs) { + var application; + + var attributes = Ember.merge({}, config.APP); + attributes = Ember.merge(attributes, attrs); // use defaults, but you can override; + + Ember.run(function() { + application = Application.create(attributes); + application.setupForTesting(); + application.injectTestHelpers(); + }); + + return application; +} diff --git a/www/tests/index.html b/www/tests/index.html new file mode 100644 index 0000000..38c0c66 --- /dev/null +++ b/www/tests/index.html @@ -0,0 +1,33 @@ + + + + + + Pool Tests + + + + {{content-for 'head'}} + {{content-for 'test-head'}} + + + + + + {{content-for 'head-footer'}} + {{content-for 'test-head-footer'}} + + + + {{content-for 'body'}} + {{content-for 'test-body'}} + + + + + + + {{content-for 'body-footer'}} + {{content-for 'test-body-footer'}} + + diff --git a/www/tests/integration/components/active-li-test.js b/www/tests/integration/components/active-li-test.js new file mode 100644 index 0000000..92b38f7 --- /dev/null +++ b/www/tests/integration/components/active-li-test.js @@ -0,0 +1,26 @@ +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('active-li', 'Integration | Component | active li', { + integration: true +}); + +test('it renders', function(assert) { + assert.expect(2); + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... 
}); + + this.render(hbs`{{active-li}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#active-li}} + template block text + {{/active-li}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/www/tests/test-helper.js b/www/tests/test-helper.js new file mode 100644 index 0000000..e6cfb70 --- /dev/null +++ b/www/tests/test-helper.js @@ -0,0 +1,6 @@ +import resolver from './helpers/resolver'; +import { + setResolver +} from 'ember-qunit'; + +setResolver(resolver); diff --git a/www/tests/unit/.gitkeep b/www/tests/unit/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/www/tests/unit/controllers/account-test.js b/www/tests/unit/controllers/account-test.js new file mode 100644 index 0000000..b3cf637 --- /dev/null +++ b/www/tests/unit/controllers/account-test.js @@ -0,0 +1,12 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:account', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. +test('it exists', function(assert) { + var controller = this.subject(); + assert.ok(controller); +}); diff --git a/www/tests/unit/controllers/application-test.js b/www/tests/unit/controllers/application-test.js new file mode 100644 index 0000000..eb711c9 --- /dev/null +++ b/www/tests/unit/controllers/application-test.js @@ -0,0 +1,12 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:application', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + var controller = this.subject(); + assert.ok(controller); +}); diff --git a/www/tests/unit/controllers/blocks-test.js b/www/tests/unit/controllers/blocks-test.js new file mode 100644 index 0000000..8b3a0b1 --- /dev/null +++ b/www/tests/unit/controllers/blocks-test.js @@ -0,0 +1,12 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:blocks', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. +test('it exists', function(assert) { + var controller = this.subject(); + assert.ok(controller); +}); diff --git a/www/tests/unit/controllers/index-test.js b/www/tests/unit/controllers/index-test.js new file mode 100644 index 0000000..3625931 --- /dev/null +++ b/www/tests/unit/controllers/index-test.js @@ -0,0 +1,12 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:index', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. +test('it exists', function(assert) { + var controller = this.subject(); + assert.ok(controller); +}); diff --git a/www/tests/unit/helpers/format-balance-test.js b/www/tests/unit/helpers/format-balance-test.js new file mode 100644 index 0000000..09a5a5d --- /dev/null +++ b/www/tests/unit/helpers/format-balance-test.js @@ -0,0 +1,10 @@ +import { formatBalance } from '../../../helpers/format-balance'; +import { module, test } from 'qunit'; + +module('Unit | Helper | format balance'); + +// Replace this with your real tests. 
+test('it works', function(assert) { + var result = formatBalance(42); + assert.ok(result); +}); diff --git a/www/tests/unit/helpers/format-date-locale-test.js b/www/tests/unit/helpers/format-date-locale-test.js new file mode 100644 index 0000000..9e3a590 --- /dev/null +++ b/www/tests/unit/helpers/format-date-locale-test.js @@ -0,0 +1,10 @@ +import { formatDateLocale } from '../../../helpers/format-date-locale'; +import { module, test } from 'qunit'; + +module('Unit | Helper | format date locale'); + +// Replace this with your real tests. +test('it works', function(assert) { + var result = formatDateLocale(42); + assert.ok(result); +}); diff --git a/www/tests/unit/helpers/format-date-test.js b/www/tests/unit/helpers/format-date-test.js new file mode 100644 index 0000000..4cb7ba2 --- /dev/null +++ b/www/tests/unit/helpers/format-date-test.js @@ -0,0 +1,10 @@ +import { formatDate } from '../../../helpers/format-date'; +import { module, test } from 'qunit'; + +module('Unit | Helper | format date'); + +// Replace this with your real tests. +test('it works', function(assert) { + var result = formatDate(42); + assert.ok(result); +}); diff --git a/www/tests/unit/helpers/format-tx-test.js b/www/tests/unit/helpers/format-tx-test.js new file mode 100644 index 0000000..64db03d --- /dev/null +++ b/www/tests/unit/helpers/format-tx-test.js @@ -0,0 +1,10 @@ +import { formatTx } from '../../../helpers/format-tx'; +import { module, test } from 'qunit'; + +module('Unit | Helper | format tx'); + +// Replace this with your real tests. 
+test('it works', function(assert) { + var result = formatTx(42); + assert.ok(result); +}); diff --git a/www/tests/unit/helpers/from-now-test.js b/www/tests/unit/helpers/from-now-test.js new file mode 100644 index 0000000..68d78f9 --- /dev/null +++ b/www/tests/unit/helpers/from-now-test.js @@ -0,0 +1,10 @@ +import { fromNow } from '../../../helpers/from-now'; +import { module, test } from 'qunit'; + +module('Unit | Helper | from now'); + +// Replace this with your real tests. +test('it works', function(assert) { + var result = fromNow(42); + assert.ok(result); +}); diff --git a/www/tests/unit/models/block-test.js b/www/tests/unit/models/block-test.js new file mode 100644 index 0000000..d89fa89 --- /dev/null +++ b/www/tests/unit/models/block-test.js @@ -0,0 +1,12 @@ +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('block', 'Unit | Model | block', { + // Specify the other units that are required for this test. + needs: [] +}); + +test('it exists', function(assert) { + var model = this.subject(); + // var store = this.store(); + assert.ok(!!model); +}); diff --git a/www/tests/unit/models/payment-test.js b/www/tests/unit/models/payment-test.js new file mode 100644 index 0000000..953da10 --- /dev/null +++ b/www/tests/unit/models/payment-test.js @@ -0,0 +1,12 @@ +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('payment', 'Unit | Model | payment', { + // Specify the other units that are required for this test. + needs: [] +}); + +test('it exists', function(assert) { + var model = this.subject(); + // var store = this.store(); + assert.ok(!!model); +}); diff --git a/www/tests/unit/routes/about-test.js b/www/tests/unit/routes/about-test.js new file mode 100644 index 0000000..0215072 --- /dev/null +++ b/www/tests/unit/routes/about-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:about', 'Unit | Route | about', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/application-test.js b/www/tests/unit/routes/application-test.js new file mode 100644 index 0000000..7caba15 --- /dev/null +++ b/www/tests/unit/routes/application-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:application', 'Unit | Route | application', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/blocks-test.js b/www/tests/unit/routes/blocks-test.js new file mode 100644 index 0000000..87567a4 --- /dev/null +++ b/www/tests/unit/routes/blocks-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:blocks', 'Unit | Route | blocks', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/blocks/index-test.js b/www/tests/unit/routes/blocks/index-test.js new file mode 100644 index 0000000..0068ed5 --- /dev/null +++ b/www/tests/unit/routes/blocks/index-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:blocks/index', 'Unit | Route | blocks/index', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/blocks/pending-test.js b/www/tests/unit/routes/blocks/pending-test.js new file mode 100644 index 0000000..60d4c10 --- /dev/null +++ b/www/tests/unit/routes/blocks/pending-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:blocks/pending', 'Unit | Route | blocks/pending', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/error-test.js b/www/tests/unit/routes/error-test.js new file mode 100644 index 0000000..eef0953 --- /dev/null +++ b/www/tests/unit/routes/error-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:error', 'Unit | Route | error', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/help-test.js b/www/tests/unit/routes/help-test.js new file mode 100644 index 0000000..a1e2a26 --- /dev/null +++ b/www/tests/unit/routes/help-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:help', 'Unit | Route | help', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/index-test.js b/www/tests/unit/routes/index-test.js new file mode 100644 index 0000000..18c8f13 --- /dev/null +++ b/www/tests/unit/routes/index-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:index', 'Unit | Route | index', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/miner-test.js b/www/tests/unit/routes/miner-test.js new file mode 100644 index 0000000..6614e12 --- /dev/null +++ b/www/tests/unit/routes/miner-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:miner', 'Unit | Route | miner', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/payments-test.js b/www/tests/unit/routes/payments-test.js new file mode 100644 index 0000000..851b9d1 --- /dev/null +++ b/www/tests/unit/routes/payments-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:payments', 'Unit | Route | payments', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/stats-payouts-test.js b/www/tests/unit/routes/stats-payouts-test.js new file mode 100644 index 0000000..2ec2184 --- /dev/null +++ b/www/tests/unit/routes/stats-payouts-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:stats-payouts', 'Unit | Route | stats payouts', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/tests/unit/routes/stats/not-found-test.js b/www/tests/unit/routes/stats/not-found-test.js new file mode 100644 index 0000000..30c0744 --- /dev/null +++ b/www/tests/unit/routes/stats/not-found-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:stats/not-found', 'Unit | Route | stats/not found', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + var route = this.subject(); + assert.ok(route); +}); diff --git a/www/translations/en-us.yaml b/www/translations/en-us.yaml new file mode 100644 index 0000000..5c7ef82 --- /dev/null +++ b/www/translations/en-us.yaml @@ -0,0 +1,5 @@ +product: + info: '{product} will cost {price, number, USD} if ordered by {deadline, date, time}' + title: 'Hello world!' + html: + info: '{product} will cost {price, number, USD} if ordered by {deadline, date, time}' diff --git a/www/vendor/.gitkeep b/www/vendor/.gitkeep new file mode 100644 index 0000000..e69de29