author    Bas Wijnen <wijnen@debian.org>    2014-02-05 02:14:06 +0100
committer Bas Wijnen <wijnen@debian.org>    2014-02-05 02:14:06 +0100
commit    41579cd44e4797bba2faa68fa4424a01b97b993c (patch)
tree      205b85bc26b44806aadde75307611ce5d801d77d
Import cura-engine_14.01.orig.tar.xz
[dgit import orig cura-engine_14.01.orig.tar.xz]
-rw-r--r--  .gitignore  16
-rw-r--r--  LICENSE  661
-rw-r--r--  Makefile  52
-rw-r--r--  README.md  81
-rw-r--r--  _tests/runtest.py  14
-rw-r--r--  _tests/testModel.stl  bin 0 -> 28284 bytes
-rw-r--r--  bridge.cpp  56
-rw-r--r--  bridge.h  9
-rw-r--r--  clipper/License.txt  26
-rw-r--r--  clipper/README  365
-rw-r--r--  clipper/clipper.cpp  4545
-rw-r--r--  clipper/clipper.hpp  396
-rw-r--r--  comb.cpp  256
-rw-r--r--  comb.h  41
-rw-r--r--  fffProcessor.h  525
-rw-r--r--  gcodeExport.cpp  578
-rw-r--r--  gcodeExport.h  212
-rw-r--r--  infill.cpp  79
-rw-r--r--  infill.h  10
-rw-r--r--  inset.cpp  45
-rw-r--r--  inset.h  11
-rw-r--r--  layerPart.cpp  90
-rw-r--r--  layerPart.h  27
-rw-r--r--  main.cpp  199
-rw-r--r--  modelFile/modelFile.cpp  177
-rw-r--r--  modelFile/modelFile.h  110
-rw-r--r--  multiVolumes.h  59
-rw-r--r--  optimizedModel.cpp  141
-rw-r--r--  optimizedModel.h  77
-rw-r--r--  pathOrderOptimizer.cpp  95
-rw-r--r--  pathOrderOptimizer.h  35
-rw-r--r--  polygonOptimizer.cpp  47
-rw-r--r--  polygonOptimizer.h  11
-rw-r--r--  raft.cpp  19
-rw-r--r--  raft.h  9
-rw-r--r--  settings.cpp  101
-rw-r--r--  settings.h  130
-rw-r--r--  skin.cpp  116
-rw-r--r--  skin.h  10
-rw-r--r--  skirt.cpp  39
-rw-r--r--  skirt.h  9
-rw-r--r--  sliceDataStorage.h  83
-rw-r--r--  slicer.cpp  405
-rw-r--r--  slicer.h  160
-rw-r--r--  support.cpp  188
-rw-r--r--  support.h  30
-rw-r--r--  timeEstimate.cpp  305
-rw-r--r--  timeEstimate.h  76
-rw-r--r--  utils/floatpoint.h  83
-rw-r--r--  utils/gettime.cpp  14
-rw-r--r--  utils/gettime.h  33
-rw-r--r--  utils/intpoint.h  183
-rw-r--r--  utils/logoutput.cpp  36
-rw-r--r--  utils/logoutput.h  12
-rw-r--r--  utils/polygon.h  301
-rw-r--r--  utils/polygondebug.h  75
56 files changed, 11463 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cd35f5a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,16 @@
+*.tar.bz2
+*.tar.gz
+*.7z
+*.pyc
+*.zip
+*.exe
+.idea
+.DS_Store
+_bin
+_obj
+*.depend
+*.o
+.*.swp
+*.gcode
+CuraEngine
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..dba13ed
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..7afb443
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,52 @@
+#
+# Makefile for CuraEngine
+#
+
+# simplest working invocation to compile it
+#g++ main.cpp modelFile/modelFile.cpp clipper/clipper.cpp -I. -o CuraEngine
+
+CXX ?= g++
+CFLAGS += -I. -c -Wall -Wextra -O3 -fomit-frame-pointer
+# also include debug symbols
+#CFLAGS+=-ggdb
+LDFLAGS +=
+SOURCES = bridge.cpp comb.cpp gcodeExport.cpp infill.cpp inset.cpp layerPart.cpp main.cpp optimizedModel.cpp pathOrderOptimizer.cpp polygonOptimizer.cpp raft.cpp settings.cpp skin.cpp skirt.cpp slicer.cpp support.cpp timeEstimate.cpp
+SOURCES += clipper/clipper.cpp modelFile/modelFile.cpp utils/gettime.cpp utils/logoutput.cpp
+OBJECTS = $(SOURCES:.cpp=.o)
+EXECUTABLE = ./CuraEngine
+UNAME := $(shell uname)
+
+ifeq ($(UNAME), Linux)
+ OPEN_HTML=firefox
+ LDFLAGS += --static
+endif
+ifeq ($(UNAME), Darwin)
+ OPEN_HTML=open
+    #For MacOS, force a universal (i386 + x86_64) build
+ CFLAGS += -force_cpusubtype_ALL -mmacosx-version-min=10.6 -arch x86_64 -arch i386
+ LDFLAGS += -force_cpusubtype_ALL -mmacosx-version-min=10.6 -arch x86_64 -arch i386
+endif
+ifeq ($(UNAME), MINGW32_NT-6.1)
+    #For Windows, make it large address aware, which allows the process to use more than 2GB of memory.
+ EXECUTABLE := $(EXECUTABLE).exe
+ CFLAGS += -march=pentium4
+ LDFLAGS += -Wl,--large-address-aware -lm
+endif
+
+all: $(SOURCES) $(EXECUTABLE)
+
+$(EXECUTABLE): $(OBJECTS)
+ $(CXX) $(LDFLAGS) $(OBJECTS) -o $@
+
+.cpp.o:
+ $(CXX) $(CFLAGS) $< -o $@
+
+test: $(EXECUTABLE)
+ python _tests/runtest.py $(abspath $(EXECUTABLE))
+
+## clean stuff
+clean:
+ rm -f $(EXECUTABLE) $(OBJECTS)
+
+help:
+ @cat Makefile |grep \#\#| grep \: |cut -d\# -f3
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6e89555
--- /dev/null
+++ b/README.md
@@ -0,0 +1,81 @@
+CuraEngine
+==========
+The CuraEngine is a C++ console application for 3D printing GCode generation. It has been made as a better and faster alternative to the old Skeinforge engine.
+
+The CuraEngine is pure C++ and uses Clipper from http://www.angusj.com/delphi/clipper.php
+There are no external dependencies and Clipper is included in the source code without modifications.
+
+This is just a console application for GCode generation. For a full graphical application look at https://github.com/daid/Cura, which is the graphical frontend for CuraEngine.
+
+The CuraEngine can be used separately or in other applications. Feel free to add it to your application, but do take note of the license.
+
+License
+=======
+CuraEngine is released under terms of the AGPLv3 License.
+Terms of the license can be found in the LICENSE file, or at http://www.gnu.org/licenses/agpl.html
+
+But in general it boils down to: You need to share the source of any CuraEngine modifications if you make an application with the CuraEngine. (Even if you make a web-based slicer, you still need to share the source!)
+
+
+Internals
+=========
+
+The Cura Engine is structured as mainly .h files. This is not standard for a C++ project. However, using fewer .cpp files makes the optimizer work harder and removes linking error issues. It's partially a result of laziness, but also done for optimization.
+
+The .h files contain different steps called from the main.cpp file. The main.cpp file contains the global slicing logic.
+
+The slicing process follows the following global steps:
+* Load 3D model
+* Analyze and fix 3D model
+* Slice 3D model into 2D layers
+* Build LayerParts from sliced layers
+* Generate Insets
+* Generate up/down skin areas
+* Generate sparse infill areas
+* Generate GCode for each layer
+
+Each step has more logic in it, but this is a general overview.
+All data for the engine is stored in the "SliceDataStorage". It's important to remember that only the data from the previous step is valid.
+
+Coordinates are stored in 64bit integers as microns in the code. So if you see a value of 1000 then this means 1mm of distance. This is because Clipper works on 64bit integers and microns give a high enough resolution without limiting the size too much. Note that there are some bits and pieces of code that need to be careful about 64bit overflows; especially length calculations like sqrt(x*x+y*y) can overflow.
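
As a hedged illustration of that overflow remark (not code from this tree; the point type and function name below are made up), the safe pattern is to convert the micron deltas to floating point before squaring:

```cpp
#include <cstdint>
#include <cmath>

// Illustrative micron-based 2D point, in the spirit of utils/intpoint.h
// (the real header is not reproduced here).
struct MicronPoint { int64_t X, Y; };

// Squaring large micron values directly in 64-bit integers can overflow,
// so the deltas are converted to double before computing x*x + y*y.
inline double lengthMM(MicronPoint a, MicronPoint b)
{
    double dx = double(b.X - a.X);
    double dy = double(b.Y - a.Y);
    return std::sqrt(dx * dx + dy * dy) / 1000.0; // microns -> millimetres
}
```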
+
+OptimizedModel
+==============
+The OptimizedModel is a 3D model stored with vertex<->face relations. This gives touching face relations which are used later on to slice into layers faster.
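
A minimal sketch of the idea (illustrative structs only, not the actual optimizedModel.h declarations): each face knows its three vertices and the three faces it touches, and each vertex knows the faces it belongs to.

```cpp
#include <cstdint>
#include <vector>

// Illustrative layout of a vertex<->face indexed mesh. The touching-face
// indices let the slicer jump from a cut triangle straight to its
// neighbour instead of searching all faces for a matching edge.
struct OptFace
{
    int vertexIndex[3];   // indices into the vertex array
    int touchingFace[3];  // face sharing the edge opposite vertex i, or -1
};

struct OptVertex
{
    int64_t x, y, z;                 // position in microns
    std::vector<int> faceIndexList;  // faces that use this vertex
};

struct OptVolume
{
    std::vector<OptVertex> vertices;
    std::vector<OptFace> faces;
};
```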
+
+Slicer
+======
+While the whole GCode generation process is usually called slicing, the slicer in the CuraEngine is the piece of code that generates layers. Each layer has closed 2D polygons.
+These polygons are generated in a 2-step process. First all triangles are cut into lines per layer: for each layer a "line segment" is added to that layer.
+Next all these line segments are connected to each other to make polygons. The vertex<->face relations of the OptimizedModel help to make this process fast, as there is a high chance that 2 touching faces also make 2 connecting line segments.
+This code also fixes up small holes in the 3D model, so your model doesn't need to be perfectly manifold. It also accounts for incorrect normals, so it can flip line segments around to fit them end-to-end.
+
+After the Slicer we have closed polygons which can be used in Clipper, as Clipper can only operate on closed 2D polygons.
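
The per-triangle cut mentioned above can be sketched as follows (a simplified illustration, not slicer.cpp itself: it only handles the case of one vertex below the plane and two above, while the real code covers all vertex orderings and keeps segments consistently oriented):

```cpp
struct FPoint3  { double x, y, z; };
struct Segment2 { double ax, ay, bx, by; };

// Cut a triangle with the horizontal plane at height z, assuming p0 lies
// below the plane and p1/p2 lie above it. The two edge crossings form the
// "line segment" that this triangle contributes to the layer.
Segment2 cutTriangleAtZ(FPoint3 p0, FPoint3 p1, FPoint3 p2, double z)
{
    double t1 = (z - p0.z) / (p1.z - p0.z);
    double t2 = (z - p0.z) / (p2.z - p0.z);
    Segment2 seg;
    seg.ax = p0.x + (p1.x - p0.x) * t1;
    seg.ay = p0.y + (p1.y - p0.y) * t1;
    seg.bx = p0.x + (p2.x - p0.x) * t2;
    seg.by = p0.y + (p2.y - p0.y) * t2;
    return seg;
}
```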
+
+LayerParts
+==========
+An important concept to grasp is the LayerParts. LayerParts are separate parts inside a single layer. For example, if you have a cube, then each layer has a single LayerPart. However, if you have a table, the layers which build the legs have one LayerPart per leg, and thus there will be 4 LayerParts.
+A LayerPart is a separated area inside a single layer which does not touch any other LayerParts. Most operations run on LayerParts as this reduces the amount of data to process. During GCode generation, handling each LayerPart as its own step makes sure you never travel between LayerParts, which reduces the amount of external travel.
+LayerParts are generated after the Slicer step.
+
+To generate the LayerParts, Clipper is used. A Clipper union with extended results gives a list of polygons with holes in them. Each polygon is a LayerPart, and the holes are added to this LayerPart.
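
In the Clipper 6 API bundled in this tree, that roughly corresponds to executing a union into a PolyTree and treating each non-hole node as a part with its children as holes. The sketch below is illustrative, not the actual layerPart.cpp code:

```cpp
#include "clipper/clipper.hpp"
#include <vector>

struct Part
{
    ClipperLib::Path  outline; // outer contour of the part
    ClipperLib::Paths holes;   // holes inside that contour
};

// Union all closed polygons of one layer and split the result into parts.
std::vector<Part> splitIntoParts(const ClipperLib::Paths& layerPolygons)
{
    ClipperLib::Clipper clipper;
    clipper.AddPaths(layerPolygons, ClipperLib::ptSubject, true);

    ClipperLib::PolyTree tree;
    clipper.Execute(ClipperLib::ctUnion, tree,
                    ClipperLib::pftEvenOdd, ClipperLib::pftEvenOdd);

    std::vector<Part> parts;
    for (ClipperLib::PolyNode* node = tree.GetFirst(); node; node = node->GetNext())
    {
        if (node->IsHole())
            continue;                 // holes are attached to their parent below
        Part part;
        part.outline = node->Contour;
        for (size_t i = 0; i < node->Childs.size(); i++)
            part.holes.push_back(node->Childs[i]->Contour);
        parts.push_back(part);
    }
    return parts;
}
```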
+
+
+Insets
+======
+Insets are also called "Perimeters" or "Loops" sometimes. Generating the insets is only a small bit of code, as Clipper does most of the heavy lifting.
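
Insets are essentially negative polygon offsets; with the bundled Clipper 6 that can be sketched roughly like this (illustrative, not the actual inset.cpp, which goes through the project's own Polygons wrapper):

```cpp
#include "clipper/clipper.hpp"

// Shrink a part outline inwards by `distance` microns to get one inset
// (perimeter). Calling this repeatedly with growing distances yields the
// successive walls.
ClipperLib::Paths generateInset(const ClipperLib::Paths& outline, double distance)
{
    ClipperLib::ClipperOffset offsetter;
    offsetter.AddPaths(outline, ClipperLib::jtMiter, ClipperLib::etClosedPolygon);

    ClipperLib::Paths inset;
    offsetter.Execute(inset, -distance); // negative delta = shrink
    return inset;
}
```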
+
+Up/Down skin
+============
+The skin code generates the fully filled areas; it does this with some heavy boolean Clipper operations. The skin step uses data from different layers to get the job done. Check the code for details.
+The sparse infill area code is almost the same as the skin code, with the difference that it keeps the other areas and uses different offsets.
+
+Note that these steps generate the areas, not the actual infill lines. The infill line paths are generated later on. So the result of this step is a list of polygons which are the areas that need to be filled.
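
A rough sketch of the kind of boolean meant here (illustrative only; the real skin.cpp looks several layers up and down and applies extra offsets): the "up" skin of a layer is approximately its innermost inset minus whatever the layer above still covers.

```cpp
#include "clipper/clipper.hpp"

// Areas of this layer that the layer above does not cover must become
// solid top skin; everything else can later receive sparse infill.
ClipperLib::Paths upSkinAreas(const ClipperLib::Paths& innermostInset,
                              const ClipperLib::Paths& layerAboveOutline)
{
    ClipperLib::Clipper clipper;
    clipper.AddPaths(innermostInset, ClipperLib::ptSubject, true);
    clipper.AddPaths(layerAboveOutline, ClipperLib::ptClip, true);

    ClipperLib::Paths skin;
    clipper.Execute(ClipperLib::ctDifference, skin,
                    ClipperLib::pftEvenOdd, ClipperLib::pftEvenOdd);
    return skin;
}
```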
+
+GCode generation
+================
+The GCode generation is quite a large bit of code, as a lot is going on here. Important bits here are:
+* PathOrderOptimizer: This piece of code needs to solve a traveling salesman problem. Given a list of polygons/lines it tries to find the best order in which to print them. It currently does this by finding the closest next polygon to print (see the greedy sketch after this list).
+* Infill: This code generates a group of lines from an area. This is the code that generates the actual infill pattern. There is also a concentric infill function, which is currently not used.
+* Comb: The combing code tries to avoid crossing holes when moving the head around without printing. This code also detects when it fails. The final GCode generator uses the combing code while generating the final GCode, so they interact closely.
+* GCodeExport: The GCode export is a 2-step process. First it collects all the paths for a layer that it needs to print, including all moves, prints and extrusion widths; then it generates the final GCode. This is the only piece of code that has knowledge of GCode, so to generate a different flavor of GCode it is the only piece that needs adjustment. All volumetric calculations also happen here.
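
The greedy "closest next polygon" ordering mentioned for the PathOrderOptimizer can be sketched as follows (illustrative; the real pathOrderOptimizer.cpp also chooses a good starting vertex inside each polygon and handles open lines):

```cpp
#include <cstdint>
#include <cstddef>
#include <limits>
#include <vector>

struct Pt { int64_t X, Y; };

// Greedy nearest-neighbour ordering: starting from the current nozzle
// position, repeatedly pick the unvisited polygon whose first vertex is
// closest, and continue from there.
std::vector<size_t> orderPolygons(Pt start, const std::vector<std::vector<Pt>>& polys)
{
    std::vector<bool> visited(polys.size(), false);
    std::vector<size_t> order;
    Pt current = start;
    for (size_t step = 0; step < polys.size(); step++)
    {
        double best = std::numeric_limits<double>::max();
        size_t bestIdx = 0;
        for (size_t i = 0; i < polys.size(); i++)
        {
            if (visited[i] || polys[i].empty())
                continue;
            double dx = double(polys[i][0].X - current.X);
            double dy = double(polys[i][0].Y - current.Y);
            double dist2 = dx * dx + dy * dy;
            if (dist2 < best) { best = dist2; bestIdx = i; }
        }
        if (best == std::numeric_limits<double>::max())
            break; // only empty polygons remain
        visited[bestIdx] = true;
        order.push_back(bestIdx);
        current = polys[bestIdx][0];
    }
    return order;
}
```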
diff --git a/_tests/runtest.py b/_tests/runtest.py
new file mode 100644
index 0000000..23fe7ec
--- /dev/null
+++ b/_tests/runtest.py
@@ -0,0 +1,14 @@
+#!/usr/bin/python
+
+import sys
+import subprocess
+
+def main(engine):
+ p = subprocess.Popen([engine, '-c', 'supportAngle=60', '-c', 'supportEverywhere=1', '_tests/testModel.stl'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+ if p.wait() != 0:
+ print "Engine failed to report success on test object slice..."
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main(sys.argv[1])
diff --git a/_tests/testModel.stl b/_tests/testModel.stl
new file mode 100644
index 0000000..affcb50
--- /dev/null
+++ b/_tests/testModel.stl
Binary files differ
diff --git a/bridge.cpp b/bridge.cpp
new file mode 100644
index 0000000..7778861
--- /dev/null
+++ b/bridge.cpp
@@ -0,0 +1,56 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "bridge.h"
+#include "utils/polygondebug.h"
+
+int bridgeAngle(SliceLayerPart* part, SliceLayer* prevLayer)
+{
+ //To detect if we have a bridge, first calculate the intersection of the current layer with the previous layer.
+ // This gives us the islands that the layer rests on.
+ Polygons islands;
+ for(unsigned int n=0; n<prevLayer->parts.size(); n++)
+ {
+ if (!part->boundaryBox.hit(prevLayer->parts[n].boundaryBox)) continue;
+
+ islands.add(part->outline.intersection(prevLayer->parts[n].outline));
+ }
+ if (islands.size() > 5)
+ return -1;
+
+ //Next find the 2 largest islands that we rest on.
+ double area1 = 0;
+ double area2 = 0;
+ int idx1 = -1;
+ int idx2 = -1;
+ for(unsigned int n=0; n<islands.size(); n++)
+ {
+ //Skip internal holes
+ if (!islands[n].orientation())
+ continue;
+ double area = fabs(islands[n].area());
+ if (area > area1)
+ {
+ if (area1 > area2)
+ {
+ area2 = area1;
+ idx2 = idx1;
+ }
+ area1 = area;
+ idx1 = n;
+ }else if (area > area2)
+ {
+ area2 = area;
+ idx2 = n;
+ }
+ }
+
+ if (idx1 < 0 || idx2 < 0)
+ return -1;
+
+ Point center1 = islands[idx1].centerOfMass();
+ Point center2 = islands[idx2].centerOfMass();
+
+ double angle = atan2(center2.X - center1.X, center2.Y - center1.Y) / M_PI * 180;
+ if (angle < 0) angle += 360;
+ return angle;
+}
+
diff --git a/bridge.h b/bridge.h
new file mode 100644
index 0000000..fa5fc37
--- /dev/null
+++ b/bridge.h
@@ -0,0 +1,9 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef BRIDGE_H
+#define BRIDGE_H
+
+#include "sliceDataStorage.h"
+
+int bridgeAngle(SliceLayerPart* part, SliceLayer* prevLayer);
+
+#endif//BRIDGE_H
diff --git a/clipper/License.txt b/clipper/License.txt
new file mode 100644
index 0000000..3793cdd
--- /dev/null
+++ b/clipper/License.txt
@@ -0,0 +1,26 @@
+The Clipper Library (including Delphi, C++ & C# source code, other accompanying
+code, examples and documentation), hereafter called "the Software", has been
+released under the following license, terms and conditions:
+
+Boost Software License - Version 1.0 - August 17th, 2003
+http://www.boost.org/LICENSE_1_0.txt
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the Software covered by this license to use, reproduce,
+display, distribute, execute, and transmit the Software, and to prepare
+derivative works of the Software, and to permit third-parties to whom the
+Software is furnished to do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including the
+above license grant, this restriction and the following disclaimer, must be
+included in all copies of the Software, in whole or in part, and all derivative
+works of the Software, unless such copies or derivative works are solely in the
+form of machine-executable object code generated by a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY
+DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/clipper/README b/clipper/README
new file mode 100644
index 0000000..1293e59
--- /dev/null
+++ b/clipper/README
@@ -0,0 +1,365 @@
+============================================================
+Clipper Change Log
+============================================================
+
+v6.1.2 (15 December 2013)
+* Fixed broken C++ header file.
+* Minor improvement to joining polygons.
+
+v6.1.1 (13 December 2013)
+* Fixed a couple of bugs affecting open paths that could
+ raise unhandled exceptions.
+
+v6.1.0 (12 December 2013)
+* Deleted: Previously deprecated code has been removed.
+* Modified: The OffsetPaths function is now deprecated as it has
+ been replaced by the ClipperOffset class which is much more
+ flexible.
+* Bugfixes: Several minor bugs have been fixed including
+ occasionally an incorrect nesting within the PolyTree structure.
+
+v6.0.0 (30 October 2013)
+* Added: Open path (polyline) clipping. A new 'Curves' demo
+ application showcases this (see the 'Curves' directory).
+* Update: Major improvement in the merging of
+ shared/collinear edges in clip solutions (see Execute).
+* Added: The IntPoint structure now has an optional 'Z' member.
+ (See the precompiler directive use_xyz.)
+* Added: Users can now force Clipper to use 32bit integers
+ (via the precompiler directive use_int32) instead of using
+ 64bit integers.
+* Modified: To accommodate open paths, the Polygon and Polygons
+ structures have been renamed Path and Paths respectively. The
+ AddPolygon and AddPolygons methods of the ClipperBase class
+ have been renamed AddPath and AddPaths respectively. Several
+ other functions have been similarly renamed.
+* Modified: The PolyNode Class has a new IsOpen property.
+* Modified: The Clipper class has a new ZFillFunction property.
+* Added: MinkowskiSum and MinkowskiDiff functions added.
+* Added: Several other new functions have been added including
+ PolyTreeToPaths, OpenPathsFromPolyTree and ClosedPathsFromPolyTree.
+* Added: The Clipper constructor now accepts an optional InitOptions
+ parameter to simplify setting properties.
+* Bugfixes: Numerous minor bugs have been fixed.
+* Deprecated: Version 6 is a major upgrade from previous versions
+ and quite a number of changes have been made to exposed structures
+ and functions. To minimize inconvenience to existing library users,
+ some code has been retained and some added to maintain backward
+ compatibility. However, because this code will be removed in a
+ future update, it has been marked as deprecated and a precompiler
+ directive use_deprecated has been defined.
+
+v5.1.6 (23 May 2013)
+* BugFix: CleanPolygon function was buggy.
+* Changed: The behaviour of the 'miter' JoinType has been
+ changed so that when squaring occurs, it's no longer
+ extended up to the miter limit but is squared off at
+ exactly 'delta' units. (This improves the look of mitering
+ with larger limits at acute angles.)
+* Added: New OffsetPolyLines function
+* Update: Minor code refactoring and optimisations
+
+v5.1.5 (5 May 2013)
+* Added: ForceSimple property to Clipper class
+* Update: Improved documentation
+
+v5.1.4 (24 March 2013)
+* Update: CleanPolygon function enhanced.
+* Update: Documentation improved.
+
+v5.1.3 (14 March 2013)
+* Bugfix: Minor bugfixes.
+* Update: Documentation significantly improved.
+
+v5.1.2 (26 February 2013)
+* Bugfix: PolyNode class was missing a constructor.
+* Update: The MiterLimit parameter in the OffsetPolygons
+ function has been renamed Limit and can now also be used to
+ limit the number of vertices used to construct arcs when
+ JoinType is set to jtRound.
+
+v5.1.0 (17 February 2013)
+* Update: ExPolygons has been replaced with the PolyTree &
+ PolyNode classes to more fully represent the parent-child
+ relationships of the polygons returned by Clipper.
+* Added: New CleanPolygon and CleanPolygons functions.
+* Bugfix: Another orientation bug fixed.
+
+v5.0.2 - 30 December 2012
+* Bugfix: Significant fixes in and tidy of the internal
+ Int128 class (which is used only when polygon coordinate
+ values are greater than ±0x3FFFFFFF (~1.07e9)).
+* Update: The Area algorithm has been updated and is faster.
+* Update: Documentation updates. The newish but undocumented
+ 'CheckInputs' parameter of the OffsetPolygons function has been
+ renamed 'AutoFix' and documented too. The comments on rounding
+ have also been improved (ie clearer and expanded).
+
+v4.10.0 - 25 December 2012
+* Bugfix: Orientation bugs should now be resolved (finally!).
+* Bugfix: Bug in Int128 class
+
+v4.9.8 - 2 December 2012
+* Bugfix: Further fixes to rare Orientation bug.
+
+v4.9.7 - 29 November 2012
+* Bugfix: Bug that very rarely returned the wrong polygon
+ orientation.
+* Bugfix: Obscure bug affecting OffsetPolygons when using
+ jtRound for the JoinType parameter and when polygons also
+ contain very large coordinate values (> +/-100000000000).
+
+v4.9.6 - 9 November 2012
+* Bugfix: Another obscure bug related to joining polygons.
+
+v4.9.4 - 2 November 2012
+* Bugfix: Bugs in Int128 class occasionally causing
+ wrong orientations.
+* Bugfix: Further fixes related to joining polygons.
+
+v4.9.0 - 9 October 2012
+* Bugfix: Obscure bug related to joining polygons.
+
+v4.8.9 - 25 September 2012
+* Bugfix: Obscure bug related to precision of intersections.
+
+v4.8.8 - 30 August 2012
+* Bugfix: Fixed bug in OffsetPolygons function introduced in
+ version 4.8.5.
+
+v4.8.7 - 24 August 2012
+* Bugfix: ReversePolygon function in C++ translation was broken.
+* Bugfix: Two obscure bugs affecting orientation fixed too.
+
+v4.8.6 - 11 August 2012
+* Bugfix: Potential for memory overflow errors when using
+ ExPolygons structure.
+* Bugfix: The polygon coordinate range has been reduced to
+ +/- 0x3FFFFFFFFFFFFFFF (4.6e18).
+* Update: ReversePolygons function was misnamed ReversePoints in C++.
+* Update: SimplifyPolygon function now takes a PolyFillType parameter.
+
+v4.8.5 - 15 July 2012
+* Bugfix: Potential for memory overflow errors in OffsetPolygons().
+
+v4.8.4 - 1 June 2012
+* Bugfix: Another obscure bug affecting ExPolygons structure.
+
+v4.8.3 - 27 May 2012
+* Bugfix: Obscure bug causing incorrect removal of a vertex.
+
+v4.8.2 - 21 May 2012
+* Bugfix: Obscure bug could cause an exception when using
+ ExPolygon structure.
+
+v4.8.1 - 12 May 2012
+* Update: Cody tidy and minor bug fixes.
+
+v4.8.0 - 30 April 2012
+* Bugfix: Occasional errors in orientation fixed.
+* Update: Added notes on rounding to the documentation.
+
+v4.7.6 - 11 April 2012
+* Fixed a bug in Orientation function (affecting C# translations only).
+* Minor documentation update.
+
+v4.7.5 - 28 March 2012
+* Bugfix: Fixed a recently introduced bug that occasionally caused an
+ unhandled exception in C++ and C# translations.
+
+v4.7.4 - 15 March 2012
+* Bugfix: Another minor bugfix.
+
+v4.7.2 - 4 March 2012
+* Bugfix: Fixed bug introduced in ver 4.7 which sometimes caused
+ an exception if ExPolygon structure was passed to Clipper's
+ Execute method.
+
+v4.7.1 - 3 March 2012
+* Bugfix: Rare crash when JoinCommonEdges joined polygons that
+ 'cancelled' each other.
+* Bugfix: Clipper's internal Orientation method occasionally
+ returned wrong result.
+* Update: Improved C# code (thanks to numerous excellent suggestions
+ from David Piepgrass)
+
+v4.7 - 10 February 2012
+* Improved the joining of output polygons sharing a common edge.
+
+v4.6.6 - 3 February 2012
+* Bugfix: Another obscure bug occasionally causing incorrect
+ polygon orientation.
+
+v4.6.5 - 17 January 2012
+* Bugfix: Obscure bug occasionally causing incorrect hole
+ assignment in ExPolygon structure.
+
+v4.6.4 - 8 November 2011
+* Added: SimplifyPolygon and SimplifyPolygons functions.
+
+v4.6.3 - 11 November 2011
+* Bugfix: Fixed another minor mitering bug in OffsetPolygons.
+
+v4.6.2 - 10 November 2011
+* Bugfix: Fixed a rare bug in the orientation of polygons
+ returned by Clipper's Execute() method.
+* Bugfix: Previous update introduced a mitering bug in the
+ OffsetPolygons function.
+
+v4.6 - 29 October 2011
+* Added: Support for Positive and Negative polygon fill
+ types (in addition to the EvenOdd and NonZero fill types).
+* Bugfix: The OffsetPolygons function was generating the
+ occasional artefact when 'shrinking' polygons.
+
+v4.5.5 - 8 October 2011
+* Bugfix: Fixed an obscure bug in Clipper's JoinCommonEdges
+ method.
+* Update: Replaced IsClockwise function with Orientation
+ function. The orientation issues affecting OffsetPolygons
+ should now be finally resolved.
+* Change: The Area function once again returns a signed value.
+
+v4.5.1 - 28 September 2011
+* Deleted: The UseFullCoordinateRange property has been
+ deleted since integer range is now managed implicitly.
+* BugFix: Minor bug in OffsetPolygon mitering.
+* Change: C# JoinType enum moved from Clipper class to
+ ClipperLib namespace.
+* Change: The Area function now returns the absolute area
+ (irrespective of orientation).
+* Change: The IsClockwise function now requires a second
+ parameter - YAxisPositiveUpward - to accommodate displays
+  with Y-axis oriented in either direction.
+
+v4.4.4 - 10 September 2011
+* Change: Deleted jtButt from JoinType (used by the
+ OffsetPolygons function).
+* BugFix: Fixed another minor bug in OffsetPolygons function.
+* Update: Further improvements to the help file
+
+v4.4.3 - 29 August 2011
+* BugFix: fixed a minor rounding issue in OffsetPolygons
+ function (affected C++ & C# translations).
+* BugFix: fixed a minor bug in OffsetPolygons' function
+ declaration (affected C++ translation only).
+* Change: 'clipper' namespace changed to 'ClipperLib'
+ namespace in both C++ and C# code to remove the ambiguity
+ between the Clipper class and the namespace. (This also
+ required numerous updates to the accompanying demos.)
+
+v4.4.2 - 26 August 2011
+* BugFix: minor bugfixes in Clipper.
+* Update: the OffsetPolygons function has been significantly
+ improved by offering 4 different join styles.
+
+v4.4.0 - 6 August 2011
+* BugFix: A number of minor bugs have been fixed that mostly
+ affected the new ExPolygons structure.
+
+v4.3.0 - 17 June 2011
+* New: ExPolygons structure that explicitly associates 'hole'
+ polygons with their 'outer' container polygons.
+* New: Execute method overloaded so the solution parameter
+ can now be either Polygons or ExPolygons.
+* BugFix: Fixed a rare bug in solution polygons orientation.
+
+v4.2.8 - 21 May 2011
+* Update: JoinCommonEdges() improved once more.
+* BugFix: Several minor bugs fixed.
+
+v4.2.6 - 1 May 2011
+* Bugfix: minor bug in SlopesEqual function.
+* Update: Merging of output polygons sharing common edges
+  has been significantly improved.
+
+v4.2.4 - 26 April 2011
+ Input polygon coordinates can now contain the full range of
+ signed 64bit integers (ie +/-9,223,372,036,854,775,807). This
+ means that floating point values can be converted to and from
+ Clipper's 64bit integer coordinates structure (IntPoint) and
+ still retain a precision of up to 18 decimal places. However,
+ since the large-integer math that supports this expanded range
+ imposes a small cost on performance (~15%), a new property
+ UseFullCoordinateRange has been added to the Clipper class to
+ allow users the choice of whether or not to use this expanded
+ coordinate range. If this property is disabled, coordinate values
+ are restricted to +/-1,500,000,000.
+
+v4.2 - 12 April 2011
+ JoinCommonEdges() code significantly improved plus other minor
+ improvements.
+
+v4.1.2 - 9 April 2011
+* Update: Minor code tidy.
+* Bugfix: Possible endless loop in JoinCommonEdges() in clipper.pas.
+
+v4.1.1 - 8 April 2011
+* Update: All polygon coordinates are now stored as 64bit integers
+ (though they're still restricted to range -1.5e9 to +1.5e9 pending
+ the inclusion of code supporting 64bit math).
+* Change: AddPolygon and AddPolygons methods now return boolean
+ values.
+* Bugfix: Bug in JoinCommonEdges() caused potential endless loop.
+* Bugfix: Bug in IsClockwise(). (C++ code only)
+
+v4.0 - 5 April 2011
+* Clipper 4 is a major rewrite of earlier versions. The biggest
+ change is that floating point values are no longer used,
+ except for the storing of edge slope values. The main benefit
+ of this is the issue of numerical robustness has been
+ addressed. Due to other major code improvements Clipper v4
+ is approximately 40% faster than Clipper v3.
+* The AddPolyPolygon method has been renamed to AddPolygons.
+* The IgnoreOrientation property has been removed.
+* The clipper_misc library has been merged back into the
+ main clipper library.
+
+v3.1.0 - 17 February 2011
+* Bugfix: Obscure bug in TClipperBase.SetDx method that caused
+  problems with very small edges (edges < 1/1000th pixel in size).
+
+v3.0.3 - 9 February 2011
+* Bugfix: Significant bug, but only in C# code.
+* Update: Minor refactoring.
+
+v3.0 - 31 January 2011
+* Update: Major rewrite of the portion of code that calculates
+ the output polygons' orientation.
+* Update: Help file significantly improved.
+* Change: Renamed ForceOrientation property to IgnoreOrientation.
+ If the orientation of output polygons is not important, or can
+ be managed separately, clipping routines can be sped up by about
+ 60% by setting IgnoreOrientation to true. Defaults to false.
+* Change: The OffsetPolygon and Area functions have been moved to
+ the new unit - clipper_misc.
+
+2.99 - 15 January 2011
+* Bugfix: Obscure bug in AddPolygon method could cause an endless loop.
+
+2.8 - 20 November 2010
+* Updated: Output polygons which previously shared a common
+ edge are now merged.
+* Changed: The orientation of outer polygons is now clockwise
+ when the display's Y axis is positive downwards (as is
+ typical for most Windows applications). Inner polygons
+ (holes) have the opposite orientation.
+* Added: Support module for Cairo Graphics Library (with demo).
+* Updated: C# and C++ demos.
+
+2.522 - 15 October 2010
+* Added C# translation (thanks to Olivier Lejeune) and
+ a link to Ruby bindings (thanks to Mike Owens).
+
+2.0 - 30 July 2010
+* Clipper now clips using both the Even-Odd (alternate) and
+ Non-Zero (winding) polygon filling rules. (Previously Clipper
+ assumed the Even-Odd rule for polygon filling.)
+
+1.4c - 16 June 2010
+* Added C++ support for AGG graphics library
+
+1.2s - 2 June 2010
+* Added C++ translation of clipper.pas
+
+1.0 - 9 May 2010 \ No newline at end of file
diff --git a/clipper/clipper.cpp b/clipper/clipper.cpp
new file mode 100644
index 0000000..340c149
--- /dev/null
+++ b/clipper/clipper.cpp
@@ -0,0 +1,4545 @@
+/*******************************************************************************
+* *
+* Author : Angus Johnson *
+* Version : 6.1.2 *
+* Date : 15 December 2013 *
+* Website : http://www.angusj.com *
+* Copyright : Angus Johnson 2010-2013 *
+* *
+* License: *
+* Use, modification & distribution is subject to Boost Software License Ver 1. *
+* http://www.boost.org/LICENSE_1_0.txt *
+* *
+* Attributions: *
+* The code in this library is an extension of Bala Vatti's clipping algorithm: *
+* "A generic solution to polygon clipping" *
+* Communications of the ACM, Vol 35, Issue 7 (July 1992) pp 56-63. *
+* http://portal.acm.org/citation.cfm?id=129906 *
+* *
+* Computer graphics and geometric modeling: implementation and algorithms *
+* By Max K. Agoston *
+* Springer; 1 edition (January 4, 2005) *
+* http://books.google.com/books?q=vatti+clipping+agoston *
+* *
+* See also: *
+* "Polygon Offsetting by Computing Winding Numbers" *
+* Paper no. DETC2005-85513 pp. 565-575 *
+* ASME 2005 International Design Engineering Technical Conferences *
+* and Computers and Information in Engineering Conference (IDETC/CIE2005) *
+* September 24-28, 2005 , Long Beach, California, USA *
+* http://www.me.berkeley.edu/~mcmains/pubs/DAC05OffsetPolygon.pdf *
+* *
+*******************************************************************************/
+
+/*******************************************************************************
+* *
+* This is a translation of the Delphi Clipper library and the naming style *
+* used has retained a Delphi flavour. *
+* *
+*******************************************************************************/
+
+#include "clipper.hpp"
+#include <cmath>
+#include <vector>
+#include <algorithm>
+#include <stdexcept>
+#include <cstring>
+#include <cstdlib>
+#include <ostream>
+#include <functional>
+
+namespace ClipperLib {
+
+#ifdef use_int32
+ static cInt const loRange = 46340;
+ static cInt const hiRange = 46340;
+#else
+ static cInt const loRange = 0x3FFFFFFF;
+ static cInt const hiRange = 0x3FFFFFFFFFFFFFFFLL;
+ typedef unsigned long long ulong64;
+#endif
+
+static double const pi = 3.141592653589793238;
+static double const two_pi = pi *2;
+static double const def_arc_tolerance = 0.25;
+
+enum Direction { dRightToLeft, dLeftToRight };
+
+static int const Unassigned = -1; //edge not currently 'owning' a solution
+static int const Skip = -2; //edge that would otherwise close a path
+
+#define HORIZONTAL (-1.0E+40)
+#define TOLERANCE (1.0e-20)
+#define NEAR_ZERO(val) (((val) > -TOLERANCE) && ((val) < TOLERANCE))
+
+struct TEdge {
+ IntPoint Bot;
+ IntPoint Curr;
+ IntPoint Top;
+ IntPoint Delta;
+ double Dx;
+ PolyType PolyTyp;
+ EdgeSide Side;
+ int WindDelta; //1 or -1 depending on winding direction
+ int WindCnt;
+ int WindCnt2; //winding count of the opposite polytype
+ int OutIdx;
+ TEdge *Next;
+ TEdge *Prev;
+ TEdge *NextInLML;
+ TEdge *NextInAEL;
+ TEdge *PrevInAEL;
+ TEdge *NextInSEL;
+ TEdge *PrevInSEL;
+};
+
+struct IntersectNode {
+ TEdge *Edge1;
+ TEdge *Edge2;
+ IntPoint Pt;
+};
+
+struct LocalMinima {
+ cInt Y;
+ TEdge *LeftBound;
+ TEdge *RightBound;
+ LocalMinima *Next;
+};
+
+struct OutPt;
+
+struct OutRec {
+ int Idx;
+ bool IsHole;
+ bool IsOpen;
+ OutRec *FirstLeft; //see comments in clipper.pas
+ PolyNode *PolyNd;
+ OutPt *Pts;
+ OutPt *BottomPt;
+};
+
+struct OutPt {
+ int Idx;
+ IntPoint Pt;
+ OutPt *Next;
+ OutPt *Prev;
+};
+
+struct Join {
+ OutPt *OutPt1;
+ OutPt *OutPt2;
+ IntPoint OffPt;
+};
+
+//------------------------------------------------------------------------------
+//------------------------------------------------------------------------------
+
+inline cInt Round(double val)
+{
+ if ((val < 0)) return static_cast<cInt>(val - 0.5);
+ else return static_cast<cInt>(val + 0.5);
+}
+//------------------------------------------------------------------------------
+
+inline cInt Abs(cInt val)
+{
+ return val < 0 ? -val : val;
+}
+
+//------------------------------------------------------------------------------
+// PolyTree methods ...
+//------------------------------------------------------------------------------
+
+void PolyTree::Clear()
+{
+ for (PolyNodes::size_type i = 0; i < AllNodes.size(); ++i)
+ delete AllNodes[i];
+ AllNodes.resize(0);
+ Childs.resize(0);
+}
+//------------------------------------------------------------------------------
+
+PolyNode* PolyTree::GetFirst() const
+{
+ if (!Childs.empty())
+ return Childs[0];
+ else
+ return 0;
+}
+//------------------------------------------------------------------------------
+
+int PolyTree::Total() const
+{
+ return (int)AllNodes.size();
+}
+
+//------------------------------------------------------------------------------
+// PolyNode methods ...
+//------------------------------------------------------------------------------
+
+PolyNode::PolyNode(): Childs(), Parent(0), Index(0), m_IsOpen(false)
+{
+}
+//------------------------------------------------------------------------------
+
+int PolyNode::ChildCount() const
+{
+ return (int)Childs.size();
+}
+//------------------------------------------------------------------------------
+
+void PolyNode::AddChild(PolyNode& child)
+{
+ unsigned cnt = (unsigned)Childs.size();
+ Childs.push_back(&child);
+ child.Parent = this;
+ child.Index = cnt;
+}
+//------------------------------------------------------------------------------
+
+PolyNode* PolyNode::GetNext() const
+{
+ if (!Childs.empty())
+ return Childs[0];
+ else
+ return GetNextSiblingUp();
+}
+//------------------------------------------------------------------------------
+
+PolyNode* PolyNode::GetNextSiblingUp() const
+{
+ if (!Parent) //protects against PolyTree.GetNextSiblingUp()
+ return 0;
+ else if (Index == Parent->Childs.size() - 1)
+ return Parent->GetNextSiblingUp();
+ else
+ return Parent->Childs[Index + 1];
+}
+//------------------------------------------------------------------------------
+
+bool PolyNode::IsHole() const
+{
+ bool result = true;
+ PolyNode* node = Parent;
+ while (node)
+ {
+ result = !result;
+ node = node->Parent;
+ }
+ return result;
+}
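+//nb (illustrative sketch only, not part of the library): hole status is derived
+//purely from nesting depth - the flag toggles once per ancestor - so a typical
+//PolyTree walk might look like ...
+//  for (PolyNode* n = tree.GetFirst(); n; n = n->GetNext())
+//    if (n->IsHole()) { /*n represents a hole nested inside its Parent*/ }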
+//------------------------------------------------------------------------------
+
+bool PolyNode::IsOpen() const
+{
+ return m_IsOpen;
+}
+//------------------------------------------------------------------------------
+
+#ifndef use_int32
+
+//------------------------------------------------------------------------------
+// Int128 class (enables safe math on signed 64bit integers)
+// eg Int128 val1((cInt)9223372036854775807); //ie 2^63 -1
+// Int128 val2((cInt)9223372036854775807);
+// Int128 val3 = val1 * val2;
+// val3.AsString => "85070591730234615847396907784232501249" (8.5e+37)
+//------------------------------------------------------------------------------
+
+class Int128
+{
+ public:
+
+ cUInt lo;
+ cInt hi;
+
+ Int128(cInt _lo = 0)
+ {
+ lo = (cUInt)_lo;
+ if (_lo < 0) hi = -1; else hi = 0;
+ }
+
+
+ Int128(const Int128 &val): lo(val.lo), hi(val.hi){}
+
+ Int128(const cInt& _hi, const ulong64& _lo): lo(_lo), hi(_hi){}
+
+ Int128& operator = (const cInt &val)
+ {
+ lo = (ulong64)val;
+ if (val < 0) hi = -1; else hi = 0;
+ return *this;
+ }
+
+ bool operator == (const Int128 &val) const
+ {return (hi == val.hi && lo == val.lo);}
+
+ bool operator != (const Int128 &val) const
+ { return !(*this == val);}
+
+ bool operator > (const Int128 &val) const
+ {
+ if (hi != val.hi)
+ return hi > val.hi;
+ else
+ return lo > val.lo;
+ }
+
+ bool operator < (const Int128 &val) const
+ {
+ if (hi != val.hi)
+ return hi < val.hi;
+ else
+ return lo < val.lo;
+ }
+
+ bool operator >= (const Int128 &val) const
+ { return !(*this < val);}
+
+ bool operator <= (const Int128 &val) const
+ { return !(*this > val);}
+
+ Int128& operator += (const Int128 &rhs)
+ {
+ hi += rhs.hi;
+ lo += rhs.lo;
+ if (lo < rhs.lo) hi++;
+ return *this;
+ }
+
+ Int128 operator + (const Int128 &rhs) const
+ {
+ Int128 result(*this);
+ result+= rhs;
+ return result;
+ }
+
+ Int128& operator -= (const Int128 &rhs)
+ {
+ *this += -rhs;
+ return *this;
+ }
+
+ Int128 operator - (const Int128 &rhs) const
+ {
+ Int128 result(*this);
+ result -= rhs;
+ return result;
+ }
+
+ Int128 operator-() const //unary negation
+ {
+ if (lo == 0)
+ return Int128(-hi,0);
+ else
+ return Int128(~hi,~lo +1);
+ }
+
+ Int128 operator/ (const Int128 &rhs) const
+ {
+ if (rhs.lo == 0 && rhs.hi == 0)
+ throw "Int128 operator/: divide by zero";
+
+ bool negate = (rhs.hi < 0) != (hi < 0);
+ Int128 dividend = *this;
+ Int128 divisor = rhs;
+ if (dividend.hi < 0) dividend = -dividend;
+ if (divisor.hi < 0) divisor = -divisor;
+
+ if (divisor < dividend)
+ {
+ Int128 result = Int128(0);
+ Int128 cntr = Int128(1);
+ while (divisor.hi >= 0 && !(divisor > dividend))
+ {
+ divisor.hi <<= 1;
+ if ((cInt)divisor.lo < 0) divisor.hi++;
+ divisor.lo <<= 1;
+
+ cntr.hi <<= 1;
+ if ((cInt)cntr.lo < 0) cntr.hi++;
+ cntr.lo <<= 1;
+ }
+ divisor.lo >>= 1;
+ if ((divisor.hi & 1) == 1)
+ divisor.lo |= 0x8000000000000000LL;
+ divisor.hi = (ulong64)divisor.hi >> 1;
+
+ cntr.lo >>= 1;
+ if ((cntr.hi & 1) == 1)
+ cntr.lo |= 0x8000000000000000LL;
+ cntr.hi >>= 1;
+
+ while (cntr.hi != 0 || cntr.lo != 0)
+ {
+ if (!(dividend < divisor))
+ {
+ dividend -= divisor;
+ result.hi |= cntr.hi;
+ result.lo |= cntr.lo;
+ }
+ divisor.lo >>= 1;
+ if ((divisor.hi & 1) == 1)
+ divisor.lo |= 0x8000000000000000LL;
+ divisor.hi >>= 1;
+
+ cntr.lo >>= 1;
+ if ((cntr.hi & 1) == 1)
+ cntr.lo |= 0x8000000000000000LL;
+ cntr.hi >>= 1;
+ }
+ if (negate) result = -result;
+ return result;
+ }
+ else if (rhs.hi == this->hi && rhs.lo == this->lo)
+ return Int128(negate ? -1: 1);
+ else
+ return Int128(0);
+ }
+
+ double AsDouble() const
+ {
+ const double shift64 = 18446744073709551616.0; //2^64
+ if (hi < 0)
+ {
+ cUInt lo_ = ~lo + 1;
+ if (lo_ == 0) return (double)hi * shift64;
+ else return -(double)(lo_ + ~hi * shift64);
+ }
+ else
+ return (double)(lo + hi * shift64);
+ }
+
+};
+//------------------------------------------------------------------------------
+
+Int128 Int128Mul (cInt lhs, cInt rhs)
+{
+ bool negate = (lhs < 0) != (rhs < 0);
+
+ if (lhs < 0) lhs = -lhs;
+ ulong64 int1Hi = ulong64(lhs) >> 32;
+ ulong64 int1Lo = ulong64(lhs & 0xFFFFFFFF);
+
+ if (rhs < 0) rhs = -rhs;
+ ulong64 int2Hi = ulong64(rhs) >> 32;
+ ulong64 int2Lo = ulong64(rhs & 0xFFFFFFFF);
+
+ //nb: see comments in clipper.pas
+ ulong64 a = int1Hi * int2Hi;
+ ulong64 b = int1Lo * int2Lo;
+ ulong64 c = int1Hi * int2Lo + int1Lo * int2Hi;
+
+ Int128 tmp;
+ tmp.hi = cInt(a + (c >> 32));
+ tmp.lo = cInt(c << 32);
+ tmp.lo += cInt(b);
+ if (tmp.lo < b) tmp.hi++;
+ if (negate) tmp = -tmp;
+ return tmp;
+}
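+//nb (illustrative example only, operand values assumed): Int128Mul is how the
+//library keeps 64bit cross-products exact when they would overflow a cInt, eg
+//  Int128 p = Int128Mul(3000000000LL, 4000000000LL); //product exceeds 2^63
+//  bool symmetric = (p == Int128Mul(4000000000LL, 3000000000LL)); //still exact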
+#endif
+
+//------------------------------------------------------------------------------
+// Miscellaneous global functions
+//------------------------------------------------------------------------------
+
+bool Orientation(const Path &poly)
+{
+ return Area(poly) >= 0;
+}
+//------------------------------------------------------------------------------
+
+double Area(const Path &poly)
+{
+ int size = (int)poly.size();
+ if (size < 3) return 0;
+
+ double a = 0;
+ for (int i = 0, j = size -1; i < size; ++i)
+ {
+ a += ((double)poly[j].X + poly[i].X) * ((double)poly[j].Y - poly[i].Y);
+ j = i;
+ }
+ return -a * 0.5;
+}
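+//nb (worked example only, coordinates assumed): for the square
+//(0,0),(10,0),(10,10),(0,10) the loop above accumulates a = (10+10)*(0-10) = -200,
+//so Area() returns -a * 0.5 = +100 and Orientation(), which simply tests
+//Area(poly) >= 0, reports true.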
+//------------------------------------------------------------------------------
+
+double Area(const OutRec &outRec)
+{
+ OutPt *op = outRec.Pts;
+ if (!op) return 0;
+ double a = 0;
+ do {
+ a += (double)(op->Prev->Pt.X + op->Pt.X) * (double)(op->Prev->Pt.Y - op->Pt.Y);
+ op = op->Next;
+ } while (op != outRec.Pts);
+ return a * 0.5;
+}
+//------------------------------------------------------------------------------
+
+bool PointIsVertex(const IntPoint &Pt, OutPt *pp)
+{
+ OutPt *pp2 = pp;
+ do
+ {
+ if (pp2->Pt == Pt) return true;
+ pp2 = pp2->Next;
+ }
+ while (pp2 != pp);
+ return false;
+}
+//------------------------------------------------------------------------------
+
+int PointInPolygon (const IntPoint& pt, OutPt* op)
+{
+ //returns 0 if false, +1 if true, -1 if pt ON polygon boundary
+ //http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.88.5498&rep=rep1&type=pdf
+ int result = 0;
+ OutPt* startOp = op;
+ for(;;)
+ {
+ if (op->Next->Pt.Y == pt.Y)
+ {
+ if ((op->Next->Pt.X == pt.X) || (op->Pt.Y == pt.Y &&
+ ((op->Next->Pt.X > pt.X) == (op->Pt.X < pt.X)))) return -1;
+ }
+ if ((op->Pt.Y < pt.Y) != (op->Next->Pt.Y < pt.Y))
+ {
+ if (op->Pt.X >= pt.X)
+ {
+ if (op->Next->Pt.X > pt.X) result = 1 - result;
+ else
+ {
+ double d = (double)(op->Pt.X - pt.X) * (op->Next->Pt.Y - pt.Y) -
+ (double)(op->Next->Pt.X - pt.X) * (op->Pt.Y - pt.Y);
+ if (!d) return -1;
+ if ((d > 0) == (op->Next->Pt.Y > op->Pt.Y)) result = 1 - result;
+ }
+ } else
+ {
+ if (op->Next->Pt.X > pt.X)
+ {
+ double d = (double)(op->Pt.X - pt.X) * (op->Next->Pt.Y - pt.Y) -
+ (double)(op->Next->Pt.X - pt.X) * (op->Pt.Y - pt.Y);
+ if (!d) return -1;
+ if ((d > 0) == (op->Next->Pt.Y > op->Pt.Y)) result = 1 - result;
+ }
+ }
+ }
+ op = op->Next;
+ if (startOp == op) break;
+ }
+ return result;
+}
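+//nb (worked example only, coordinates assumed): for an OutPt ring tracing the
+//square (0,0),(10,0),(10,10),(0,10), pt = (5,5) crosses one edge to its right
+//and returns 1 (inside), pt = (15,5) crosses none and returns 0 (outside), and
+//pt = (10,5) hits the d == 0 test on the right-hand edge and returns -1.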
+//------------------------------------------------------------------------------
+
+bool Poly2ContainsPoly1(OutPt* OutPt1, OutPt* OutPt2)
+{
+ OutPt* op = OutPt1;
+ do
+ {
+ int res = PointInPolygon(op->Pt, OutPt2);
+ if (res >= 0) return res != 0;
+ op = op->Next;
+ }
+ while (op != OutPt1);
+ return true;
+}
+//----------------------------------------------------------------------
+
+bool SlopesEqual(const TEdge &e1, const TEdge &e2, bool UseFullInt64Range)
+{
+#ifndef use_int32
+ if (UseFullInt64Range)
+ return Int128Mul(e1.Delta.Y, e2.Delta.X) == Int128Mul(e1.Delta.X, e2.Delta.Y);
+ else
+#endif
+ return e1.Delta.Y * e2.Delta.X == e1.Delta.X * e2.Delta.Y;
+}
+//------------------------------------------------------------------------------
+
+bool SlopesEqual(const IntPoint pt1, const IntPoint pt2,
+ const IntPoint pt3, bool UseFullInt64Range)
+{
+#ifndef use_int32
+ if (UseFullInt64Range)
+ return Int128Mul(pt1.Y-pt2.Y, pt2.X-pt3.X) == Int128Mul(pt1.X-pt2.X, pt2.Y-pt3.Y);
+ else
+#endif
+ return (pt1.Y-pt2.Y)*(pt2.X-pt3.X) == (pt1.X-pt2.X)*(pt2.Y-pt3.Y);
+}
+//------------------------------------------------------------------------------
+
+bool SlopesEqual(const IntPoint pt1, const IntPoint pt2,
+ const IntPoint pt3, const IntPoint pt4, bool UseFullInt64Range)
+{
+#ifndef use_int32
+ if (UseFullInt64Range)
+ return Int128Mul(pt1.Y-pt2.Y, pt3.X-pt4.X) == Int128Mul(pt1.X-pt2.X, pt3.Y-pt4.Y);
+ else
+#endif
+ return (pt1.Y-pt2.Y)*(pt3.X-pt4.X) == (pt1.X-pt2.X)*(pt3.Y-pt4.Y);
+}
+//------------------------------------------------------------------------------
+
+inline bool IsHorizontal(TEdge &e)
+{
+ return e.Delta.Y == 0;
+}
+//------------------------------------------------------------------------------
+
+inline double GetDx(const IntPoint pt1, const IntPoint pt2)
+{
+ return (pt1.Y == pt2.Y) ?
+ HORIZONTAL : (double)(pt2.X - pt1.X) / (pt2.Y - pt1.Y);
+}
+//---------------------------------------------------------------------------
+
+inline void SetDx(TEdge &e)
+{
+ e.Delta.X = (e.Top.X - e.Bot.X);
+ e.Delta.Y = (e.Top.Y - e.Bot.Y);
+
+ if (e.Delta.Y == 0) e.Dx = HORIZONTAL;
+ else e.Dx = (double)(e.Delta.X) / e.Delta.Y;
+}
+//---------------------------------------------------------------------------
+
+inline void SwapSides(TEdge &Edge1, TEdge &Edge2)
+{
+ EdgeSide Side = Edge1.Side;
+ Edge1.Side = Edge2.Side;
+ Edge2.Side = Side;
+}
+//------------------------------------------------------------------------------
+
+inline void SwapPolyIndexes(TEdge &Edge1, TEdge &Edge2)
+{
+ int OutIdx = Edge1.OutIdx;
+ Edge1.OutIdx = Edge2.OutIdx;
+ Edge2.OutIdx = OutIdx;
+}
+//------------------------------------------------------------------------------
+
+inline cInt TopX(TEdge &edge, const cInt currentY)
+{
+ return ( currentY == edge.Top.Y ) ?
+ edge.Top.X : edge.Bot.X + Round(edge.Dx *(currentY - edge.Bot.Y));
+}
+//------------------------------------------------------------------------------
+
+bool IntersectPoint(TEdge &Edge1, TEdge &Edge2,
+ IntPoint &ip, bool UseFullInt64Range)
+{
+#ifdef use_xyz
+ ip.Z = 0;
+#endif
+ double b1, b2;
+ //nb: with very large coordinate values, it's possible for SlopesEqual() to
+  //return false but for the edge.Dx values to be equal due to double precision rounding.
+ if (SlopesEqual(Edge1, Edge2, UseFullInt64Range) || Edge1.Dx == Edge2.Dx)
+ {
+ if (Edge2.Bot.Y > Edge1.Bot.Y) ip = Edge2.Bot;
+ else ip = Edge1.Bot;
+ return false;
+ }
+ else if (Edge1.Delta.X == 0)
+ {
+ ip.X = Edge1.Bot.X;
+ if (IsHorizontal(Edge2))
+ ip.Y = Edge2.Bot.Y;
+ else
+ {
+ b2 = Edge2.Bot.Y - (Edge2.Bot.X / Edge2.Dx);
+ ip.Y = Round(ip.X / Edge2.Dx + b2);
+ }
+ }
+ else if (Edge2.Delta.X == 0)
+ {
+ ip.X = Edge2.Bot.X;
+ if (IsHorizontal(Edge1))
+ ip.Y = Edge1.Bot.Y;
+ else
+ {
+ b1 = Edge1.Bot.Y - (Edge1.Bot.X / Edge1.Dx);
+ ip.Y = Round(ip.X / Edge1.Dx + b1);
+ }
+ }
+ else
+ {
+ b1 = Edge1.Bot.X - Edge1.Bot.Y * Edge1.Dx;
+ b2 = Edge2.Bot.X - Edge2.Bot.Y * Edge2.Dx;
+ double q = (b2-b1) / (Edge1.Dx - Edge2.Dx);
+ ip.Y = Round(q);
+ if (std::fabs(Edge1.Dx) < std::fabs(Edge2.Dx))
+ ip.X = Round(Edge1.Dx * q + b1);
+ else
+ ip.X = Round(Edge2.Dx * q + b2);
+ }
+
+ if (ip.Y < Edge1.Top.Y || ip.Y < Edge2.Top.Y)
+ {
+ if (Edge1.Top.Y > Edge2.Top.Y)
+ ip.Y = Edge1.Top.Y;
+ else
+ ip.Y = Edge2.Top.Y;
+ if (std::fabs(Edge1.Dx) < std::fabs(Edge2.Dx))
+ ip.X = TopX(Edge1, ip.Y);
+ else
+ ip.X = TopX(Edge2, ip.Y);
+ }
+ return true;
+}
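+//nb (worked illustration only, coordinates assumed): with Dx = Delta.X/Delta.Y
+//each edge is the line x = Dx*y + b, so for Edge1 running (0,10)->(10,0)
+//(Dx = -1, b1 = 10) and Edge2 running (10,10)->(0,0) (Dx = 1, b2 = 0) the code
+//solves q = (b2 - b1)/(Dx1 - Dx2) = 5 and rounds the intersection to (5,5).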
+//------------------------------------------------------------------------------
+
+void ReversePolyPtLinks(OutPt *pp)
+{
+ if (!pp) return;
+ OutPt *pp1, *pp2;
+ pp1 = pp;
+ do {
+ pp2 = pp1->Next;
+ pp1->Next = pp1->Prev;
+ pp1->Prev = pp2;
+ pp1 = pp2;
+ } while( pp1 != pp );
+}
+//------------------------------------------------------------------------------
+
+void DisposeOutPts(OutPt*& pp)
+{
+ if (pp == 0) return;
+ pp->Prev->Next = 0;
+ while( pp )
+ {
+ OutPt *tmpPp = pp;
+ pp = pp->Next;
+ delete tmpPp;
+ }
+}
+//------------------------------------------------------------------------------
+
+inline void InitEdge(TEdge* e, TEdge* eNext, TEdge* ePrev, const IntPoint& Pt)
+{
+ std::memset(e, 0, sizeof(TEdge));
+ e->Next = eNext;
+ e->Prev = ePrev;
+ e->Curr = Pt;
+ e->OutIdx = Unassigned;
+}
+//------------------------------------------------------------------------------
+
+void InitEdge2(TEdge& e, PolyType Pt)
+{
+ if (e.Curr.Y >= e.Next->Curr.Y)
+ {
+ e.Bot = e.Curr;
+ e.Top = e.Next->Curr;
+ } else
+ {
+ e.Top = e.Curr;
+ e.Bot = e.Next->Curr;
+ }
+ SetDx(e);
+ e.PolyTyp = Pt;
+}
+//------------------------------------------------------------------------------
+
+TEdge* RemoveEdge(TEdge* e)
+{
+ //removes e from double_linked_list (but without removing from memory)
+ e->Prev->Next = e->Next;
+ e->Next->Prev = e->Prev;
+ TEdge* result = e->Next;
+ e->Prev = 0; //flag as removed (see ClipperBase.Clear)
+ return result;
+}
+//------------------------------------------------------------------------------
+
+inline void ReverseHorizontal(TEdge &e)
+{
+ //swap horizontal edges' Top and Bottom x's so they follow the natural
+ //progression of the bounds - ie so their xbots will align with the
+ //adjoining lower edge. [Helpful in the ProcessHorizontal() method.]
+ cInt tmp = e.Top.X;
+ e.Top.X = e.Bot.X;
+ e.Bot.X = tmp;
+#ifdef use_xyz
+ tmp = e.Top.Z;
+ e.Top.Z = e.Bot.Z;
+ e.Bot.Z = tmp;
+#endif
+}
+//------------------------------------------------------------------------------
+
+void SwapPoints(IntPoint &pt1, IntPoint &pt2)
+{
+ IntPoint tmp = pt1;
+ pt1 = pt2;
+ pt2 = tmp;
+}
+//------------------------------------------------------------------------------
+
+bool GetOverlapSegment(IntPoint pt1a, IntPoint pt1b, IntPoint pt2a,
+ IntPoint pt2b, IntPoint &pt1, IntPoint &pt2)
+{
+ //precondition: segments are Collinear.
+ if (Abs(pt1a.X - pt1b.X) > Abs(pt1a.Y - pt1b.Y))
+ {
+ if (pt1a.X > pt1b.X) SwapPoints(pt1a, pt1b);
+ if (pt2a.X > pt2b.X) SwapPoints(pt2a, pt2b);
+ if (pt1a.X > pt2a.X) pt1 = pt1a; else pt1 = pt2a;
+ if (pt1b.X < pt2b.X) pt2 = pt1b; else pt2 = pt2b;
+ return pt1.X < pt2.X;
+ } else
+ {
+ if (pt1a.Y < pt1b.Y) SwapPoints(pt1a, pt1b);
+ if (pt2a.Y < pt2b.Y) SwapPoints(pt2a, pt2b);
+ if (pt1a.Y < pt2a.Y) pt1 = pt1a; else pt1 = pt2a;
+ if (pt1b.Y > pt2b.Y) pt2 = pt1b; else pt2 = pt2b;
+ return pt1.Y > pt2.Y;
+ }
+}
+//------------------------------------------------------------------------------
+
+bool FirstIsBottomPt(const OutPt* btmPt1, const OutPt* btmPt2)
+{
+ OutPt *p = btmPt1->Prev;
+ while ((p->Pt == btmPt1->Pt) && (p != btmPt1)) p = p->Prev;
+ double dx1p = std::fabs(GetDx(btmPt1->Pt, p->Pt));
+ p = btmPt1->Next;
+ while ((p->Pt == btmPt1->Pt) && (p != btmPt1)) p = p->Next;
+ double dx1n = std::fabs(GetDx(btmPt1->Pt, p->Pt));
+
+ p = btmPt2->Prev;
+ while ((p->Pt == btmPt2->Pt) && (p != btmPt2)) p = p->Prev;
+ double dx2p = std::fabs(GetDx(btmPt2->Pt, p->Pt));
+ p = btmPt2->Next;
+ while ((p->Pt == btmPt2->Pt) && (p != btmPt2)) p = p->Next;
+ double dx2n = std::fabs(GetDx(btmPt2->Pt, p->Pt));
+ return (dx1p >= dx2p && dx1p >= dx2n) || (dx1n >= dx2p && dx1n >= dx2n);
+}
+//------------------------------------------------------------------------------
+
+OutPt* GetBottomPt(OutPt *pp)
+{
+ OutPt* dups = 0;
+ OutPt* p = pp->Next;
+ while (p != pp)
+ {
+ if (p->Pt.Y > pp->Pt.Y)
+ {
+ pp = p;
+ dups = 0;
+ }
+ else if (p->Pt.Y == pp->Pt.Y && p->Pt.X <= pp->Pt.X)
+ {
+ if (p->Pt.X < pp->Pt.X)
+ {
+ dups = 0;
+ pp = p;
+ } else
+ {
+ if (p->Next != pp && p->Prev != pp) dups = p;
+ }
+ }
+ p = p->Next;
+ }
+ if (dups)
+ {
+ //there appears to be at least 2 vertices at BottomPt so ...
+ while (dups != p)
+ {
+ if (!FirstIsBottomPt(p, dups)) pp = dups;
+ dups = dups->Next;
+ while (dups->Pt != pp->Pt) dups = dups->Next;
+ }
+ }
+ return pp;
+}
+//------------------------------------------------------------------------------
+
+bool FindSegment(OutPt* &pp, bool UseFullInt64Range,
+ IntPoint &pt1, IntPoint &pt2)
+{
+  //pt1 & pt2 => the overlap segment (if the function returns true)
+ if (!pp) return false;
+ OutPt* pp2 = pp;
+ IntPoint pt1a = pt1, pt2a = pt2;
+ do
+ {
+ if (SlopesEqual(pt1a, pt2a, pp->Pt, pp->Prev->Pt, UseFullInt64Range) &&
+ SlopesEqual(pt1a, pt2a, pp->Pt, UseFullInt64Range) &&
+ GetOverlapSegment(pt1a, pt2a, pp->Pt, pp->Prev->Pt, pt1, pt2))
+ return true;
+ pp = pp->Next;
+ }
+ while (pp != pp2);
+ return false;
+}
+//------------------------------------------------------------------------------
+
+bool Pt2IsBetweenPt1AndPt3(const IntPoint pt1,
+ const IntPoint pt2, const IntPoint pt3)
+{
+ if ((pt1 == pt3) || (pt1 == pt2) || (pt3 == pt2))
+ return false;
+ else if (pt1.X != pt3.X)
+ return (pt2.X > pt1.X) == (pt2.X < pt3.X);
+ else
+ return (pt2.Y > pt1.Y) == (pt2.Y < pt3.Y);
+}
+//------------------------------------------------------------------------------
+
+OutPt* InsertPolyPtBetween(OutPt* p1, OutPt* p2, const IntPoint Pt)
+{
+ if (p1 == p2) throw "JoinError";
+ OutPt* result = new OutPt;
+ result->Pt = Pt;
+ if (p2 == p1->Next)
+ {
+ p1->Next = result;
+ p2->Prev = result;
+ result->Next = p2;
+ result->Prev = p1;
+ } else
+ {
+ p2->Next = result;
+ p1->Prev = result;
+ result->Next = p1;
+ result->Prev = p2;
+ }
+ return result;
+}
+//------------------------------------------------------------------------------
+
+bool HorzSegmentsOverlap(const IntPoint& pt1a, const IntPoint& pt1b,
+ const IntPoint& pt2a, const IntPoint& pt2b)
+{
+ //precondition: both segments are horizontal
+ if ((pt1a.X > pt2a.X) == (pt1a.X < pt2b.X)) return true;
+ else if ((pt1b.X > pt2a.X) == (pt1b.X < pt2b.X)) return true;
+ else if ((pt2a.X > pt1a.X) == (pt2a.X < pt1b.X)) return true;
+ else if ((pt2b.X > pt1a.X) == (pt2b.X < pt1b.X)) return true;
+ else if ((pt1a.X == pt2a.X) && (pt1b.X == pt2b.X)) return true;
+ else if ((pt1a.X == pt2b.X) && (pt1b.X == pt2a.X)) return true;
+ else return false;
+}
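+//nb (illustrative example only, coordinates assumed): given the horizontal
+//precondition above, pt1a-pt1b = (10,y)-(30,y) and pt2a-pt2b = (20,y)-(50,y)
+//overlap (the second test fires because 30 lies strictly between 20 and 50),
+//whereas (10,y)-(20,y) versus (30,y)-(40,y) fails every test and returns false.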
+
+
+//------------------------------------------------------------------------------
+// ClipperBase class methods ...
+//------------------------------------------------------------------------------
+
+ClipperBase::ClipperBase() //constructor
+{
+ m_MinimaList = 0;
+ m_CurrentLM = 0;
+ m_UseFullRange = false;
+}
+//------------------------------------------------------------------------------
+
+ClipperBase::~ClipperBase() //destructor
+{
+ Clear();
+}
+//------------------------------------------------------------------------------
+
+void RangeTest(const IntPoint& Pt, bool& useFullRange)
+{
+ if (useFullRange)
+ {
+ if (Pt.X > hiRange || Pt.Y > hiRange || -Pt.X > hiRange || -Pt.Y > hiRange)
+ throw "Coordinate outside allowed range";
+ }
+  else if (Pt.X > loRange || Pt.Y > loRange || -Pt.X > loRange || -Pt.Y > loRange)
+ {
+ useFullRange = true;
+ RangeTest(Pt, useFullRange);
+ }
+}
+//------------------------------------------------------------------------------
+
+TEdge* FindNextLocMin(TEdge* E)
+{
+ for (;;)
+ {
+ while (E->Bot != E->Prev->Bot || E->Curr == E->Top) E = E->Next;
+ if (!IsHorizontal(*E) && !IsHorizontal(*E->Prev)) break;
+ while (IsHorizontal(*E->Prev)) E = E->Prev;
+ TEdge* E2 = E;
+ while (IsHorizontal(*E)) E = E->Next;
+ if (E->Top.Y == E->Prev->Bot.Y) continue; //ie just an intermediate horz.
+ if (E2->Prev->Bot.X < E->Bot.X) E = E2;
+ break;
+ }
+ return E;
+}
+//------------------------------------------------------------------------------
+
+TEdge* ClipperBase::ProcessBound(TEdge* E, bool IsClockwise)
+{
+ TEdge *EStart = E, *Result = E;
+ TEdge *Horz = 0;
+ cInt StartX;
+ if (IsHorizontal(*E))
+ {
+ //it's possible for adjacent overlapping horz edges to start heading left
+ //before finishing right, so ...
+ if (IsClockwise) StartX = E->Prev->Bot.X;
+ else StartX = E->Next->Bot.X;
+ if (E->Bot.X != StartX) ReverseHorizontal(*E);
+ }
+
+ if (Result->OutIdx != Skip)
+ {
+ if (IsClockwise)
+ {
+ while (Result->Top.Y == Result->Next->Bot.Y && Result->Next->OutIdx != Skip)
+ Result = Result->Next;
+ if (IsHorizontal(*Result) && Result->Next->OutIdx != Skip)
+ {
+ //nb: at the top of a bound, horizontals are added to the bound
+ //only when the preceding edge attaches to the horizontal's left vertex
+ //unless a Skip edge is encountered when that becomes the top divide
+ Horz = Result;
+ while (IsHorizontal(*Horz->Prev)) Horz = Horz->Prev;
+ if (Horz->Prev->Top.X == Result->Next->Top.X)
+ {
+ if (!IsClockwise) Result = Horz->Prev;
+ }
+ else if (Horz->Prev->Top.X > Result->Next->Top.X) Result = Horz->Prev;
+ }
+ while (E != Result)
+ {
+ E->NextInLML = E->Next;
+ if (IsHorizontal(*E) && E != EStart &&
+ E->Bot.X != E->Prev->Top.X) ReverseHorizontal(*E);
+ E = E->Next;
+ }
+ if (IsHorizontal(*E) && E != EStart && E->Bot.X != E->Prev->Top.X)
+ ReverseHorizontal(*E);
+ Result = Result->Next; //move to the edge just beyond current bound
+ } else
+ {
+ while (Result->Top.Y == Result->Prev->Bot.Y && Result->Prev->OutIdx != Skip)
+ Result = Result->Prev;
+ if (IsHorizontal(*Result) && Result->Prev->OutIdx != Skip)
+ {
+ Horz = Result;
+ while (IsHorizontal(*Horz->Next)) Horz = Horz->Next;
+ if (Horz->Next->Top.X == Result->Prev->Top.X)
+ {
+ if (!IsClockwise) Result = Horz->Next;
+ }
+ else if (Horz->Next->Top.X > Result->Prev->Top.X) Result = Horz->Next;
+ }
+
+ while (E != Result)
+ {
+ E->NextInLML = E->Prev;
+ if (IsHorizontal(*E) && E != EStart && E->Bot.X != E->Next->Top.X)
+ ReverseHorizontal(*E);
+ E = E->Prev;
+ }
+ if (IsHorizontal(*E) && E != EStart && E->Bot.X != E->Next->Top.X)
+ ReverseHorizontal(*E);
+ Result = Result->Prev; //move to the edge just beyond current bound
+ }
+ }
+
+ if (Result->OutIdx == Skip)
+ {
+ //if edges still remain in the current bound beyond the skip edge then
+ //create another LocMin and call ProcessBound once more
+ E = Result;
+ if (IsClockwise)
+ {
+ while (E->Top.Y == E->Next->Bot.Y) E = E->Next;
+ //don't include top horizontals when parsing a bound a second time,
+ //they will be contained in the opposite bound ...
+ while (E != Result && IsHorizontal(*E)) E = E->Prev;
+ } else
+ {
+ while (E->Top.Y == E->Prev->Bot.Y) E = E->Prev;
+ while (E != Result && IsHorizontal(*E)) E = E->Next;
+ }
+ if (E == Result)
+ {
+ if (IsClockwise) Result = E->Next;
+ else Result = E->Prev;
+ } else
+ {
+ //there are more edges in the bound beyond result starting with E
+ if (IsClockwise)
+ E = Result->Next;
+ else
+ E = Result->Prev;
+ LocalMinima* locMin = new LocalMinima;
+ locMin->Next = 0;
+ locMin->Y = E->Bot.Y;
+ locMin->LeftBound = 0;
+ locMin->RightBound = E;
+ locMin->RightBound->WindDelta = 0;
+ Result = ProcessBound(locMin->RightBound, IsClockwise);
+ InsertLocalMinima(locMin);
+ }
+ }
+ return Result;
+}
+//------------------------------------------------------------------------------
+
+bool ClipperBase::AddPath(const Path &pg, PolyType PolyTyp, bool Closed)
+{
+#ifdef use_lines
+ if (!Closed && PolyTyp == ptClip)
+ throw clipperException("AddPath: Open paths must be subject.");
+#else
+ if (!Closed)
+ throw clipperException("AddPath: Open paths have been disabled.");
+#endif
+
+ int highI = (int)pg.size() -1;
+ if (Closed) while (highI > 0 && (pg[highI] == pg[0])) --highI;
+ while (highI > 0 && (pg[highI] == pg[highI -1])) --highI;
+ if ((Closed && highI < 2) || (!Closed && highI < 1)) return false;
+
+ //create a new edge array ...
+ TEdge *edges = new TEdge [highI +1];
+
+ bool IsFlat = true;
+ //1. Basic (first) edge initialization ...
+ try
+ {
+ edges[1].Curr = pg[1];
+ RangeTest(pg[0], m_UseFullRange);
+ RangeTest(pg[highI], m_UseFullRange);
+ InitEdge(&edges[0], &edges[1], &edges[highI], pg[0]);
+ InitEdge(&edges[highI], &edges[0], &edges[highI-1], pg[highI]);
+ for (int i = highI - 1; i >= 1; --i)
+ {
+ RangeTest(pg[i], m_UseFullRange);
+ InitEdge(&edges[i], &edges[i+1], &edges[i-1], pg[i]);
+ }
+ }
+ catch(...)
+ {
+ delete [] edges;
+ return false; //almost certainly a vertex has exceeded range
+ }
+
+ TEdge *eStart = &edges[0];
+ if (!Closed) eStart->Prev->OutIdx = Skip;
+
+ //2. Remove duplicate vertices, and (when closed) collinear edges ...
+ TEdge *E = eStart, *eLoopStop = eStart;
+ for (;;)
+ {
+ if ((E->Curr == E->Next->Curr))
+ {
+ if (E == E->Next) break;
+ if (E == eStart) eStart = E->Next;
+ E = RemoveEdge(E);
+ eLoopStop = E;
+ continue;
+ }
+ if (E->Prev == E->Next)
+ break; //only two vertices
+ else if (Closed &&
+ SlopesEqual(E->Prev->Curr, E->Curr, E->Next->Curr, m_UseFullRange) &&
+ (!m_PreserveCollinear ||
+ !Pt2IsBetweenPt1AndPt3(E->Prev->Curr, E->Curr, E->Next->Curr)))
+ {
+ //Collinear edges are allowed for open paths but in closed paths
+ //the default is to merge adjacent collinear edges into a single edge.
+ //However, if the PreserveCollinear property is enabled, only overlapping
+ //collinear edges (ie spikes) will be removed from closed paths.
+ if (E == eStart) eStart = E->Next;
+ E = RemoveEdge(E);
+ E = E->Prev;
+ eLoopStop = E;
+ continue;
+ }
+ E = E->Next;
+ if (E == eLoopStop) break;
+ }
+
+ if ((!Closed && (E == E->Next)) || (Closed && (E->Prev == E->Next)))
+ {
+ delete [] edges;
+ return false;
+ }
+
+ if (!Closed) m_HasOpenPaths = true;
+
+ //3. Do second stage of edge initialization ...
+ E = eStart;
+ do
+ {
+ InitEdge2(*E, PolyTyp);
+ E = E->Next;
+ if (IsFlat && E->Curr.Y != eStart->Curr.Y) IsFlat = false;
+ }
+ while (E != eStart);
+
+ //4. Finally, add edge bounds to LocalMinima list ...
+
+ //Totally flat paths must be handled differently when adding them
+ //to LocalMinima list to avoid endless loops etc ...
+ if (IsFlat)
+ {
+ if (Closed)
+ {
+ delete [] edges;
+ return false;
+ }
+ E->Prev->OutIdx = Skip;
+ if (E->Prev->Bot.X < E->Prev->Top.X) ReverseHorizontal(*E->Prev);
+ LocalMinima* locMin = new LocalMinima();
+ locMin->Next = 0;
+ locMin->Y = E->Bot.Y;
+ locMin->LeftBound = 0;
+ locMin->RightBound = E;
+ locMin->RightBound->Side = esRight;
+ locMin->RightBound->WindDelta = 0;
+ while (E->Next->OutIdx != Skip)
+ {
+ E->NextInLML = E->Next;
+ if (E->Bot.X != E->Prev->Top.X) ReverseHorizontal(*E);
+ E = E->Next;
+ }
+ InsertLocalMinima(locMin);
+ m_edges.push_back(edges);
+ return true;
+ }
+
+ m_edges.push_back(edges);
+ bool clockwise;
+ TEdge* EMin = 0;
+ for (;;)
+ {
+ E = FindNextLocMin(E);
+ if (E == EMin) break;
+ else if (!EMin) EMin = E;
+
+ //E and E.Prev now share a local minima (left aligned if horizontal).
+ //Compare their slopes to find which starts which bound ...
+ LocalMinima* locMin = new LocalMinima;
+ locMin->Next = 0;
+ locMin->Y = E->Bot.Y;
+ if (E->Dx < E->Prev->Dx)
+ {
+ locMin->LeftBound = E->Prev;
+ locMin->RightBound = E;
+ clockwise = false; //Q.nextInLML = Q.prev
+ } else
+ {
+ locMin->LeftBound = E;
+ locMin->RightBound = E->Prev;
+ clockwise = true; //Q.nextInLML = Q.next
+ }
+ locMin->LeftBound->Side = esLeft;
+ locMin->RightBound->Side = esRight;
+
+ if (!Closed) locMin->LeftBound->WindDelta = 0;
+ else if (locMin->LeftBound->Next == locMin->RightBound)
+ locMin->LeftBound->WindDelta = -1;
+ else locMin->LeftBound->WindDelta = 1;
+ locMin->RightBound->WindDelta = -locMin->LeftBound->WindDelta;
+
+ E = ProcessBound(locMin->LeftBound, clockwise);
+ TEdge* E2 = ProcessBound(locMin->RightBound, !clockwise);
+
+ if (locMin->LeftBound->OutIdx == Skip)
+ locMin->LeftBound = 0;
+ else if (locMin->RightBound->OutIdx == Skip)
+ locMin->RightBound = 0;
+ InsertLocalMinima(locMin);
+ if (!clockwise) E = E2;
+ }
+ return true;
+}
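+//nb (usage sketch only - the coordinates are assumed for illustration):
+//  Path square;
+//  square.push_back(IntPoint(0,0));     square.push_back(IntPoint(100,0));
+//  square.push_back(IntPoint(100,100)); square.push_back(IntPoint(0,100));
+//  Clipper c;
+//  bool ok = c.AddPath(square, ptSubject, true); //a closed subject path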
+//------------------------------------------------------------------------------
+
+bool ClipperBase::AddPaths(const Paths &ppg, PolyType PolyTyp, bool Closed)
+{
+ bool result = false;
+ for (Paths::size_type i = 0; i < ppg.size(); ++i)
+ if (AddPath(ppg[i], PolyTyp, Closed)) result = true;
+ return result;
+}
+//------------------------------------------------------------------------------
+
+void ClipperBase::InsertLocalMinima(LocalMinima *newLm)
+{
+ if( ! m_MinimaList )
+ {
+ m_MinimaList = newLm;
+ }
+ else if( newLm->Y >= m_MinimaList->Y )
+ {
+ newLm->Next = m_MinimaList;
+ m_MinimaList = newLm;
+ } else
+ {
+ LocalMinima* tmpLm = m_MinimaList;
+ while( tmpLm->Next && ( newLm->Y < tmpLm->Next->Y ) )
+ tmpLm = tmpLm->Next;
+ newLm->Next = tmpLm->Next;
+ tmpLm->Next = newLm;
+ }
+}
+//------------------------------------------------------------------------------
+
+void ClipperBase::Clear()
+{
+ DisposeLocalMinimaList();
+ for (EdgeList::size_type i = 0; i < m_edges.size(); ++i)
+ {
+    //delete each edge array that was allocated in AddPath() ...
+ TEdge* edges = m_edges[i];
+ delete [] edges;
+ }
+ m_edges.clear();
+ m_UseFullRange = false;
+ m_HasOpenPaths = false;
+}
+//------------------------------------------------------------------------------
+
+void ClipperBase::Reset()
+{
+ m_CurrentLM = m_MinimaList;
+ if( !m_CurrentLM ) return; //ie nothing to process
+
+ //reset all edges ...
+ LocalMinima* lm = m_MinimaList;
+ while( lm )
+ {
+ TEdge* e = lm->LeftBound;
+ if (e)
+ {
+ e->Curr = e->Bot;
+ e->Side = esLeft;
+ e->OutIdx = Unassigned;
+ }
+
+ e = lm->RightBound;
+ if (e)
+ {
+ e->Curr = e->Bot;
+ e->Side = esRight;
+ e->OutIdx = Unassigned;
+ }
+ lm = lm->Next;
+ }
+}
+//------------------------------------------------------------------------------
+
+void ClipperBase::DisposeLocalMinimaList()
+{
+ while( m_MinimaList )
+ {
+ LocalMinima* tmpLm = m_MinimaList->Next;
+ delete m_MinimaList;
+ m_MinimaList = tmpLm;
+ }
+ m_CurrentLM = 0;
+}
+//------------------------------------------------------------------------------
+
+void ClipperBase::PopLocalMinima()
+{
+ if( ! m_CurrentLM ) return;
+ m_CurrentLM = m_CurrentLM->Next;
+}
+//------------------------------------------------------------------------------
+
+IntRect ClipperBase::GetBounds()
+{
+ IntRect result;
+ LocalMinima* lm = m_MinimaList;
+ if (!lm)
+ {
+ result.left = result.top = result.right = result.bottom = 0;
+ return result;
+ }
+ result.left = lm->LeftBound->Bot.X;
+ result.top = lm->LeftBound->Bot.Y;
+ result.right = lm->LeftBound->Bot.X;
+ result.bottom = lm->LeftBound->Bot.Y;
+ while (lm)
+ {
+ if (lm->LeftBound->Bot.Y > result.bottom)
+ result.bottom = lm->LeftBound->Bot.Y;
+ TEdge* e = lm->LeftBound;
+ for (;;) {
+ TEdge* bottomE = e;
+ while (e->NextInLML)
+ {
+ if (e->Bot.X < result.left) result.left = e->Bot.X;
+ if (e->Bot.X > result.right) result.right = e->Bot.X;
+ e = e->NextInLML;
+ }
+ if (e->Bot.X < result.left) result.left = e->Bot.X;
+ if (e->Bot.X > result.right) result.right = e->Bot.X;
+ if (e->Top.X < result.left) result.left = e->Top.X;
+ if (e->Top.X > result.right) result.right = e->Top.X;
+ if (e->Top.Y < result.top) result.top = e->Top.Y;
+
+ if (bottomE == lm->LeftBound) e = lm->RightBound;
+ else break;
+ }
+ lm = lm->Next;
+ }
+ return result;
+}
+
+//------------------------------------------------------------------------------
+// TClipper methods ...
+//------------------------------------------------------------------------------
+
+Clipper::Clipper(int initOptions) : ClipperBase() //constructor
+{
+ m_ActiveEdges = 0;
+ m_SortedEdges = 0;
+ m_ExecuteLocked = false;
+ m_UseFullRange = false;
+ m_ReverseOutput = ((initOptions & ioReverseSolution) != 0);
+ m_StrictSimple = ((initOptions & ioStrictlySimple) != 0);
+ m_PreserveCollinear = ((initOptions & ioPreserveCollinear) != 0);
+ m_HasOpenPaths = false;
+#ifdef use_xyz
+ m_ZFill = 0;
+#endif
+}
+//------------------------------------------------------------------------------
+
+Clipper::~Clipper() //destructor
+{
+ Clear();
+ m_Scanbeam.clear();
+}
+//------------------------------------------------------------------------------
+
+#ifdef use_xyz
+void Clipper::ZFillFunction(TZFillCallback zFillFunc)
+{
+ m_ZFill = zFillFunc;
+}
+//------------------------------------------------------------------------------
+#endif
+
+void Clipper::Clear()
+{
+ if (m_edges.empty()) return; //avoids problems with ClipperBase destructor
+ DisposeAllOutRecs();
+ ClipperBase::Clear();
+}
+//------------------------------------------------------------------------------
+
+void Clipper::Reset()
+{
+ ClipperBase::Reset();
+ m_Scanbeam.clear();
+ m_ActiveEdges = 0;
+ m_SortedEdges = 0;
+ DisposeAllOutRecs();
+ LocalMinima* lm = m_MinimaList;
+ while (lm)
+ {
+ InsertScanbeam(lm->Y);
+ lm = lm->Next;
+ }
+}
+//------------------------------------------------------------------------------
+
+bool Clipper::Execute(ClipType clipType, Paths &solution,
+ PolyFillType subjFillType, PolyFillType clipFillType)
+{
+ if( m_ExecuteLocked ) return false;
+ if (m_HasOpenPaths)
+    throw clipperException("Error: PolyTree struct is needed for open path clipping.");
+ m_ExecuteLocked = true;
+ solution.resize(0);
+ m_SubjFillType = subjFillType;
+ m_ClipFillType = clipFillType;
+ m_ClipType = clipType;
+ m_UsingPolyTree = false;
+ bool succeeded = ExecuteInternal();
+ if (succeeded) BuildResult(solution);
+ m_ExecuteLocked = false;
+ return succeeded;
+}
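+//nb (minimal end-to-end sketch only; 'subj' and 'clip' are assumed to be Paths
+//built of closed polygons as in the AddPath note above):
+//  Clipper c;
+//  c.AddPaths(subj, ptSubject, true);
+//  c.AddPaths(clip, ptClip, true);
+//  Paths solution;
+//  if (c.Execute(ctIntersection, solution, pftEvenOdd, pftEvenOdd))
+//    { /*solution now holds the clipped, closed output polygons*/ }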
+//------------------------------------------------------------------------------
+
+bool Clipper::Execute(ClipType clipType, PolyTree& polytree,
+ PolyFillType subjFillType, PolyFillType clipFillType)
+{
+ if( m_ExecuteLocked ) return false;
+ m_ExecuteLocked = true;
+ m_SubjFillType = subjFillType;
+ m_ClipFillType = clipFillType;
+ m_ClipType = clipType;
+ m_UsingPolyTree = true;
+ bool succeeded = ExecuteInternal();
+ if (succeeded) BuildResult2(polytree);
+ m_ExecuteLocked = false;
+ return succeeded;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::FixHoleLinkage(OutRec &outrec)
+{
+ //skip OutRecs that (a) contain outermost polygons or
+ //(b) already have the correct owner/child linkage ...
+ if (!outrec.FirstLeft ||
+ (outrec.IsHole != outrec.FirstLeft->IsHole &&
+ outrec.FirstLeft->Pts)) return;
+
+ OutRec* orfl = outrec.FirstLeft;
+ while (orfl && ((orfl->IsHole == outrec.IsHole) || !orfl->Pts))
+ orfl = orfl->FirstLeft;
+ outrec.FirstLeft = orfl;
+}
+//------------------------------------------------------------------------------
+
+bool Clipper::ExecuteInternal()
+{
+ bool succeeded = true;
+ try {
+ Reset();
+ if (!m_CurrentLM) return false;
+ cInt botY = PopScanbeam();
+ do {
+ InsertLocalMinimaIntoAEL(botY);
+ ClearGhostJoins();
+ ProcessHorizontals(false);
+ if (m_Scanbeam.empty()) break;
+ cInt topY = PopScanbeam();
+ succeeded = ProcessIntersections(botY, topY);
+ if (!succeeded) break;
+ ProcessEdgesAtTopOfScanbeam(topY);
+ botY = topY;
+ } while (!m_Scanbeam.empty() || m_CurrentLM);
+ }
+ catch(...)
+ {
+ succeeded = false;
+ }
+
+ if (succeeded)
+ {
+ //fix orientations ...
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); ++i)
+ {
+ OutRec *outRec = m_PolyOuts[i];
+ if (!outRec->Pts || outRec->IsOpen) continue;
+ if ((outRec->IsHole ^ m_ReverseOutput) == (Area(*outRec) > 0))
+ ReversePolyPtLinks(outRec->Pts);
+ }
+
+ if (!m_Joins.empty()) JoinCommonEdges();
+
+ //unfortunately FixupOutPolygon() must be done after JoinCommonEdges()
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); ++i)
+ {
+ OutRec *outRec = m_PolyOuts[i];
+ if (outRec->Pts && !outRec->IsOpen)
+ FixupOutPolygon(*outRec);
+ }
+
+ if (m_StrictSimple) DoSimplePolygons();
+ }
+
+ ClearJoins();
+ ClearGhostJoins();
+ return succeeded;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::InsertScanbeam(const cInt Y)
+{
+ m_Scanbeam.insert(Y);
+}
+//------------------------------------------------------------------------------
+
+cInt Clipper::PopScanbeam()
+{
+ cInt Y = *m_Scanbeam.begin();
+ m_Scanbeam.erase(m_Scanbeam.begin());
+ return Y;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::DisposeAllOutRecs()
+{
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); ++i)
+ DisposeOutRec(i);
+ m_PolyOuts.clear();
+}
+//------------------------------------------------------------------------------
+
+void Clipper::DisposeOutRec(PolyOutList::size_type index)
+{
+ OutRec *outRec = m_PolyOuts[index];
+ if (outRec->Pts) DisposeOutPts(outRec->Pts);
+ delete outRec;
+ m_PolyOuts[index] = 0;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::SetWindingCount(TEdge &edge)
+{
+ TEdge *e = edge.PrevInAEL;
+  //find the edge of the same polytype that immediately precedes 'edge' in AEL
+ while (e && ((e->PolyTyp != edge.PolyTyp) || (e->WindDelta == 0))) e = e->PrevInAEL;
+ if (!e)
+ {
+ edge.WindCnt = (edge.WindDelta == 0 ? 1 : edge.WindDelta);
+ edge.WindCnt2 = 0;
+ e = m_ActiveEdges; //ie get ready to calc WindCnt2
+ }
+ else if (edge.WindDelta == 0 && m_ClipType != ctUnion)
+ {
+ edge.WindCnt = 1;
+ edge.WindCnt2 = e->WindCnt2;
+ e = e->NextInAEL; //ie get ready to calc WindCnt2
+ }
+ else if (IsEvenOddFillType(edge))
+ {
+ //EvenOdd filling ...
+ if (edge.WindDelta == 0)
+ {
+ //are we inside a subj polygon ...
+ bool Inside = true;
+ TEdge *e2 = e->PrevInAEL;
+ while (e2)
+ {
+ if (e2->PolyTyp == e->PolyTyp && e2->WindDelta != 0)
+ Inside = !Inside;
+ e2 = e2->PrevInAEL;
+ }
+ edge.WindCnt = (Inside ? 0 : 1);
+ }
+ else
+ {
+ edge.WindCnt = edge.WindDelta;
+ }
+ edge.WindCnt2 = e->WindCnt2;
+ e = e->NextInAEL; //ie get ready to calc WindCnt2
+ }
+ else
+ {
+ //nonZero, Positive or Negative filling ...
+ if (e->WindCnt * e->WindDelta < 0)
+ {
+ //prev edge is 'decreasing' WindCount (WC) toward zero
+ //so we're outside the previous polygon ...
+ if (Abs(e->WindCnt) > 1)
+ {
+ //outside prev poly but still inside another.
+ //when reversing direction of prev poly use the same WC
+ if (e->WindDelta * edge.WindDelta < 0) edge.WindCnt = e->WindCnt;
+ //otherwise continue to 'decrease' WC ...
+ else edge.WindCnt = e->WindCnt + edge.WindDelta;
+ }
+ else
+ //now outside all polys of same polytype so set own WC ...
+ edge.WindCnt = (edge.WindDelta == 0 ? 1 : edge.WindDelta);
+ } else
+ {
+ //prev edge is 'increasing' WindCount (WC) away from zero
+ //so we're inside the previous polygon ...
+ if (edge.WindDelta == 0)
+ edge.WindCnt = (e->WindCnt < 0 ? e->WindCnt - 1 : e->WindCnt + 1);
+ //if wind direction is reversing prev then use same WC
+ else if (e->WindDelta * edge.WindDelta < 0) edge.WindCnt = e->WindCnt;
+ //otherwise add to WC ...
+ else edge.WindCnt = e->WindCnt + edge.WindDelta;
+ }
+ edge.WindCnt2 = e->WindCnt2;
+ e = e->NextInAEL; //ie get ready to calc WindCnt2
+ }
+
+ //update WindCnt2 ...
+ if (IsEvenOddAltFillType(edge))
+ {
+ //EvenOdd filling ...
+ while (e != &edge)
+ {
+ if (e->WindDelta != 0)
+ edge.WindCnt2 = (edge.WindCnt2 == 0 ? 1 : 0);
+ e = e->NextInAEL;
+ }
+ } else
+ {
+ //nonZero, Positive or Negative filling ...
+ while ( e != &edge )
+ {
+ edge.WindCnt2 += e->WindDelta;
+ e = e->NextInAEL;
+ }
+ }
+}
+//------------------------------------------------------------------------------
+
+bool Clipper::IsEvenOddFillType(const TEdge& edge) const
+{
+ if (edge.PolyTyp == ptSubject)
+ return m_SubjFillType == pftEvenOdd; else
+ return m_ClipFillType == pftEvenOdd;
+}
+//------------------------------------------------------------------------------
+
+bool Clipper::IsEvenOddAltFillType(const TEdge& edge) const
+{
+ if (edge.PolyTyp == ptSubject)
+ return m_ClipFillType == pftEvenOdd; else
+ return m_SubjFillType == pftEvenOdd;
+}
+//------------------------------------------------------------------------------
+
+bool Clipper::IsContributing(const TEdge& edge) const
+{
+ PolyFillType pft, pft2;
+ if (edge.PolyTyp == ptSubject)
+ {
+ pft = m_SubjFillType;
+ pft2 = m_ClipFillType;
+ } else
+ {
+ pft = m_ClipFillType;
+ pft2 = m_SubjFillType;
+ }
+
+ switch(pft)
+ {
+ case pftEvenOdd:
+ //return false if a subj line has been flagged as inside a subj polygon
+ if (edge.WindDelta == 0 && edge.WindCnt != 1) return false;
+ break;
+ case pftNonZero:
+ if (Abs(edge.WindCnt) != 1) return false;
+ break;
+ case pftPositive:
+ if (edge.WindCnt != 1) return false;
+ break;
+ default: //pftNegative
+ if (edge.WindCnt != -1) return false;
+ }
+
+ switch(m_ClipType)
+ {
+ case ctIntersection:
+ switch(pft2)
+ {
+ case pftEvenOdd:
+ case pftNonZero:
+ return (edge.WindCnt2 != 0);
+ case pftPositive:
+ return (edge.WindCnt2 > 0);
+ default:
+ return (edge.WindCnt2 < 0);
+ }
+ break;
+ case ctUnion:
+ switch(pft2)
+ {
+ case pftEvenOdd:
+ case pftNonZero:
+ return (edge.WindCnt2 == 0);
+ case pftPositive:
+ return (edge.WindCnt2 <= 0);
+ default:
+ return (edge.WindCnt2 >= 0);
+ }
+ break;
+ case ctDifference:
+ if (edge.PolyTyp == ptSubject)
+ switch(pft2)
+ {
+ case pftEvenOdd:
+ case pftNonZero:
+ return (edge.WindCnt2 == 0);
+ case pftPositive:
+ return (edge.WindCnt2 <= 0);
+ default:
+ return (edge.WindCnt2 >= 0);
+ }
+ else
+ switch(pft2)
+ {
+ case pftEvenOdd:
+ case pftNonZero:
+ return (edge.WindCnt2 != 0);
+ case pftPositive:
+ return (edge.WindCnt2 > 0);
+ default:
+ return (edge.WindCnt2 < 0);
+ }
+ break;
+ case ctXor:
+ if (edge.WindDelta == 0) //XOr always contributing unless open
+ switch(pft2)
+ {
+ case pftEvenOdd:
+ case pftNonZero:
+ return (edge.WindCnt2 == 0);
+ case pftPositive:
+ return (edge.WindCnt2 <= 0);
+ default:
+ return (edge.WindCnt2 >= 0);
+ }
+ else
+ return true;
+ break;
+ default:
+ return true;
+ }
+}
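+//nb (illustrative example only): for a subject ring wound twice around the same
+//region, pftEvenOdd leaves that region unfilled (even winding number), pftNonZero
+//and pftPositive fill it (winding number +2), and pftNegative fills it only when
+//the ring is wound the other way (winding number -2).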
+//------------------------------------------------------------------------------
+
+OutPt* Clipper::AddLocalMinPoly(TEdge *e1, TEdge *e2, const IntPoint &Pt)
+{
+ OutPt* result;
+ TEdge *e, *prevE;
+ if (IsHorizontal(*e2) || ( e1->Dx > e2->Dx ))
+ {
+ result = AddOutPt(e1, Pt);
+ e2->OutIdx = e1->OutIdx;
+ e1->Side = esLeft;
+ e2->Side = esRight;
+ e = e1;
+ if (e->PrevInAEL == e2)
+ prevE = e2->PrevInAEL;
+ else
+ prevE = e->PrevInAEL;
+ } else
+ {
+ result = AddOutPt(e2, Pt);
+ e1->OutIdx = e2->OutIdx;
+ e1->Side = esRight;
+ e2->Side = esLeft;
+ e = e2;
+ if (e->PrevInAEL == e1)
+ prevE = e1->PrevInAEL;
+ else
+ prevE = e->PrevInAEL;
+ }
+
+ if (prevE && prevE->OutIdx >= 0 &&
+ (TopX(*prevE, Pt.Y) == TopX(*e, Pt.Y)) &&
+ SlopesEqual(*e, *prevE, m_UseFullRange) &&
+ (e->WindDelta != 0) && (prevE->WindDelta != 0))
+ {
+ OutPt* outPt = AddOutPt(prevE, Pt);
+ AddJoin(result, outPt, e->Top);
+ }
+ return result;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::AddLocalMaxPoly(TEdge *e1, TEdge *e2, const IntPoint &Pt)
+{
+ AddOutPt( e1, Pt );
+ if (e2->WindDelta == 0) AddOutPt(e2, Pt);
+ if( e1->OutIdx == e2->OutIdx )
+ {
+ e1->OutIdx = Unassigned;
+ e2->OutIdx = Unassigned;
+ }
+ else if (e1->OutIdx < e2->OutIdx)
+ AppendPolygon(e1, e2);
+ else
+ AppendPolygon(e2, e1);
+}
+//------------------------------------------------------------------------------
+
+void Clipper::AddEdgeToSEL(TEdge *edge)
+{
+ //SEL pointers in PEdge are reused to build a list of horizontal edges.
+ //However, we don't need to worry about order with horizontal edge processing.
+ if( !m_SortedEdges )
+ {
+ m_SortedEdges = edge;
+ edge->PrevInSEL = 0;
+ edge->NextInSEL = 0;
+ }
+ else
+ {
+ edge->NextInSEL = m_SortedEdges;
+ edge->PrevInSEL = 0;
+ m_SortedEdges->PrevInSEL = edge;
+ m_SortedEdges = edge;
+ }
+}
+//------------------------------------------------------------------------------
+
+void Clipper::CopyAELToSEL()
+{
+ TEdge* e = m_ActiveEdges;
+ m_SortedEdges = e;
+ while ( e )
+ {
+ e->PrevInSEL = e->PrevInAEL;
+ e->NextInSEL = e->NextInAEL;
+ e = e->NextInAEL;
+ }
+}
+//------------------------------------------------------------------------------
+
+void Clipper::AddJoin(OutPt *op1, OutPt *op2, const IntPoint OffPt)
+{
+ Join* j = new Join;
+ j->OutPt1 = op1;
+ j->OutPt2 = op2;
+ j->OffPt = OffPt;
+ m_Joins.push_back(j);
+}
+//------------------------------------------------------------------------------
+
+void Clipper::ClearJoins()
+{
+ for (JoinList::size_type i = 0; i < m_Joins.size(); i++)
+ delete m_Joins[i];
+ m_Joins.resize(0);
+}
+//------------------------------------------------------------------------------
+
+void Clipper::ClearGhostJoins()
+{
+ for (JoinList::size_type i = 0; i < m_GhostJoins.size(); i++)
+ delete m_GhostJoins[i];
+ m_GhostJoins.resize(0);
+}
+//------------------------------------------------------------------------------
+
+void Clipper::AddGhostJoin(OutPt *op, const IntPoint OffPt)
+{
+ Join* j = new Join;
+ j->OutPt1 = op;
+ j->OutPt2 = 0;
+ j->OffPt = OffPt;
+ m_GhostJoins.push_back(j);
+}
+//------------------------------------------------------------------------------
+
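+//At the bottom of each scanbeam, insert the left and right bounds of every
+//local minimum starting at botY into the AEL, set their winding counts, and
+//start a new output polygon for any bound that is contributing.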
+void Clipper::InsertLocalMinimaIntoAEL(const cInt botY)
+{
+ while( m_CurrentLM && ( m_CurrentLM->Y == botY ) )
+ {
+ TEdge* lb = m_CurrentLM->LeftBound;
+ TEdge* rb = m_CurrentLM->RightBound;
+ PopLocalMinima();
+ OutPt *Op1 = 0;
+ if (!lb)
+ {
+ //nb: don't insert LB into either AEL or SEL
+ InsertEdgeIntoAEL(rb, 0);
+ SetWindingCount(*rb);
+ if (IsContributing(*rb))
+ Op1 = AddOutPt(rb, rb->Bot);
+ }
+ else if (!rb)
+ {
+ InsertEdgeIntoAEL(lb, 0);
+ SetWindingCount(*lb);
+ if (IsContributing(*lb))
+ Op1 = AddOutPt(lb, lb->Bot);
+ InsertScanbeam(lb->Top.Y);
+ }
+ else
+ {
+ InsertEdgeIntoAEL(lb, 0);
+ InsertEdgeIntoAEL(rb, lb);
+ SetWindingCount( *lb );
+ rb->WindCnt = lb->WindCnt;
+ rb->WindCnt2 = lb->WindCnt2;
+ if (IsContributing(*lb))
+ Op1 = AddLocalMinPoly(lb, rb, lb->Bot);
+ InsertScanbeam(lb->Top.Y);
+ }
+
+ if (rb)
+ {
+ if(IsHorizontal(*rb)) AddEdgeToSEL(rb);
+ else InsertScanbeam( rb->Top.Y );
+ }
+
+ if (!lb || !rb) continue;
+
+ //if any output polygons share an edge, they'll need joining later ...
+ if (Op1 && IsHorizontal(*rb) &&
+ m_GhostJoins.size() > 0 && (rb->WindDelta != 0))
+ {
+ for (JoinList::size_type i = 0; i < m_GhostJoins.size(); ++i)
+ {
+ Join* jr = m_GhostJoins[i];
+ //if the horizontal Rb and a 'ghost' horizontal overlap, then convert
+ //the 'ghost' join to a real join ready for later ...
+ if (HorzSegmentsOverlap(jr->OutPt1->Pt, jr->OffPt, rb->Bot, rb->Top))
+ AddJoin(jr->OutPt1, Op1, jr->OffPt);
+ }
+ }
+
+ if (lb->OutIdx >= 0 && lb->PrevInAEL &&
+ lb->PrevInAEL->Curr.X == lb->Bot.X &&
+ lb->PrevInAEL->OutIdx >= 0 &&
+ SlopesEqual(*lb->PrevInAEL, *lb, m_UseFullRange) &&
+ (lb->WindDelta != 0) && (lb->PrevInAEL->WindDelta != 0))
+ {
+ OutPt *Op2 = AddOutPt(lb->PrevInAEL, lb->Bot);
+ AddJoin(Op1, Op2, lb->Top);
+ }
+
+ if(lb->NextInAEL != rb)
+ {
+
+ if (rb->OutIdx >= 0 && rb->PrevInAEL->OutIdx >= 0 &&
+ SlopesEqual(*rb->PrevInAEL, *rb, m_UseFullRange) &&
+ (rb->WindDelta != 0) && (rb->PrevInAEL->WindDelta != 0))
+ {
+ OutPt *Op2 = AddOutPt(rb->PrevInAEL, rb->Bot);
+ AddJoin(Op1, Op2, rb->Top);
+ }
+
+ TEdge* e = lb->NextInAEL;
+ if (e)
+ {
+ while( e != rb )
+ {
+ //nb: For calculating winding counts etc, IntersectEdges() assumes
+ //that param1 will be to the Right of param2 ABOVE the intersection ...
+ IntersectEdges(rb , e , lb->Curr); //order important here
+ e = e->NextInAEL;
+ }
+ }
+ }
+
+ }
+}
+//------------------------------------------------------------------------------
+
+void Clipper::DeleteFromAEL(TEdge *e)
+{
+ TEdge* AelPrev = e->PrevInAEL;
+ TEdge* AelNext = e->NextInAEL;
+ if( !AelPrev && !AelNext && (e != m_ActiveEdges) ) return; //already deleted
+ if( AelPrev ) AelPrev->NextInAEL = AelNext;
+ else m_ActiveEdges = AelNext;
+ if( AelNext ) AelNext->PrevInAEL = AelPrev;
+ e->NextInAEL = 0;
+ e->PrevInAEL = 0;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::DeleteFromSEL(TEdge *e)
+{
+ TEdge* SelPrev = e->PrevInSEL;
+ TEdge* SelNext = e->NextInSEL;
+ if( !SelPrev && !SelNext && (e != m_SortedEdges) ) return; //already deleted
+ if( SelPrev ) SelPrev->NextInSEL = SelNext;
+ else m_SortedEdges = SelNext;
+ if( SelNext ) SelNext->PrevInSEL = SelPrev;
+ e->NextInSEL = 0;
+ e->PrevInSEL = 0;
+}
+//------------------------------------------------------------------------------
+
+#ifdef use_xyz
+
+void Clipper::SetZ(IntPoint& pt, TEdge& e)
+{
+ pt.Z = 0;
+ if (m_ZFill)
+ {
+ //put the 'preferred' point as first parameter ...
+ if (e.OutIdx < 0)
+ (*m_ZFill)(e.Bot, e.Top, pt); //outside a path so presume entering
+ else
+ (*m_ZFill)(e.Top, e.Bot, pt); //inside a path so presume exiting
+ }
+}
+//------------------------------------------------------------------------------
+#endif
+
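+//WindCnt is an edge's winding count with respect to paths of its own PolyTyp;
+//WindCnt2 is the winding count contributed by paths of the other PolyTyp.
+//IsContributing() and IntersectEdges() use both to decide what gets output.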
+void Clipper::IntersectEdges(TEdge *e1, TEdge *e2,
+ const IntPoint &Pt, bool protect)
+{
+ //e1 will be to the Left of e2 BELOW the intersection. Therefore e1 is before
+ //e2 in AEL except when e1 is being inserted at the intersection point ...
+ bool e1stops = !protect && !e1->NextInLML &&
+ e1->Top.X == Pt.X && e1->Top.Y == Pt.Y;
+ bool e2stops = !protect && !e2->NextInLML &&
+ e2->Top.X == Pt.X && e2->Top.Y == Pt.Y;
+ bool e1Contributing = ( e1->OutIdx >= 0 );
+ bool e2Contributing = ( e2->OutIdx >= 0 );
+
+#ifdef use_lines
+ //if either edge is on an OPEN path ...
+ if (e1->WindDelta == 0 || e2->WindDelta == 0)
+ {
+ //ignore subject-subject open path intersections UNLESS they
+ //are both open paths, AND they are both 'contributing maximas' ...
+ if (e1->WindDelta == 0 && e2->WindDelta == 0)
+ {
+ if ((e1stops || e2stops) && e1Contributing && e2Contributing)
+ AddLocalMaxPoly(e1, e2, Pt);
+ }
+
+ //if intersecting a subj line with a subj poly ...
+ else if (e1->PolyTyp == e2->PolyTyp &&
+ e1->WindDelta != e2->WindDelta && m_ClipType == ctUnion)
+ {
+ if (e1->WindDelta == 0)
+ {
+ if (e2Contributing)
+ {
+ AddOutPt(e1, Pt);
+ if (e1Contributing) e1->OutIdx = Unassigned;
+ }
+ }
+ else
+ {
+ if (e1Contributing)
+ {
+ AddOutPt(e2, Pt);
+ if (e2Contributing) e2->OutIdx = Unassigned;
+ }
+ }
+ }
+ else if (e1->PolyTyp != e2->PolyTyp)
+ {
+      //toggle subj open path OutIdx on/off when Abs(clip.WindCnt) == 1 ...
+ if ((e1->WindDelta == 0) && abs(e2->WindCnt) == 1 &&
+ (m_ClipType != ctUnion || e2->WindCnt2 == 0))
+ {
+ AddOutPt(e1, Pt);
+ if (e1Contributing) e1->OutIdx = Unassigned;
+ }
+ else if ((e2->WindDelta == 0) && (abs(e1->WindCnt) == 1) &&
+ (m_ClipType != ctUnion || e1->WindCnt2 == 0))
+ {
+ AddOutPt(e2, Pt);
+ if (e2Contributing) e2->OutIdx = Unassigned;
+ }
+ }
+
+ if (e1stops)
+ if (e1->OutIdx < 0) DeleteFromAEL(e1);
+ else throw clipperException("Error intersecting polylines");
+ if (e2stops)
+ if (e2->OutIdx < 0) DeleteFromAEL(e2);
+ else throw clipperException("Error intersecting polylines");
+ return;
+ }
+#endif
+
+ //update winding counts...
+ //assumes that e1 will be to the Right of e2 ABOVE the intersection
+ if ( e1->PolyTyp == e2->PolyTyp )
+ {
+ if ( IsEvenOddFillType( *e1) )
+ {
+ int oldE1WindCnt = e1->WindCnt;
+ e1->WindCnt = e2->WindCnt;
+ e2->WindCnt = oldE1WindCnt;
+ } else
+ {
+ if (e1->WindCnt + e2->WindDelta == 0 ) e1->WindCnt = -e1->WindCnt;
+ else e1->WindCnt += e2->WindDelta;
+ if ( e2->WindCnt - e1->WindDelta == 0 ) e2->WindCnt = -e2->WindCnt;
+ else e2->WindCnt -= e1->WindDelta;
+ }
+ } else
+ {
+ if (!IsEvenOddFillType(*e2)) e1->WindCnt2 += e2->WindDelta;
+ else e1->WindCnt2 = ( e1->WindCnt2 == 0 ) ? 1 : 0;
+ if (!IsEvenOddFillType(*e1)) e2->WindCnt2 -= e1->WindDelta;
+ else e2->WindCnt2 = ( e2->WindCnt2 == 0 ) ? 1 : 0;
+ }
+
+ PolyFillType e1FillType, e2FillType, e1FillType2, e2FillType2;
+ if (e1->PolyTyp == ptSubject)
+ {
+ e1FillType = m_SubjFillType;
+ e1FillType2 = m_ClipFillType;
+ } else
+ {
+ e1FillType = m_ClipFillType;
+ e1FillType2 = m_SubjFillType;
+ }
+ if (e2->PolyTyp == ptSubject)
+ {
+ e2FillType = m_SubjFillType;
+ e2FillType2 = m_ClipFillType;
+ } else
+ {
+ e2FillType = m_ClipFillType;
+ e2FillType2 = m_SubjFillType;
+ }
+
+ cInt e1Wc, e2Wc;
+ switch (e1FillType)
+ {
+ case pftPositive: e1Wc = e1->WindCnt; break;
+ case pftNegative: e1Wc = -e1->WindCnt; break;
+ default: e1Wc = Abs(e1->WindCnt);
+ }
+ switch(e2FillType)
+ {
+ case pftPositive: e2Wc = e2->WindCnt; break;
+ case pftNegative: e2Wc = -e2->WindCnt; break;
+ default: e2Wc = Abs(e2->WindCnt);
+ }
+
+ if ( e1Contributing && e2Contributing )
+ {
+ if ( e1stops || e2stops ||
+ (e1Wc != 0 && e1Wc != 1) || (e2Wc != 0 && e2Wc != 1) ||
+ (e1->PolyTyp != e2->PolyTyp && m_ClipType != ctXor) )
+ AddLocalMaxPoly(e1, e2, Pt);
+ else
+ {
+ AddOutPt(e1, Pt);
+ AddOutPt(e2, Pt);
+ SwapSides( *e1 , *e2 );
+ SwapPolyIndexes( *e1 , *e2 );
+ }
+ }
+ else if ( e1Contributing )
+ {
+ if (e2Wc == 0 || e2Wc == 1)
+ {
+ AddOutPt(e1, Pt);
+ SwapSides(*e1, *e2);
+ SwapPolyIndexes(*e1, *e2);
+ }
+ }
+ else if ( e2Contributing )
+ {
+ if (e1Wc == 0 || e1Wc == 1)
+ {
+ AddOutPt(e2, Pt);
+ SwapSides(*e1, *e2);
+ SwapPolyIndexes(*e1, *e2);
+ }
+ }
+ else if ( (e1Wc == 0 || e1Wc == 1) &&
+ (e2Wc == 0 || e2Wc == 1) && !e1stops && !e2stops )
+ {
+ //neither edge is currently contributing ...
+
+ cInt e1Wc2, e2Wc2;
+ switch (e1FillType2)
+ {
+ case pftPositive: e1Wc2 = e1->WindCnt2; break;
+ case pftNegative : e1Wc2 = -e1->WindCnt2; break;
+ default: e1Wc2 = Abs(e1->WindCnt2);
+ }
+ switch (e2FillType2)
+ {
+ case pftPositive: e2Wc2 = e2->WindCnt2; break;
+ case pftNegative: e2Wc2 = -e2->WindCnt2; break;
+ default: e2Wc2 = Abs(e2->WindCnt2);
+ }
+
+ if (e1->PolyTyp != e2->PolyTyp)
+ AddLocalMinPoly(e1, e2, Pt);
+ else if (e1Wc == 1 && e2Wc == 1)
+ switch( m_ClipType ) {
+ case ctIntersection:
+ if (e1Wc2 > 0 && e2Wc2 > 0)
+ AddLocalMinPoly(e1, e2, Pt);
+ break;
+ case ctUnion:
+ if ( e1Wc2 <= 0 && e2Wc2 <= 0 )
+ AddLocalMinPoly(e1, e2, Pt);
+ break;
+ case ctDifference:
+ if (((e1->PolyTyp == ptClip) && (e1Wc2 > 0) && (e2Wc2 > 0)) ||
+ ((e1->PolyTyp == ptSubject) && (e1Wc2 <= 0) && (e2Wc2 <= 0)))
+ AddLocalMinPoly(e1, e2, Pt);
+ break;
+ case ctXor:
+ AddLocalMinPoly(e1, e2, Pt);
+ }
+ else
+ SwapSides( *e1, *e2 );
+ }
+
+ if( (e1stops != e2stops) &&
+ ( (e1stops && (e1->OutIdx >= 0)) || (e2stops && (e2->OutIdx >= 0)) ) )
+ {
+ SwapSides( *e1, *e2 );
+ SwapPolyIndexes( *e1, *e2 );
+ }
+
+ //finally, delete any non-contributing maxima edges ...
+ if( e1stops ) DeleteFromAEL( e1 );
+ if( e2stops ) DeleteFromAEL( e2 );
+}
+//------------------------------------------------------------------------------
+
+void Clipper::SetHoleState(TEdge *e, OutRec *outrec)
+{
+ bool IsHole = false;
+ TEdge *e2 = e->PrevInAEL;
+ while (e2)
+ {
+ if (e2->OutIdx >= 0 && e2->WindDelta != 0)
+ {
+ IsHole = !IsHole;
+ if (! outrec->FirstLeft)
+ outrec->FirstLeft = m_PolyOuts[e2->OutIdx];
+ }
+ e2 = e2->PrevInAEL;
+ }
+ if (IsHole) outrec->IsHole = true;
+}
+//------------------------------------------------------------------------------
+
+OutRec* GetLowermostRec(OutRec *outRec1, OutRec *outRec2)
+{
+ //work out which polygon fragment has the correct hole state ...
+ if (!outRec1->BottomPt)
+ outRec1->BottomPt = GetBottomPt(outRec1->Pts);
+ if (!outRec2->BottomPt)
+ outRec2->BottomPt = GetBottomPt(outRec2->Pts);
+ OutPt *OutPt1 = outRec1->BottomPt;
+ OutPt *OutPt2 = outRec2->BottomPt;
+ if (OutPt1->Pt.Y > OutPt2->Pt.Y) return outRec1;
+ else if (OutPt1->Pt.Y < OutPt2->Pt.Y) return outRec2;
+ else if (OutPt1->Pt.X < OutPt2->Pt.X) return outRec1;
+ else if (OutPt1->Pt.X > OutPt2->Pt.X) return outRec2;
+ else if (OutPt1->Next == OutPt1) return outRec2;
+ else if (OutPt2->Next == OutPt2) return outRec1;
+ else if (FirstIsBottomPt(OutPt1, OutPt2)) return outRec1;
+ else return outRec2;
+}
+//------------------------------------------------------------------------------
+
+bool Param1RightOfParam2(OutRec* outRec1, OutRec* outRec2)
+{
+ do
+ {
+ outRec1 = outRec1->FirstLeft;
+ if (outRec1 == outRec2) return true;
+ } while (outRec1);
+ return false;
+}
+//------------------------------------------------------------------------------
+
+OutRec* Clipper::GetOutRec(int Idx)
+{
+ OutRec* outrec = m_PolyOuts[Idx];
+ while (outrec != m_PolyOuts[outrec->Idx])
+ outrec = m_PolyOuts[outrec->Idx];
+ return outrec;
+}
+//------------------------------------------------------------------------------
+
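+//Called (via AddLocalMaxPoly) when two contributing edges with different
+//OutRecs meet: splice outRec2's point list onto outRec1's, then retire
+//outRec2 and redirect any AEL edge still using its index.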
+void Clipper::AppendPolygon(TEdge *e1, TEdge *e2)
+{
+ //get the start and ends of both output polygons ...
+ OutRec *outRec1 = m_PolyOuts[e1->OutIdx];
+ OutRec *outRec2 = m_PolyOuts[e2->OutIdx];
+
+ OutRec *holeStateRec;
+ if (Param1RightOfParam2(outRec1, outRec2))
+ holeStateRec = outRec2;
+ else if (Param1RightOfParam2(outRec2, outRec1))
+ holeStateRec = outRec1;
+ else
+ holeStateRec = GetLowermostRec(outRec1, outRec2);
+
+  //get the start and end points of both output polygons ...
+
+ OutPt* p1_lft = outRec1->Pts;
+ OutPt* p1_rt = p1_lft->Prev;
+ OutPt* p2_lft = outRec2->Pts;
+ OutPt* p2_rt = p2_lft->Prev;
+
+ EdgeSide Side;
+ //join e2 poly onto e1 poly and delete pointers to e2 ...
+ if( e1->Side == esLeft )
+ {
+ if( e2->Side == esLeft )
+ {
+ //z y x a b c
+ ReversePolyPtLinks(p2_lft);
+ p2_lft->Next = p1_lft;
+ p1_lft->Prev = p2_lft;
+ p1_rt->Next = p2_rt;
+ p2_rt->Prev = p1_rt;
+ outRec1->Pts = p2_rt;
+ } else
+ {
+ //x y z a b c
+ p2_rt->Next = p1_lft;
+ p1_lft->Prev = p2_rt;
+ p2_lft->Prev = p1_rt;
+ p1_rt->Next = p2_lft;
+ outRec1->Pts = p2_lft;
+ }
+ Side = esLeft;
+ } else
+ {
+ if( e2->Side == esRight )
+ {
+ //a b c z y x
+ ReversePolyPtLinks(p2_lft);
+ p1_rt->Next = p2_rt;
+ p2_rt->Prev = p1_rt;
+ p2_lft->Next = p1_lft;
+ p1_lft->Prev = p2_lft;
+ } else
+ {
+ //a b c x y z
+ p1_rt->Next = p2_lft;
+ p2_lft->Prev = p1_rt;
+ p1_lft->Prev = p2_rt;
+ p2_rt->Next = p1_lft;
+ }
+ Side = esRight;
+ }
+
+ outRec1->BottomPt = 0;
+ if (holeStateRec == outRec2)
+ {
+ if (outRec2->FirstLeft != outRec1)
+ outRec1->FirstLeft = outRec2->FirstLeft;
+ outRec1->IsHole = outRec2->IsHole;
+ }
+ outRec2->Pts = 0;
+ outRec2->BottomPt = 0;
+ outRec2->FirstLeft = outRec1;
+
+ int OKIdx = e1->OutIdx;
+ int ObsoleteIdx = e2->OutIdx;
+
+ e1->OutIdx = Unassigned; //nb: safe because we only get here via AddLocalMaxPoly
+ e2->OutIdx = Unassigned;
+
+ TEdge* e = m_ActiveEdges;
+ while( e )
+ {
+ if( e->OutIdx == ObsoleteIdx )
+ {
+ e->OutIdx = OKIdx;
+ e->Side = Side;
+ break;
+ }
+ e = e->NextInAEL;
+ }
+
+ outRec2->Idx = outRec1->Idx;
+}
+//------------------------------------------------------------------------------
+
+OutRec* Clipper::CreateOutRec()
+{
+ OutRec* result = new OutRec;
+ result->IsHole = false;
+ result->IsOpen = false;
+ result->FirstLeft = 0;
+ result->Pts = 0;
+ result->BottomPt = 0;
+ result->PolyNd = 0;
+ m_PolyOuts.push_back(result);
+ result->Idx = (int)m_PolyOuts.size()-1;
+ return result;
+}
+//------------------------------------------------------------------------------
+
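+//Append pt to the output polygon that edge e contributes to, creating a new
+//OutRec first if the edge has none. Points go to the front of the circular
+//list for left-side edges and to the back for right-side edges.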
+OutPt* Clipper::AddOutPt(TEdge *e, const IntPoint &pt)
+{
+ bool ToFront = (e->Side == esLeft);
+ if( e->OutIdx < 0 )
+ {
+ OutRec *outRec = CreateOutRec();
+ outRec->IsOpen = (e->WindDelta == 0);
+ OutPt* newOp = new OutPt;
+ outRec->Pts = newOp;
+ newOp->Idx = outRec->Idx;
+ newOp->Pt = pt;
+ newOp->Next = newOp;
+ newOp->Prev = newOp;
+ if (!outRec->IsOpen)
+ SetHoleState(e, outRec);
+#ifdef use_xyz
+ if (pt == e->Bot) newOp->Pt = e->Bot;
+ else if (pt == e->Top) newOp->Pt = e->Top;
+ else SetZ(newOp->Pt, *e);
+#endif
+ e->OutIdx = outRec->Idx; //nb: do this after SetZ !
+ return newOp;
+ } else
+ {
+ OutRec *outRec = m_PolyOuts[e->OutIdx];
+ //OutRec.Pts is the 'Left-most' point & OutRec.Pts.Prev is the 'Right-most'
+ OutPt* op = outRec->Pts;
+
+ if (ToFront && (pt == op->Pt)) return op;
+ else if (!ToFront && (pt == op->Prev->Pt)) return op->Prev;
+
+ OutPt* newOp = new OutPt;
+ newOp->Idx = outRec->Idx;
+ newOp->Pt = pt;
+ newOp->Next = op;
+ newOp->Prev = op->Prev;
+ newOp->Prev->Next = newOp;
+ op->Prev = newOp;
+ if (ToFront) outRec->Pts = newOp;
+#ifdef use_xyz
+ if (pt == e->Bot) newOp->Pt = e->Bot;
+ else if (pt == e->Top) newOp->Pt = e->Top;
+ else SetZ(newOp->Pt, *e);
+#endif
+ return newOp;
+ }
+}
+//------------------------------------------------------------------------------
+
+void Clipper::ProcessHorizontals(bool IsTopOfScanbeam)
+{
+ TEdge* horzEdge = m_SortedEdges;
+ while(horzEdge)
+ {
+ DeleteFromSEL(horzEdge);
+ ProcessHorizontal(horzEdge, IsTopOfScanbeam);
+ horzEdge = m_SortedEdges;
+ }
+}
+//------------------------------------------------------------------------------
+
+inline bool IsMinima(TEdge *e)
+{
+ return e && (e->Prev->NextInLML != e) && (e->Next->NextInLML != e);
+}
+//------------------------------------------------------------------------------
+
+inline bool IsMaxima(TEdge *e, const cInt Y)
+{
+ return e && e->Top.Y == Y && !e->NextInLML;
+}
+//------------------------------------------------------------------------------
+
+inline bool IsIntermediate(TEdge *e, const cInt Y)
+{
+ return e->Top.Y == Y && e->NextInLML;
+}
+//------------------------------------------------------------------------------
+
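+//An edge is at a local maxima when its Top vertex is shared with another bound
+//that also ends there; GetMaximaPair() returns that other edge, or 0 if the
+//pair can't be used.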
+TEdge *GetMaximaPair(TEdge *e)
+{
+ TEdge* result = 0;
+ if ((e->Next->Top == e->Top) && !e->Next->NextInLML)
+ result = e->Next;
+ else if ((e->Prev->Top == e->Top) && !e->Prev->NextInLML)
+ result = e->Prev;
+
+ if (result && (result->OutIdx == Skip ||
+    //return 0 if both NextInAEL & PrevInAEL are nil & the edge isn't horizontal ...
+ (result->NextInAEL == result->PrevInAEL && !IsHorizontal(*result))))
+ return 0;
+ return result;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::SwapPositionsInAEL(TEdge *Edge1, TEdge *Edge2)
+{
+ //check that one or other edge hasn't already been removed from AEL ...
+ if (Edge1->NextInAEL == Edge1->PrevInAEL ||
+ Edge2->NextInAEL == Edge2->PrevInAEL) return;
+
+ if( Edge1->NextInAEL == Edge2 )
+ {
+ TEdge* Next = Edge2->NextInAEL;
+ if( Next ) Next->PrevInAEL = Edge1;
+ TEdge* Prev = Edge1->PrevInAEL;
+ if( Prev ) Prev->NextInAEL = Edge2;
+ Edge2->PrevInAEL = Prev;
+ Edge2->NextInAEL = Edge1;
+ Edge1->PrevInAEL = Edge2;
+ Edge1->NextInAEL = Next;
+ }
+ else if( Edge2->NextInAEL == Edge1 )
+ {
+ TEdge* Next = Edge1->NextInAEL;
+ if( Next ) Next->PrevInAEL = Edge2;
+ TEdge* Prev = Edge2->PrevInAEL;
+ if( Prev ) Prev->NextInAEL = Edge1;
+ Edge1->PrevInAEL = Prev;
+ Edge1->NextInAEL = Edge2;
+ Edge2->PrevInAEL = Edge1;
+ Edge2->NextInAEL = Next;
+ }
+ else
+ {
+ TEdge* Next = Edge1->NextInAEL;
+ TEdge* Prev = Edge1->PrevInAEL;
+ Edge1->NextInAEL = Edge2->NextInAEL;
+ if( Edge1->NextInAEL ) Edge1->NextInAEL->PrevInAEL = Edge1;
+ Edge1->PrevInAEL = Edge2->PrevInAEL;
+ if( Edge1->PrevInAEL ) Edge1->PrevInAEL->NextInAEL = Edge1;
+ Edge2->NextInAEL = Next;
+ if( Edge2->NextInAEL ) Edge2->NextInAEL->PrevInAEL = Edge2;
+ Edge2->PrevInAEL = Prev;
+ if( Edge2->PrevInAEL ) Edge2->PrevInAEL->NextInAEL = Edge2;
+ }
+
+ if( !Edge1->PrevInAEL ) m_ActiveEdges = Edge1;
+ else if( !Edge2->PrevInAEL ) m_ActiveEdges = Edge2;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::SwapPositionsInSEL(TEdge *Edge1, TEdge *Edge2)
+{
+ if( !( Edge1->NextInSEL ) && !( Edge1->PrevInSEL ) ) return;
+ if( !( Edge2->NextInSEL ) && !( Edge2->PrevInSEL ) ) return;
+
+ if( Edge1->NextInSEL == Edge2 )
+ {
+ TEdge* Next = Edge2->NextInSEL;
+ if( Next ) Next->PrevInSEL = Edge1;
+ TEdge* Prev = Edge1->PrevInSEL;
+ if( Prev ) Prev->NextInSEL = Edge2;
+ Edge2->PrevInSEL = Prev;
+ Edge2->NextInSEL = Edge1;
+ Edge1->PrevInSEL = Edge2;
+ Edge1->NextInSEL = Next;
+ }
+ else if( Edge2->NextInSEL == Edge1 )
+ {
+ TEdge* Next = Edge1->NextInSEL;
+ if( Next ) Next->PrevInSEL = Edge2;
+ TEdge* Prev = Edge2->PrevInSEL;
+ if( Prev ) Prev->NextInSEL = Edge1;
+ Edge1->PrevInSEL = Prev;
+ Edge1->NextInSEL = Edge2;
+ Edge2->PrevInSEL = Edge1;
+ Edge2->NextInSEL = Next;
+ }
+ else
+ {
+ TEdge* Next = Edge1->NextInSEL;
+ TEdge* Prev = Edge1->PrevInSEL;
+ Edge1->NextInSEL = Edge2->NextInSEL;
+ if( Edge1->NextInSEL ) Edge1->NextInSEL->PrevInSEL = Edge1;
+ Edge1->PrevInSEL = Edge2->PrevInSEL;
+ if( Edge1->PrevInSEL ) Edge1->PrevInSEL->NextInSEL = Edge1;
+ Edge2->NextInSEL = Next;
+ if( Edge2->NextInSEL ) Edge2->NextInSEL->PrevInSEL = Edge2;
+ Edge2->PrevInSEL = Prev;
+ if( Edge2->PrevInSEL ) Edge2->PrevInSEL->NextInSEL = Edge2;
+ }
+
+ if( !Edge1->PrevInSEL ) m_SortedEdges = Edge1;
+ else if( !Edge2->PrevInSEL ) m_SortedEdges = Edge2;
+}
+//------------------------------------------------------------------------------
+
+TEdge* GetNextInAEL(TEdge *e, Direction dir)
+{
+ return dir == dLeftToRight ? e->NextInAEL : e->PrevInAEL;
+}
+//------------------------------------------------------------------------------
+
+void GetHorzDirection(TEdge& HorzEdge, Direction& Dir, cInt& Left, cInt& Right)
+{
+ if (HorzEdge.Bot.X < HorzEdge.Top.X)
+ {
+ Left = HorzEdge.Bot.X;
+ Right = HorzEdge.Top.X;
+ Dir = dLeftToRight;
+ } else
+ {
+ Left = HorzEdge.Top.X;
+ Right = HorzEdge.Bot.X;
+ Dir = dRightToLeft;
+ }
+}
+//------------------------------------------------------------------------
+
+void Clipper::PrepareHorzJoins(TEdge* horzEdge, bool isTopOfScanbeam)
+{
+ //get the last Op for this horizontal edge
+ //the point may be anywhere along the horizontal ...
+ OutPt* outPt = m_PolyOuts[horzEdge->OutIdx]->Pts;
+ if (horzEdge->Side != esLeft) outPt = outPt->Prev;
+
+ //First, match up overlapping horizontal edges (eg when one polygon's
+ //intermediate horz edge overlaps an intermediate horz edge of another, or
+ //when one polygon sits on top of another) ...
+ for (JoinList::size_type i = 0; i < m_GhostJoins.size(); ++i)
+ {
+ Join* j = m_GhostJoins[i];
+ if (HorzSegmentsOverlap(j->OutPt1->Pt, j->OffPt, horzEdge->Bot, horzEdge->Top))
+ AddJoin(j->OutPt1, outPt, j->OffPt);
+ }
+  //Also, since horizontal edges at the top of one scanbeam are often removed
+  //from the AEL before we process the horizontal edges at the bottom of the
+  //next, we need to create 'ghost' Join records of 'contributing' horizontals
+  //that we can compare with horizontals at the bottom of the next scanbeam.
+ if (isTopOfScanbeam)
+ {
+ if (outPt->Pt == horzEdge->Top)
+ AddGhostJoin(outPt, horzEdge->Bot);
+ else
+ AddGhostJoin(outPt, horzEdge->Top);
+ }
+}
+//------------------------------------------------------------------------------
+
+/*******************************************************************************
+* Notes: Horizontal edges (HEs) at scanline intersections (ie at the Top or *
+* Bottom of a scanbeam) are processed as if layered. The order in which HEs *
+* are processed doesn't matter. HEs intersect with other HE Bot.Xs only [#] *
+* (or they could intersect with Top.Xs only, ie EITHER Bot.Xs OR Top.Xs), *
+* and with other non-horizontal edges [*]. Once these intersections are *
+* processed, intermediate HEs then 'promote' the Edge above (NextInLML) into *
+* the AEL. These 'promoted' edges may in turn intersect [%] with other HEs. *
+*******************************************************************************/
+
+void Clipper::ProcessHorizontal(TEdge *horzEdge, bool isTopOfScanbeam)
+{
+ Direction dir;
+ cInt horzLeft, horzRight;
+
+ GetHorzDirection(*horzEdge, dir, horzLeft, horzRight);
+
+ TEdge* eLastHorz = horzEdge, *eMaxPair = 0;
+ while (eLastHorz->NextInLML && IsHorizontal(*eLastHorz->NextInLML))
+ eLastHorz = eLastHorz->NextInLML;
+ if (!eLastHorz->NextInLML)
+ eMaxPair = GetMaximaPair(eLastHorz);
+
+ for (;;)
+ {
+ bool IsLastHorz = (horzEdge == eLastHorz);
+ TEdge* e = GetNextInAEL(horzEdge, dir);
+ while(e)
+ {
+ //Break if we've got to the end of an intermediate horizontal edge ...
+ //nb: Smaller Dx's are to the right of larger Dx's ABOVE the horizontal.
+ if (e->Curr.X == horzEdge->Top.X && horzEdge->NextInLML &&
+ e->Dx < horzEdge->NextInLML->Dx) break;
+
+ TEdge* eNext = GetNextInAEL(e, dir); //saves eNext for later
+
+ if ((dir == dLeftToRight && e->Curr.X <= horzRight) ||
+ (dir == dRightToLeft && e->Curr.X >= horzLeft))
+ {
+ if (horzEdge->OutIdx >= 0 && horzEdge->WindDelta != 0)
+ PrepareHorzJoins(horzEdge, isTopOfScanbeam);
+ //so far we're still in range of the horizontal Edge but make sure
+        //we're at the last of consecutive horizontals when matching with eMaxPair
+ if(e == eMaxPair && IsLastHorz)
+ {
+ if (dir == dLeftToRight)
+ IntersectEdges(horzEdge, e, e->Top);
+ else
+ IntersectEdges(e, horzEdge, e->Top);
+ if (eMaxPair->OutIdx >= 0) throw clipperException("ProcessHorizontal error");
+ return;
+ }
+ else if(dir == dLeftToRight)
+ {
+ IntPoint Pt = IntPoint(e->Curr.X, horzEdge->Curr.Y);
+ IntersectEdges(horzEdge, e, Pt, true);
+ }
+ else
+ {
+ IntPoint Pt = IntPoint(e->Curr.X, horzEdge->Curr.Y);
+ IntersectEdges( e, horzEdge, Pt, true);
+ }
+ SwapPositionsInAEL( horzEdge, e );
+ }
+ else if( (dir == dLeftToRight && e->Curr.X >= horzRight) ||
+ (dir == dRightToLeft && e->Curr.X <= horzLeft) ) break;
+ e = eNext;
+ } //end while
+
+ if (horzEdge->OutIdx >= 0 && horzEdge->WindDelta != 0)
+ PrepareHorzJoins(horzEdge, isTopOfScanbeam);
+
+ if (horzEdge->NextInLML && IsHorizontal(*horzEdge->NextInLML))
+ {
+ UpdateEdgeIntoAEL(horzEdge);
+ if (horzEdge->OutIdx >= 0) AddOutPt(horzEdge, horzEdge->Bot);
+ GetHorzDirection(*horzEdge, dir, horzLeft, horzRight);
+ } else
+ break;
+ } //end for (;;)
+
+ if(horzEdge->NextInLML)
+ {
+ if(horzEdge->OutIdx >= 0)
+ {
+ OutPt* op1 = AddOutPt( horzEdge, horzEdge->Top);
+ UpdateEdgeIntoAEL(horzEdge);
+ if (horzEdge->WindDelta == 0) return;
+ //nb: HorzEdge is no longer horizontal here
+ TEdge* ePrev = horzEdge->PrevInAEL;
+ TEdge* eNext = horzEdge->NextInAEL;
+ if (ePrev && ePrev->Curr.X == horzEdge->Bot.X &&
+ ePrev->Curr.Y == horzEdge->Bot.Y && ePrev->WindDelta != 0 &&
+ (ePrev->OutIdx >= 0 && ePrev->Curr.Y > ePrev->Top.Y &&
+ SlopesEqual(*horzEdge, *ePrev, m_UseFullRange)))
+ {
+ OutPt* op2 = AddOutPt(ePrev, horzEdge->Bot);
+ AddJoin(op1, op2, horzEdge->Top);
+ }
+ else if (eNext && eNext->Curr.X == horzEdge->Bot.X &&
+ eNext->Curr.Y == horzEdge->Bot.Y && eNext->WindDelta != 0 &&
+ eNext->OutIdx >= 0 && eNext->Curr.Y > eNext->Top.Y &&
+ SlopesEqual(*horzEdge, *eNext, m_UseFullRange))
+ {
+ OutPt* op2 = AddOutPt(eNext, horzEdge->Bot);
+ AddJoin(op1, op2, horzEdge->Top);
+ }
+ }
+ else
+ UpdateEdgeIntoAEL(horzEdge);
+ }
+ else if (eMaxPair)
+ {
+ if (eMaxPair->OutIdx >= 0)
+ {
+ if (dir == dLeftToRight)
+ IntersectEdges(horzEdge, eMaxPair, horzEdge->Top);
+ else
+ IntersectEdges(eMaxPair, horzEdge, horzEdge->Top);
+ if (eMaxPair->OutIdx >= 0)
+ throw clipperException("ProcessHorizontal error");
+ } else
+ {
+ DeleteFromAEL(horzEdge);
+ DeleteFromAEL(eMaxPair);
+ }
+ } else
+ {
+ if (horzEdge->OutIdx >= 0) AddOutPt(horzEdge, horzEdge->Top);
+ DeleteFromAEL(horzEdge);
+ }
+}
+//------------------------------------------------------------------------------
+
+void Clipper::UpdateEdgeIntoAEL(TEdge *&e)
+{
+  if( !e->NextInLML )
+    throw clipperException("UpdateEdgeIntoAEL: invalid call");
+
+ e->NextInLML->OutIdx = e->OutIdx;
+ TEdge* AelPrev = e->PrevInAEL;
+ TEdge* AelNext = e->NextInAEL;
+ if (AelPrev) AelPrev->NextInAEL = e->NextInLML;
+ else m_ActiveEdges = e->NextInLML;
+ if (AelNext) AelNext->PrevInAEL = e->NextInLML;
+ e->NextInLML->Side = e->Side;
+ e->NextInLML->WindDelta = e->WindDelta;
+ e->NextInLML->WindCnt = e->WindCnt;
+ e->NextInLML->WindCnt2 = e->WindCnt2;
+ e = e->NextInLML;
+ e->Curr = e->Bot;
+ e->PrevInAEL = AelPrev;
+ e->NextInAEL = AelNext;
+ if (!IsHorizontal(*e)) InsertScanbeam(e->Top.Y);
+}
+//------------------------------------------------------------------------------
+
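+//Find and process all edge intersections between botY and topY.
+//FixupIntersectionOrder() reorders the list so that each intersection is
+//between edges that are adjacent in the SEL when it's processed.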
+bool Clipper::ProcessIntersections(const cInt botY, const cInt topY)
+{
+ if( !m_ActiveEdges ) return true;
+ try {
+ BuildIntersectList(botY, topY);
+ size_t IlSize = m_IntersectList.size();
+ if (IlSize == 0) return true;
+ if (IlSize == 1 || FixupIntersectionOrder()) ProcessIntersectList();
+ else return false;
+ }
+ catch(...)
+ {
+ m_SortedEdges = 0;
+ DisposeIntersectNodes();
+ throw clipperException("ProcessIntersections error");
+ }
+ m_SortedEdges = 0;
+ return true;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::DisposeIntersectNodes()
+{
+ for (size_t i = 0; i < m_IntersectList.size(); ++i )
+ delete m_IntersectList[i];
+ m_IntersectList.clear();
+}
+//------------------------------------------------------------------------------
+
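+//Find every edge crossing within the current scanbeam using a bubble sort of
+//the SEL: each swap of adjacent edges marks one intersection, which is
+//recorded in m_IntersectList for ProcessIntersectList() to act on.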
+void Clipper::BuildIntersectList(const cInt botY, const cInt topY)
+{
+ if ( !m_ActiveEdges ) return;
+
+ //prepare for sorting ...
+ TEdge* e = m_ActiveEdges;
+ m_SortedEdges = e;
+ while( e )
+ {
+ e->PrevInSEL = e->PrevInAEL;
+ e->NextInSEL = e->NextInAEL;
+ e->Curr.X = TopX( *e, topY );
+ e = e->NextInAEL;
+ }
+
+ //bubblesort ...
+ bool isModified;
+ do
+ {
+ isModified = false;
+ e = m_SortedEdges;
+ while( e->NextInSEL )
+ {
+ TEdge *eNext = e->NextInSEL;
+ IntPoint Pt;
+ if(e->Curr.X > eNext->Curr.X)
+ {
+ if (!IntersectPoint(*e, *eNext, Pt, m_UseFullRange) && e->Curr.X > eNext->Curr.X +1)
+ throw clipperException("Intersection error");
+ if (Pt.Y > botY)
+ {
+ Pt.Y = botY;
+ if (std::fabs(e->Dx) > std::fabs(eNext->Dx))
+            Pt.X = TopX(*eNext, botY);
+          else
+ Pt.X = TopX(*e, botY);
+ }
+
+ IntersectNode * newNode = new IntersectNode;
+ newNode->Edge1 = e;
+ newNode->Edge2 = eNext;
+ newNode->Pt = Pt;
+ m_IntersectList.push_back(newNode);
+
+ SwapPositionsInSEL(e, eNext);
+ isModified = true;
+ }
+ else
+ e = eNext;
+ }
+ if( e->PrevInSEL ) e->PrevInSEL->NextInSEL = 0;
+ else break;
+ }
+ while ( isModified );
+ m_SortedEdges = 0; //important
+}
+//------------------------------------------------------------------------------
+
+
+void Clipper::ProcessIntersectList()
+{
+ for (size_t i = 0; i < m_IntersectList.size(); ++i)
+ {
+ IntersectNode* iNode = m_IntersectList[i];
+ {
+ IntersectEdges( iNode->Edge1, iNode->Edge2, iNode->Pt, true);
+ SwapPositionsInAEL( iNode->Edge1 , iNode->Edge2 );
+ }
+ delete iNode;
+ }
+ m_IntersectList.clear();
+}
+//------------------------------------------------------------------------------
+
+bool IntersectListSort(IntersectNode* node1, IntersectNode* node2)
+{
+ return node2->Pt.Y < node1->Pt.Y;
+}
+//------------------------------------------------------------------------------
+
+inline bool EdgesAdjacent(const IntersectNode &inode)
+{
+ return (inode.Edge1->NextInSEL == inode.Edge2) ||
+ (inode.Edge1->PrevInSEL == inode.Edge2);
+}
+//------------------------------------------------------------------------------
+
+bool Clipper::FixupIntersectionOrder()
+{
+ //pre-condition: intersections are sorted Bottom-most first.
+ //Now it's crucial that intersections are made only between adjacent edges,
+ //so to ensure this the order of intersections may need adjusting ...
+ CopyAELToSEL();
+ std::sort(m_IntersectList.begin(), m_IntersectList.end(), IntersectListSort);
+ size_t cnt = m_IntersectList.size();
+ for (size_t i = 0; i < cnt; ++i)
+ {
+ if (!EdgesAdjacent(*m_IntersectList[i]))
+ {
+ size_t j = i + 1;
+ while (j < cnt && !EdgesAdjacent(*m_IntersectList[j])) j++;
+ if (j == cnt) return false;
+ std::swap(m_IntersectList[i], m_IntersectList[j]);
+ }
+ SwapPositionsInSEL(m_IntersectList[i]->Edge1, m_IntersectList[i]->Edge2);
+ }
+ return true;
+}
+//------------------------------------------------------------------------------
+
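+//Handle a local maxima at e->Top: intersect e with every edge lying between
+//it and its maxima pair in the AEL, then close or merge the output
+//polygon(s) and remove both edges from the AEL.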
+void Clipper::DoMaxima(TEdge *e)
+{
+ TEdge* eMaxPair = GetMaximaPair(e);
+ if (!eMaxPair)
+ {
+ if (e->OutIdx >= 0)
+ AddOutPt(e, e->Top);
+ DeleteFromAEL(e);
+ return;
+ }
+
+ TEdge* eNext = e->NextInAEL;
+ while(eNext && eNext != eMaxPair)
+ {
+ IntersectEdges(e, eNext, e->Top, true);
+ SwapPositionsInAEL(e, eNext);
+ eNext = e->NextInAEL;
+ }
+
+ if(e->OutIdx == Unassigned && eMaxPair->OutIdx == Unassigned)
+ {
+ DeleteFromAEL(e);
+ DeleteFromAEL(eMaxPair);
+ }
+ else if( e->OutIdx >= 0 && eMaxPair->OutIdx >= 0 )
+ {
+ IntersectEdges( e, eMaxPair, e->Top);
+ }
+#ifdef use_lines
+ else if (e->WindDelta == 0)
+ {
+ if (e->OutIdx >= 0)
+ {
+ AddOutPt(e, e->Top);
+ e->OutIdx = Unassigned;
+ }
+ DeleteFromAEL(e);
+
+ if (eMaxPair->OutIdx >= 0)
+ {
+ AddOutPt(eMaxPair, e->Top);
+ eMaxPair->OutIdx = Unassigned;
+ }
+ DeleteFromAEL(eMaxPair);
+ }
+#endif
+ else throw clipperException("DoMaxima error");
+}
+//------------------------------------------------------------------------------
+
+void Clipper::ProcessEdgesAtTopOfScanbeam(const cInt topY)
+{
+ TEdge* e = m_ActiveEdges;
+ while( e )
+ {
+ //1. process maxima, treating them as if they're 'bent' horizontal edges,
+ // but exclude maxima with horizontal edges. nb: e can't be a horizontal.
+ bool IsMaximaEdge = IsMaxima(e, topY);
+
+ if(IsMaximaEdge)
+ {
+ TEdge* eMaxPair = GetMaximaPair(e);
+ IsMaximaEdge = (!eMaxPair || !IsHorizontal(*eMaxPair));
+ }
+
+ if(IsMaximaEdge)
+ {
+ TEdge* ePrev = e->PrevInAEL;
+ DoMaxima(e);
+ if( !ePrev ) e = m_ActiveEdges;
+ else e = ePrev->NextInAEL;
+ }
+ else
+ {
+ //2. promote horizontal edges, otherwise update Curr.X and Curr.Y ...
+ if (IsIntermediate(e, topY) && IsHorizontal(*e->NextInLML))
+ {
+ UpdateEdgeIntoAEL(e);
+ if (e->OutIdx >= 0)
+ AddOutPt(e, e->Bot);
+ AddEdgeToSEL(e);
+ }
+ else
+ {
+ e->Curr.X = TopX( *e, topY );
+ e->Curr.Y = topY;
+ }
+
+ if (m_StrictSimple)
+ {
+ TEdge* ePrev = e->PrevInAEL;
+ if ((e->OutIdx >= 0) && (e->WindDelta != 0) && ePrev && (ePrev->OutIdx >= 0) &&
+ (ePrev->Curr.X == e->Curr.X) && (ePrev->WindDelta != 0))
+ {
+ OutPt* op = AddOutPt(ePrev, e->Curr);
+ OutPt* op2 = AddOutPt(e, e->Curr);
+ AddJoin(op, op2, e->Curr); //StrictlySimple (type-3) join
+ }
+ }
+
+ e = e->NextInAEL;
+ }
+ }
+
+ //3. Process horizontals at the Top of the scanbeam ...
+ ProcessHorizontals(true);
+
+ //4. Promote intermediate vertices ...
+ e = m_ActiveEdges;
+ while(e)
+ {
+ if(IsIntermediate(e, topY))
+ {
+ OutPt* op = 0;
+ if( e->OutIdx >= 0 )
+ op = AddOutPt(e, e->Top);
+ UpdateEdgeIntoAEL(e);
+
+ //if output polygons share an edge, they'll need joining later ...
+ TEdge* ePrev = e->PrevInAEL;
+ TEdge* eNext = e->NextInAEL;
+ if (ePrev && ePrev->Curr.X == e->Bot.X &&
+ ePrev->Curr.Y == e->Bot.Y && op &&
+ ePrev->OutIdx >= 0 && ePrev->Curr.Y > ePrev->Top.Y &&
+ SlopesEqual(*e, *ePrev, m_UseFullRange) &&
+ (e->WindDelta != 0) && (ePrev->WindDelta != 0))
+ {
+ OutPt* op2 = AddOutPt(ePrev, e->Bot);
+ AddJoin(op, op2, e->Top);
+ }
+ else if (eNext && eNext->Curr.X == e->Bot.X &&
+ eNext->Curr.Y == e->Bot.Y && op &&
+ eNext->OutIdx >= 0 && eNext->Curr.Y > eNext->Top.Y &&
+ SlopesEqual(*e, *eNext, m_UseFullRange) &&
+ (e->WindDelta != 0) && (eNext->WindDelta != 0))
+ {
+ OutPt* op2 = AddOutPt(eNext, e->Bot);
+ AddJoin(op, op2, e->Top);
+ }
+ }
+ e = e->NextInAEL;
+ }
+}
+//------------------------------------------------------------------------------
+
+void Clipper::FixupOutPolygon(OutRec &outrec)
+{
+ //FixupOutPolygon() - removes duplicate points and simplifies consecutive
+ //parallel edges by removing the middle vertex.
+ OutPt *lastOK = 0;
+ outrec.BottomPt = 0;
+ OutPt *pp = outrec.Pts;
+
+ for (;;)
+ {
+ if (pp->Prev == pp || pp->Prev == pp->Next )
+ {
+ DisposeOutPts(pp);
+ outrec.Pts = 0;
+ return;
+ }
+
+ //test for duplicate points and collinear edges ...
+ if ((pp->Pt == pp->Next->Pt) || (pp->Pt == pp->Prev->Pt) ||
+ (SlopesEqual(pp->Prev->Pt, pp->Pt, pp->Next->Pt, m_UseFullRange) &&
+ (!m_PreserveCollinear ||
+ !Pt2IsBetweenPt1AndPt3(pp->Prev->Pt, pp->Pt, pp->Next->Pt))))
+ {
+ lastOK = 0;
+ OutPt *tmp = pp;
+ pp->Prev->Next = pp->Next;
+ pp->Next->Prev = pp->Prev;
+ pp = pp->Prev;
+ delete tmp;
+ }
+ else if (pp == lastOK) break;
+ else
+ {
+ if (!lastOK) lastOK = pp;
+ pp = pp->Next;
+ }
+ }
+ outrec.Pts = pp;
+}
+//------------------------------------------------------------------------------
+
+int PointCount(OutPt *Pts)
+{
+ if (!Pts) return 0;
+ int result = 0;
+ OutPt* p = Pts;
+ do
+ {
+ result++;
+ p = p->Next;
+ }
+ while (p != Pts);
+ return result;
+}
+//------------------------------------------------------------------------------
+
+void Clipper::BuildResult(Paths &polys)
+{
+ polys.reserve(m_PolyOuts.size());
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); ++i)
+ {
+ if (!m_PolyOuts[i]->Pts) continue;
+ Path pg;
+ OutPt* p = m_PolyOuts[i]->Pts->Prev;
+ int cnt = PointCount(p);
+ if (cnt < 2) continue;
+ pg.reserve(cnt);
+    for (int j = 0; j < cnt; ++j)
+ {
+ pg.push_back(p->Pt);
+ p = p->Prev;
+ }
+ polys.push_back(pg);
+ }
+}
+//------------------------------------------------------------------------------
+
+void Clipper::BuildResult2(PolyTree& polytree)
+{
+ polytree.Clear();
+ polytree.AllNodes.reserve(m_PolyOuts.size());
+ //add each output polygon/contour to polytree ...
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); i++)
+ {
+ OutRec* outRec = m_PolyOuts[i];
+ int cnt = PointCount(outRec->Pts);
+ if ((outRec->IsOpen && cnt < 2) || (!outRec->IsOpen && cnt < 3)) continue;
+ FixHoleLinkage(*outRec);
+ PolyNode* pn = new PolyNode();
+ //nb: polytree takes ownership of all the PolyNodes
+ polytree.AllNodes.push_back(pn);
+ outRec->PolyNd = pn;
+ pn->Parent = 0;
+ pn->Index = 0;
+ pn->Contour.reserve(cnt);
+ OutPt *op = outRec->Pts->Prev;
+ for (int j = 0; j < cnt; j++)
+ {
+ pn->Contour.push_back(op->Pt);
+ op = op->Prev;
+ }
+ }
+
+ //fixup PolyNode links etc ...
+ polytree.Childs.reserve(m_PolyOuts.size());
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); i++)
+ {
+ OutRec* outRec = m_PolyOuts[i];
+ if (!outRec->PolyNd) continue;
+ if (outRec->IsOpen)
+ {
+ outRec->PolyNd->m_IsOpen = true;
+ polytree.AddChild(*outRec->PolyNd);
+ }
+ else if (outRec->FirstLeft && outRec->FirstLeft->PolyNd)
+ outRec->FirstLeft->PolyNd->AddChild(*outRec->PolyNd);
+ else
+ polytree.AddChild(*outRec->PolyNd);
+ }
+}
+//------------------------------------------------------------------------------
+
+void SwapIntersectNodes(IntersectNode &int1, IntersectNode &int2)
+{
+  //just swap the contents (because fIntersectNodes is a singly linked list)
+ IntersectNode inode = int1; //gets a copy of Int1
+ int1.Edge1 = int2.Edge1;
+ int1.Edge2 = int2.Edge2;
+ int1.Pt = int2.Pt;
+ int2.Edge1 = inode.Edge1;
+ int2.Edge2 = inode.Edge2;
+ int2.Pt = inode.Pt;
+}
+//------------------------------------------------------------------------------
+
+inline bool E2InsertsBeforeE1(TEdge &e1, TEdge &e2)
+{
+ if (e2.Curr.X == e1.Curr.X)
+ {
+ if (e2.Top.Y > e1.Top.Y)
+ return e2.Top.X < TopX(e1, e2.Top.Y);
+ else return e1.Top.X > TopX(e2, e1.Top.Y);
+ }
+ else return e2.Curr.X < e1.Curr.X;
+}
+//------------------------------------------------------------------------------
+
+bool GetOverlap(const cInt a1, const cInt a2, const cInt b1, const cInt b2,
+ cInt& Left, cInt& Right)
+{
+ if (a1 < a2)
+ {
+ if (b1 < b2) {Left = std::max(a1,b1); Right = std::min(a2,b2);}
+ else {Left = std::max(a1,b2); Right = std::min(a2,b1);}
+ }
+ else
+ {
+ if (b1 < b2) {Left = std::max(a2,b1); Right = std::min(a1,b2);}
+ else {Left = std::max(a2,b2); Right = std::min(a1,b1);}
+ }
+ return Left < Right;
+}
+//------------------------------------------------------------------------------
+
+inline void UpdateOutPtIdxs(OutRec& outrec)
+{
+ OutPt* op = outrec.Pts;
+ do
+ {
+ op->Idx = outrec.Idx;
+ op = op->Prev;
+ }
+ while(op != outrec.Pts);
+}
+//------------------------------------------------------------------------------
+
+void Clipper::InsertEdgeIntoAEL(TEdge *edge, TEdge* startEdge)
+{
+ if(!m_ActiveEdges)
+ {
+ edge->PrevInAEL = 0;
+ edge->NextInAEL = 0;
+ m_ActiveEdges = edge;
+ }
+ else if(!startEdge && E2InsertsBeforeE1(*m_ActiveEdges, *edge))
+ {
+ edge->PrevInAEL = 0;
+ edge->NextInAEL = m_ActiveEdges;
+ m_ActiveEdges->PrevInAEL = edge;
+ m_ActiveEdges = edge;
+ }
+ else
+ {
+ if(!startEdge) startEdge = m_ActiveEdges;
+ while(startEdge->NextInAEL &&
+ !E2InsertsBeforeE1(*startEdge->NextInAEL , *edge))
+ startEdge = startEdge->NextInAEL;
+ edge->NextInAEL = startEdge->NextInAEL;
+ if(startEdge->NextInAEL) startEdge->NextInAEL->PrevInAEL = edge;
+ edge->PrevInAEL = startEdge;
+ startEdge->NextInAEL = edge;
+ }
+}
+//----------------------------------------------------------------------
+
+OutPt* DupOutPt(OutPt* outPt, bool InsertAfter)
+{
+ OutPt* result = new OutPt;
+ result->Pt = outPt->Pt;
+ result->Idx = outPt->Idx;
+ if (InsertAfter)
+ {
+ result->Next = outPt->Next;
+ result->Prev = outPt;
+ outPt->Next->Prev = result;
+ outPt->Next = result;
+ }
+ else
+ {
+ result->Prev = outPt->Prev;
+ result->Next = outPt;
+ outPt->Prev->Next = result;
+ outPt->Prev = result;
+ }
+ return result;
+}
+//------------------------------------------------------------------------------
+
+bool JoinHorz(OutPt* op1, OutPt* op1b, OutPt* op2, OutPt* op2b,
+ const IntPoint Pt, bool DiscardLeft)
+{
+ Direction Dir1 = (op1->Pt.X > op1b->Pt.X ? dRightToLeft : dLeftToRight);
+ Direction Dir2 = (op2->Pt.X > op2b->Pt.X ? dRightToLeft : dLeftToRight);
+ if (Dir1 == Dir2) return false;
+
+ //When DiscardLeft, we want Op1b to be on the Left of Op1, otherwise we
+ //want Op1b to be on the Right. (And likewise with Op2 and Op2b.)
+ //So, to facilitate this while inserting Op1b and Op2b ...
+ //when DiscardLeft, make sure we're AT or RIGHT of Pt before adding Op1b,
+ //otherwise make sure we're AT or LEFT of Pt. (Likewise with Op2b.)
+ if (Dir1 == dLeftToRight)
+ {
+ while (op1->Next->Pt.X <= Pt.X &&
+ op1->Next->Pt.X >= op1->Pt.X && op1->Next->Pt.Y == Pt.Y)
+ op1 = op1->Next;
+ if (DiscardLeft && (op1->Pt.X != Pt.X)) op1 = op1->Next;
+ op1b = DupOutPt(op1, !DiscardLeft);
+ if (op1b->Pt != Pt)
+ {
+ op1 = op1b;
+ op1->Pt = Pt;
+ op1b = DupOutPt(op1, !DiscardLeft);
+ }
+ }
+ else
+ {
+ while (op1->Next->Pt.X >= Pt.X &&
+ op1->Next->Pt.X <= op1->Pt.X && op1->Next->Pt.Y == Pt.Y)
+ op1 = op1->Next;
+ if (!DiscardLeft && (op1->Pt.X != Pt.X)) op1 = op1->Next;
+ op1b = DupOutPt(op1, DiscardLeft);
+ if (op1b->Pt != Pt)
+ {
+ op1 = op1b;
+ op1->Pt = Pt;
+ op1b = DupOutPt(op1, DiscardLeft);
+ }
+ }
+
+ if (Dir2 == dLeftToRight)
+ {
+ while (op2->Next->Pt.X <= Pt.X &&
+ op2->Next->Pt.X >= op2->Pt.X && op2->Next->Pt.Y == Pt.Y)
+ op2 = op2->Next;
+ if (DiscardLeft && (op2->Pt.X != Pt.X)) op2 = op2->Next;
+ op2b = DupOutPt(op2, !DiscardLeft);
+ if (op2b->Pt != Pt)
+ {
+ op2 = op2b;
+ op2->Pt = Pt;
+ op2b = DupOutPt(op2, !DiscardLeft);
+ };
+ } else
+ {
+ while (op2->Next->Pt.X >= Pt.X &&
+ op2->Next->Pt.X <= op2->Pt.X && op2->Next->Pt.Y == Pt.Y)
+ op2 = op2->Next;
+ if (!DiscardLeft && (op2->Pt.X != Pt.X)) op2 = op2->Next;
+ op2b = DupOutPt(op2, DiscardLeft);
+ if (op2b->Pt != Pt)
+ {
+ op2 = op2b;
+ op2->Pt = Pt;
+ op2b = DupOutPt(op2, DiscardLeft);
+ };
+ };
+
+ if ((Dir1 == dLeftToRight) == DiscardLeft)
+ {
+ op1->Prev = op2;
+ op2->Next = op1;
+ op1b->Next = op2b;
+ op2b->Prev = op1b;
+ }
+ else
+ {
+ op1->Next = op2;
+ op2->Prev = op1;
+ op1b->Prev = op2b;
+ op2b->Next = op1b;
+ }
+ return true;
+}
+//------------------------------------------------------------------------------
+
+bool Clipper::JoinPoints(Join *j, OutRec* outRec1, OutRec* outRec2)
+{
+ OutPt *op1 = j->OutPt1, *op1b;
+ OutPt *op2 = j->OutPt2, *op2b;
+
+ //There are 3 kinds of joins for output polygons ...
+  //1. Horizontal joins where Join.OutPt1 & Join.OutPt2 are vertices anywhere
+ //along (horizontal) collinear edges (& Join.OffPt is on the same horizontal).
+ //2. Non-horizontal joins where Join.OutPt1 & Join.OutPt2 are at the same
+ //location at the Bottom of the overlapping segment (& Join.OffPt is above).
+ //3. StrictSimple joins where edges touch but are not collinear and where
+ //Join.OutPt1, Join.OutPt2 & Join.OffPt all share the same point.
+ bool isHorizontal = (j->OutPt1->Pt.Y == j->OffPt.Y);
+
+ if (isHorizontal && (j->OffPt == j->OutPt1->Pt) &&
+ (j->OffPt == j->OutPt2->Pt))
+ {
+ //Strictly Simple join ...
+ op1b = j->OutPt1->Next;
+ while (op1b != op1 && (op1b->Pt == j->OffPt))
+ op1b = op1b->Next;
+ bool reverse1 = (op1b->Pt.Y > j->OffPt.Y);
+ op2b = j->OutPt2->Next;
+ while (op2b != op2 && (op2b->Pt == j->OffPt))
+ op2b = op2b->Next;
+ bool reverse2 = (op2b->Pt.Y > j->OffPt.Y);
+ if (reverse1 == reverse2) return false;
+ if (reverse1)
+ {
+ op1b = DupOutPt(op1, false);
+ op2b = DupOutPt(op2, true);
+ op1->Prev = op2;
+ op2->Next = op1;
+ op1b->Next = op2b;
+ op2b->Prev = op1b;
+ j->OutPt1 = op1;
+ j->OutPt2 = op1b;
+ return true;
+ } else
+ {
+ op1b = DupOutPt(op1, true);
+ op2b = DupOutPt(op2, false);
+ op1->Next = op2;
+ op2->Prev = op1;
+ op1b->Prev = op2b;
+ op2b->Next = op1b;
+ j->OutPt1 = op1;
+ j->OutPt2 = op1b;
+ return true;
+ }
+ }
+ else if (isHorizontal)
+ {
+    //treat horizontal joins differently from non-horizontal joins since with
+    //them we're not yet sure where the overlap is. OutPt1.Pt & OutPt2.Pt
+ //may be anywhere along the horizontal edge.
+ op1b = op1;
+ while (op1->Prev->Pt.Y == op1->Pt.Y && op1->Prev != op1b && op1->Prev != op2)
+ op1 = op1->Prev;
+ while (op1b->Next->Pt.Y == op1b->Pt.Y && op1b->Next != op1 && op1b->Next != op2)
+ op1b = op1b->Next;
+ if (op1b->Next == op1 || op1b->Next == op2) return false; //a flat 'polygon'
+
+ op2b = op2;
+ while (op2->Prev->Pt.Y == op2->Pt.Y && op2->Prev != op2b && op2->Prev != op1b)
+ op2 = op2->Prev;
+ while (op2b->Next->Pt.Y == op2b->Pt.Y && op2b->Next != op2 && op2b->Next != op1)
+ op2b = op2b->Next;
+ if (op2b->Next == op2 || op2b->Next == op1) return false; //a flat 'polygon'
+
+ cInt Left, Right;
+    //Op1 --> Op1b & Op2 --> Op2b are the extremities of the horizontal edges
+ if (!GetOverlap(op1->Pt.X, op1b->Pt.X, op2->Pt.X, op2b->Pt.X, Left, Right))
+ return false;
+
+    //DiscardLeftSide: when overlapping edges are joined, a spike will be created
+    //which needs to be cleaned up. However, we don't want Op1 or Op2 caught up
+    //on the discarded side as either may still be needed for other joins ...
+ IntPoint Pt;
+ bool DiscardLeftSide;
+ if (op1->Pt.X >= Left && op1->Pt.X <= Right)
+ {
+ Pt = op1->Pt; DiscardLeftSide = (op1->Pt.X > op1b->Pt.X);
+ }
+    else if (op2->Pt.X >= Left && op2->Pt.X <= Right)
+ {
+ Pt = op2->Pt; DiscardLeftSide = (op2->Pt.X > op2b->Pt.X);
+ }
+ else if (op1b->Pt.X >= Left && op1b->Pt.X <= Right)
+ {
+ Pt = op1b->Pt; DiscardLeftSide = op1b->Pt.X > op1->Pt.X;
+ }
+ else
+ {
+ Pt = op2b->Pt; DiscardLeftSide = (op2b->Pt.X > op2->Pt.X);
+ }
+ j->OutPt1 = op1; j->OutPt2 = op2;
+ return JoinHorz(op1, op1b, op2, op2b, Pt, DiscardLeftSide);
+ } else
+ {
+ //nb: For non-horizontal joins ...
+ // 1. Jr.OutPt1.Pt.Y == Jr.OutPt2.Pt.Y
+    //    2. Jr.OutPt1.Pt.Y > Jr.OffPt.Y
+
+ //make sure the polygons are correctly oriented ...
+ op1b = op1->Next;
+ while ((op1b->Pt == op1->Pt) && (op1b != op1)) op1b = op1b->Next;
+ bool Reverse1 = ((op1b->Pt.Y > op1->Pt.Y) ||
+ !SlopesEqual(op1->Pt, op1b->Pt, j->OffPt, m_UseFullRange));
+ if (Reverse1)
+ {
+ op1b = op1->Prev;
+ while ((op1b->Pt == op1->Pt) && (op1b != op1)) op1b = op1b->Prev;
+ if ((op1b->Pt.Y > op1->Pt.Y) ||
+ !SlopesEqual(op1->Pt, op1b->Pt, j->OffPt, m_UseFullRange)) return false;
+ };
+ op2b = op2->Next;
+    while ((op2b->Pt == op2->Pt) && (op2b != op2)) op2b = op2b->Next;
+ bool Reverse2 = ((op2b->Pt.Y > op2->Pt.Y) ||
+ !SlopesEqual(op2->Pt, op2b->Pt, j->OffPt, m_UseFullRange));
+ if (Reverse2)
+ {
+ op2b = op2->Prev;
+ while ((op2b->Pt == op2->Pt) && (op2b != op2)) op2b = op2b->Prev;
+ if ((op2b->Pt.Y > op2->Pt.Y) ||
+ !SlopesEqual(op2->Pt, op2b->Pt, j->OffPt, m_UseFullRange)) return false;
+ }
+
+ if ((op1b == op1) || (op2b == op2) || (op1b == op2b) ||
+ ((outRec1 == outRec2) && (Reverse1 == Reverse2))) return false;
+
+ if (Reverse1)
+ {
+ op1b = DupOutPt(op1, false);
+ op2b = DupOutPt(op2, true);
+ op1->Prev = op2;
+ op2->Next = op1;
+ op1b->Next = op2b;
+ op2b->Prev = op1b;
+ j->OutPt1 = op1;
+ j->OutPt2 = op1b;
+ return true;
+ } else
+ {
+ op1b = DupOutPt(op1, true);
+ op2b = DupOutPt(op2, false);
+ op1->Next = op2;
+ op2->Prev = op1;
+ op1b->Prev = op2b;
+ op2b->Next = op1b;
+ j->OutPt1 = op1;
+ j->OutPt2 = op1b;
+ return true;
+ }
+ }
+}
+//----------------------------------------------------------------------
+
+void Clipper::FixupFirstLefts1(OutRec* OldOutRec, OutRec* NewOutRec)
+{
+
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); ++i)
+ {
+ OutRec* outRec = m_PolyOuts[i];
+ if (outRec->Pts && outRec->FirstLeft == OldOutRec)
+ {
+ if (Poly2ContainsPoly1(outRec->Pts, NewOutRec->Pts))
+ outRec->FirstLeft = NewOutRec;
+ }
+ }
+}
+//----------------------------------------------------------------------
+
+void Clipper::FixupFirstLefts2(OutRec* OldOutRec, OutRec* NewOutRec)
+{
+ for (PolyOutList::size_type i = 0; i < m_PolyOuts.size(); ++i)
+ {
+ OutRec* outRec = m_PolyOuts[i];
+ if (outRec->FirstLeft == OldOutRec) outRec->FirstLeft = NewOutRec;
+ }
+}
+//----------------------------------------------------------------------
+
+static OutRec* ParseFirstLeft(OutRec* FirstLeft)
+{
+ while (FirstLeft && !FirstLeft->Pts)
+ FirstLeft = FirstLeft->FirstLeft;
+ return FirstLeft;
+}
+//------------------------------------------------------------------------------
+
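+//Merge the OutPt pairs recorded in m_Joins: a successful join either stitches
+//two separate output polygons into one, or splits a self-touching polygon in
+//two, after which hole states and FirstLeft pointers are fixed up.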
+void Clipper::JoinCommonEdges()
+{
+ for (JoinList::size_type i = 0; i < m_Joins.size(); i++)
+ {
+ Join* join = m_Joins[i];
+
+ OutRec *outRec1 = GetOutRec(join->OutPt1->Idx);
+ OutRec *outRec2 = GetOutRec(join->OutPt2->Idx);
+
+ if (!outRec1->Pts || !outRec2->Pts) continue;
+
+ //get the polygon fragment with the correct hole state (FirstLeft)
+ //before calling JoinPoints() ...
+ OutRec *holeStateRec;
+ if (outRec1 == outRec2) holeStateRec = outRec1;
+ else if (Param1RightOfParam2(outRec1, outRec2)) holeStateRec = outRec2;
+ else if (Param1RightOfParam2(outRec2, outRec1)) holeStateRec = outRec1;
+ else holeStateRec = GetLowermostRec(outRec1, outRec2);
+
+ if (!JoinPoints(join, outRec1, outRec2)) continue;
+
+ if (outRec1 == outRec2)
+ {
+ //instead of joining two polygons, we've just created a new one by
+ //splitting one polygon into two.
+ outRec1->Pts = join->OutPt1;
+ outRec1->BottomPt = 0;
+ outRec2 = CreateOutRec();
+ outRec2->Pts = join->OutPt2;
+
+ //update all OutRec2.Pts Idx's ...
+ UpdateOutPtIdxs(*outRec2);
+
+ //We now need to check every OutRec.FirstLeft pointer. If it points
+ //to OutRec1 it may need to point to OutRec2 instead ...
+ if (m_UsingPolyTree)
+ for (PolyOutList::size_type j = 0; j < m_PolyOuts.size() - 1; j++)
+ {
+ OutRec* oRec = m_PolyOuts[j];
+ if (!oRec->Pts || ParseFirstLeft(oRec->FirstLeft) != outRec1 ||
+ oRec->IsHole == outRec1->IsHole) continue;
+ if (Poly2ContainsPoly1(oRec->Pts, join->OutPt2))
+ oRec->FirstLeft = outRec2;
+ }
+
+ if (Poly2ContainsPoly1(outRec2->Pts, outRec1->Pts))
+ {
+ //outRec2 is contained by outRec1 ...
+ outRec2->IsHole = !outRec1->IsHole;
+ outRec2->FirstLeft = outRec1;
+
+ //fixup FirstLeft pointers that may need reassigning to OutRec1
+ if (m_UsingPolyTree) FixupFirstLefts2(outRec2, outRec1);
+
+ if ((outRec2->IsHole ^ m_ReverseOutput) == (Area(*outRec2) > 0))
+ ReversePolyPtLinks(outRec2->Pts);
+
+ } else if (Poly2ContainsPoly1(outRec1->Pts, outRec2->Pts))
+ {
+ //outRec1 is contained by outRec2 ...
+ outRec2->IsHole = outRec1->IsHole;
+ outRec1->IsHole = !outRec2->IsHole;
+ outRec2->FirstLeft = outRec1->FirstLeft;
+ outRec1->FirstLeft = outRec2;
+
+ //fixup FirstLeft pointers that may need reassigning to OutRec1
+ if (m_UsingPolyTree) FixupFirstLefts2(outRec1, outRec2);
+
+ if ((outRec1->IsHole ^ m_ReverseOutput) == (Area(*outRec1) > 0))
+ ReversePolyPtLinks(outRec1->Pts);
+ }
+ else
+ {
+ //the 2 polygons are completely separate ...
+ outRec2->IsHole = outRec1->IsHole;
+ outRec2->FirstLeft = outRec1->FirstLeft;
+
+ //fixup FirstLeft pointers that may need reassigning to OutRec2
+ if (m_UsingPolyTree) FixupFirstLefts1(outRec1, outRec2);
+ }
+
+ } else
+ {
+ //joined 2 polygons together ...
+
+ outRec2->Pts = 0;
+ outRec2->BottomPt = 0;
+ outRec2->Idx = outRec1->Idx;
+
+ outRec1->IsHole = holeStateRec->IsHole;
+ if (holeStateRec == outRec2)
+ outRec1->FirstLeft = outRec2->FirstLeft;
+ outRec2->FirstLeft = outRec1;
+
+ //fixup FirstLeft pointers that may need reassigning to OutRec1
+ if (m_UsingPolyTree) FixupFirstLefts2(outRec2, outRec1);
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+// ClipperOffset support functions ...
+//------------------------------------------------------------------------------
+
+DoublePoint GetUnitNormal(const IntPoint &pt1, const IntPoint &pt2)
+{
+ if(pt2.X == pt1.X && pt2.Y == pt1.Y)
+ return DoublePoint(0, 0);
+
+ double Dx = (double)(pt2.X - pt1.X);
+ double dy = (double)(pt2.Y - pt1.Y);
+  double f = 1.0 / std::sqrt( Dx*Dx + dy*dy );
+ Dx *= f;
+ dy *= f;
+ return DoublePoint(dy, -Dx);
+}
+
+//------------------------------------------------------------------------------
+// ClipperOffset class
+//------------------------------------------------------------------------------
+
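+//Typical use (a minimal sketch, assuming the enclosing ClipperLib namespace
+//is in scope): add source paths with a JoinType and EndType, then call
+//Execute() with the offset distance in the same units as the coordinates:
+//  ClipperOffset co;
+//  co.AddPath(path, jtRound, etClosedPolygon);
+//  Paths solution;
+//  co.Execute(solution, 10.0); //offset the polygon outward by 10 units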
+ClipperOffset::ClipperOffset(double miterLimit, double arcTolerance)
+{
+ this->MiterLimit = miterLimit;
+ this->ArcTolerance = arcTolerance;
+ m_lowest.X = -1;
+}
+//------------------------------------------------------------------------------
+
+ClipperOffset::~ClipperOffset()
+{
+ Clear();
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::Clear()
+{
+ for (int i = 0; i < m_polyNodes.ChildCount(); ++i)
+ delete m_polyNodes.Childs[i];
+ m_polyNodes.Childs.clear();
+ m_lowest.X = -1;
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::AddPath(const Path& path, JoinType joinType, EndType endType)
+{
+ int highI = (int)path.size() - 1;
+ if (highI < 0) return;
+ PolyNode* newNode = new PolyNode();
+ newNode->m_jointype = joinType;
+ newNode->m_endtype = endType;
+
+ //strip duplicate points from path and also get index to the lowest point ...
+ if (endType == etClosedLine || endType == etClosedPolygon)
+ while (highI > 0 && path[0] == path[highI]) highI--;
+ newNode->Contour.reserve(highI + 1);
+ newNode->Contour.push_back(path[0]);
+ int j = 0, k = 0;
+ for (int i = 1; i <= highI; i++)
+ if (newNode->Contour[j] != path[i])
+ {
+ j++;
+ newNode->Contour.push_back(path[i]);
+ if (path[i].Y > newNode->Contour[k].Y ||
+ (path[i].Y == newNode->Contour[k].Y &&
+ path[i].X < newNode->Contour[k].X)) k = j;
+ }
+ if ((endType == etClosedPolygon && j < 2) ||
+ (endType != etClosedPolygon && j < 0))
+ {
+ delete newNode;
+ return;
+ }
+ m_polyNodes.AddChild(*newNode);
+
+ //if this path's lowest pt is lower than all the others then update m_lowest
+ if (endType != etClosedPolygon) return;
+ if (m_lowest.X < 0)
+ m_lowest = IntPoint(0, k);
+ else
+ {
+ IntPoint ip = m_polyNodes.Childs[(int)m_lowest.X]->Contour[(int)m_lowest.Y];
+ if (newNode->Contour[k].Y > ip.Y ||
+ (newNode->Contour[k].Y == ip.Y &&
+ newNode->Contour[k].X < ip.X))
+ m_lowest = IntPoint(m_polyNodes.ChildCount() - 1, k);
+ }
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::AddPaths(const Paths& paths, JoinType joinType, EndType endType)
+{
+ for (Paths::size_type i = 0; i < paths.size(); ++i)
+ AddPath(paths[i], joinType, endType);
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::FixOrientations()
+{
+ //fixup orientations of all closed paths if the orientation of the
+ //closed path with the lowermost vertex is wrong ...
+ if (m_lowest.X >= 0 &&
+ !Orientation(m_polyNodes.Childs[(int)m_lowest.X]->Contour))
+ {
+ for (int i = 0; i < m_polyNodes.ChildCount(); ++i)
+ {
+ PolyNode& node = *m_polyNodes.Childs[i];
+ if (node.m_endtype == etClosedPolygon ||
+ (node.m_endtype == etClosedLine && Orientation(node.Contour)))
+ ReversePath(node.Contour);
+ }
+ } else
+ {
+ for (int i = 0; i < m_polyNodes.ChildCount(); ++i)
+ {
+ PolyNode& node = *m_polyNodes.Childs[i];
+ if (node.m_endtype == etClosedLine && !Orientation(node.Contour))
+ ReversePath(node.Contour);
+ }
+ }
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::Execute(Paths& solution, double delta)
+{
+ solution.clear();
+ FixOrientations();
+ DoOffset(delta);
+
+ //now clean up 'corners' ...
+ Clipper clpr;
+ clpr.AddPaths(m_destPolys, ptSubject, true);
+ if (delta > 0)
+ {
+ clpr.Execute(ctUnion, solution, pftPositive, pftPositive);
+ }
+ else
+ {
+ IntRect r = clpr.GetBounds();
+ Path outer(4);
+ outer[0] = IntPoint(r.left - 10, r.bottom + 10);
+ outer[1] = IntPoint(r.right + 10, r.bottom + 10);
+ outer[2] = IntPoint(r.right + 10, r.top - 10);
+ outer[3] = IntPoint(r.left - 10, r.top - 10);
+
+ clpr.AddPath(outer, ptSubject, true);
+ clpr.ReverseSolution(true);
+ clpr.Execute(ctUnion, solution, pftNegative, pftNegative);
+ if (solution.size() > 0) solution.erase(solution.begin());
+ }
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::Execute(PolyTree& solution, double delta)
+{
+ solution.Clear();
+ FixOrientations();
+ DoOffset(delta);
+
+ //now clean up 'corners' ...
+ Clipper clpr;
+ clpr.AddPaths(m_destPolys, ptSubject, true);
+ if (delta > 0)
+ {
+ clpr.Execute(ctUnion, solution, pftPositive, pftPositive);
+ }
+ else
+ {
+ IntRect r = clpr.GetBounds();
+ Path outer(4);
+ outer[0] = IntPoint(r.left - 10, r.bottom + 10);
+ outer[1] = IntPoint(r.right + 10, r.bottom + 10);
+ outer[2] = IntPoint(r.right + 10, r.top - 10);
+ outer[3] = IntPoint(r.left - 10, r.top - 10);
+
+ clpr.AddPath(outer, ptSubject, true);
+ clpr.ReverseSolution(true);
+ clpr.Execute(ctUnion, solution, pftNegative, pftNegative);
+ //remove the outer PolyNode rectangle ...
+ if (solution.ChildCount() == 1 && solution.Childs[0]->ChildCount() > 0)
+ {
+ PolyNode* outerNode = solution.Childs[0];
+ solution.Childs.reserve(outerNode->ChildCount());
+ solution.Childs[0] = outerNode->Childs[0];
+ for (int i = 1; i < outerNode->ChildCount(); ++i)
+ solution.AddChild(*outerNode->Childs[i]);
+ }
+ else
+ solution.Clear();
+ }
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::DoOffset(double delta)
+{
+ m_destPolys.clear();
+ m_delta = delta;
+
+  //if Zero offset, just copy any CLOSED polygons to m_destPolys and return ...
+ if (NEAR_ZERO(delta))
+ {
+ m_destPolys.reserve(m_polyNodes.ChildCount());
+ for (int i = 0; i < m_polyNodes.ChildCount(); i++)
+ {
+ PolyNode& node = *m_polyNodes.Childs[i];
+ if (node.m_endtype == etClosedPolygon)
+ m_destPolys.push_back(node.Contour);
+ }
+ return;
+ }
+
+ //see offset_triginometry3.svg in the documentation folder ...
+ if (MiterLimit > 2) m_miterLim = 2/(MiterLimit * MiterLimit);
+ else m_miterLim = 0.5;
+
+ double y;
+ if (ArcTolerance <= 0.0) y = def_arc_tolerance;
+ else if (ArcTolerance > std::fabs(delta) * def_arc_tolerance)
+ y = std::fabs(delta) * def_arc_tolerance;
+ else y = ArcTolerance;
+ //see offset_triginometry2.svg in the documentation folder ...
+ double steps = pi / std::acos(1 - y / std::fabs(delta));
+ if (steps > std::fabs(delta) * pi)
+ steps = std::fabs(delta) * pi; //ie excessive precision check
+ m_sin = std::sin(two_pi / steps);
+ m_cos = std::cos(two_pi / steps);
+ m_StepsPerRad = steps / two_pi;
+ if (delta < 0.0) m_sin = -m_sin;
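+  //Worked example (illustrative note): with delta = 100 and an ArcTolerance of
+  //0.25 (the constructor's default roundPrecision), steps = pi / acos(1 - 0.25/100)
+  //= pi / acos(0.9975), i.e. roughly 44 segments per full circle; the
+  //fabs(delta) * pi cap (about 314 here) only applies for very tight tolerances.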
+
+ m_destPolys.reserve(m_polyNodes.ChildCount() * 2);
+ for (int i = 0; i < m_polyNodes.ChildCount(); i++)
+ {
+ PolyNode& node = *m_polyNodes.Childs[i];
+ m_srcPoly = node.Contour;
+
+ int len = (int)m_srcPoly.size();
+ if (len == 0 || (delta <= 0 && (len < 3 || node.m_endtype != etClosedPolygon)))
+ continue;
+
+ m_destPoly.clear();
+ if (len == 1)
+ {
+ if (node.m_jointype == jtRound)
+ {
+ double X = 1.0, Y = 0.0;
+ for (cInt j = 1; j <= steps; j++)
+ {
+ m_destPoly.push_back(IntPoint(
+ Round(m_srcPoly[0].X + X * delta),
+ Round(m_srcPoly[0].Y + Y * delta)));
+ double X2 = X;
+ X = X * m_cos - m_sin * Y;
+ Y = X2 * m_sin + Y * m_cos;
+ }
+ }
+ else
+ {
+ double X = -1.0, Y = -1.0;
+ for (int j = 0; j < 4; ++j)
+ {
+ m_destPoly.push_back(IntPoint(
+ Round(m_srcPoly[0].X + X * delta),
+ Round(m_srcPoly[0].Y + Y * delta)));
+ if (X < 0) X = 1;
+ else if (Y < 0) Y = 1;
+ else X = -1;
+ }
+ }
+ m_destPolys.push_back(m_destPoly);
+ continue;
+ }
+ //build m_normals ...
+ m_normals.clear();
+ m_normals.reserve(len);
+ for (int j = 0; j < len - 1; ++j)
+ m_normals.push_back(GetUnitNormal(m_srcPoly[j], m_srcPoly[j + 1]));
+ if (node.m_endtype == etClosedLine || node.m_endtype == etClosedPolygon)
+ m_normals.push_back(GetUnitNormal(m_srcPoly[len - 1], m_srcPoly[0]));
+ else
+ m_normals.push_back(DoublePoint(m_normals[len - 2]));
+
+ if (node.m_endtype == etClosedPolygon)
+ {
+ int k = len - 1;
+ for (int j = 0; j < len; ++j)
+ OffsetPoint(j, k, node.m_jointype);
+ m_destPolys.push_back(m_destPoly);
+ }
+ else if (node.m_endtype == etClosedLine)
+ {
+ int k = len - 1;
+ for (int j = 0; j < len; ++j)
+ OffsetPoint(j, k, node.m_jointype);
+ m_destPolys.push_back(m_destPoly);
+ m_destPoly.clear();
+ //re-build m_normals ...
+ DoublePoint n = m_normals[len -1];
+ for (int j = len - 1; j > 0; j--)
+ m_normals[j] = DoublePoint(-m_normals[j - 1].X, -m_normals[j - 1].Y);
+ m_normals[0] = DoublePoint(-n.X, -n.Y);
+ k = 0;
+ for (int j = len - 1; j >= 0; j--)
+ OffsetPoint(j, k, node.m_jointype);
+ m_destPolys.push_back(m_destPoly);
+ }
+ else
+ {
+ int k = 0;
+ for (int j = 1; j < len - 1; ++j)
+ OffsetPoint(j, k, node.m_jointype);
+
+ IntPoint pt1;
+ if (node.m_endtype == etOpenButt)
+ {
+ int j = len - 1;
+ pt1 = IntPoint((cInt)Round(m_srcPoly[j].X + m_normals[j].X *
+ delta), (cInt)Round(m_srcPoly[j].Y + m_normals[j].Y * delta));
+ m_destPoly.push_back(pt1);
+ pt1 = IntPoint((cInt)Round(m_srcPoly[j].X - m_normals[j].X *
+ delta), (cInt)Round(m_srcPoly[j].Y - m_normals[j].Y * delta));
+ m_destPoly.push_back(pt1);
+ }
+ else
+ {
+ int j = len - 1;
+ k = len - 2;
+ m_sinA = 0;
+ m_normals[j] = DoublePoint(-m_normals[j].X, -m_normals[j].Y);
+ if (node.m_endtype == etOpenSquare)
+ DoSquare(j, k);
+ else
+ DoRound(j, k);
+ }
+
+ //re-build m_normals ...
+ for (int j = len - 1; j > 0; j--)
+ m_normals[j] = DoublePoint(-m_normals[j - 1].X, -m_normals[j - 1].Y);
+ m_normals[0] = DoublePoint(-m_normals[1].X, -m_normals[1].Y);
+
+ k = len - 1;
+ for (int j = k - 1; j > 0; --j) OffsetPoint(j, k, node.m_jointype);
+
+ if (node.m_endtype == etOpenButt)
+ {
+ pt1 = IntPoint((cInt)Round(m_srcPoly[0].X - m_normals[0].X * delta),
+ (cInt)Round(m_srcPoly[0].Y - m_normals[0].Y * delta));
+ m_destPoly.push_back(pt1);
+ pt1 = IntPoint((cInt)Round(m_srcPoly[0].X + m_normals[0].X * delta),
+ (cInt)Round(m_srcPoly[0].Y + m_normals[0].Y * delta));
+ m_destPoly.push_back(pt1);
+ }
+ else
+ {
+ k = 1;
+ m_sinA = 0;
+ if (node.m_endtype == etOpenSquare)
+ DoSquare(0, 1);
+ else
+ DoRound(0, 1);
+ }
+ m_destPolys.push_back(m_destPoly);
+ }
+ }
+}
+//------------------------------------------------------------------------------
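+
+//Illustrative usage sketch (not part of the original library): offsetting a
+//closed 100 x 100 square outward by 10 units with round joins. The function
+//name ExampleOffsetSquare is ours and only shows the intended call order:
+//AddPath(), then Execute() with the desired delta.
+static void ExampleOffsetSquare(Paths& result)
+{
+  Path square;
+  square << IntPoint(0, 0) << IntPoint(100, 0)
+    << IntPoint(100, 100) << IntPoint(0, 100);
+  ClipperOffset co; //defaults: miterLimit 2.0, roundPrecision 0.25
+  co.AddPath(square, jtRound, etClosedPolygon);
+  co.Execute(result, 10.0); //a positive delta grows the polygon
+}
+//------------------------------------------------------------------------------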
+
+void ClipperOffset::OffsetPoint(int j, int& k, JoinType jointype)
+{
+ m_sinA = (m_normals[k].X * m_normals[j].Y - m_normals[j].X * m_normals[k].Y);
+ if (m_sinA < 0.00005 && m_sinA > -0.00005) return;
+ else if (m_sinA > 1.0) m_sinA = 1.0;
+ else if (m_sinA < -1.0) m_sinA = -1.0;
+
+ if (m_sinA * m_delta < 0)
+ {
+ m_destPoly.push_back(IntPoint(Round(m_srcPoly[j].X + m_normals[k].X * m_delta),
+ Round(m_srcPoly[j].Y + m_normals[k].Y * m_delta)));
+ m_destPoly.push_back(m_srcPoly[j]);
+ m_destPoly.push_back(IntPoint(Round(m_srcPoly[j].X + m_normals[j].X * m_delta),
+ Round(m_srcPoly[j].Y + m_normals[j].Y * m_delta)));
+ }
+ else
+ switch (jointype)
+ {
+ case jtMiter:
+ {
+ double r = 1 + (m_normals[j].X * m_normals[k].X +
+ m_normals[j].Y * m_normals[k].Y);
+ if (r >= m_miterLim) DoMiter(j, k, r); else DoSquare(j, k);
+ break;
+ }
+ case jtSquare: DoSquare(j, k); break;
+ case jtRound: DoRound(j, k); break;
+ }
+ k = j;
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::DoSquare(int j, int k)
+{
+ double dx = std::tan(std::atan2(m_sinA,
+ m_normals[k].X * m_normals[j].X + m_normals[k].Y * m_normals[j].Y) / 4);
+ m_destPoly.push_back(IntPoint(
+ Round(m_srcPoly[j].X + m_delta * (m_normals[k].X - m_normals[k].Y * dx)),
+ Round(m_srcPoly[j].Y + m_delta * (m_normals[k].Y + m_normals[k].X * dx))));
+ m_destPoly.push_back(IntPoint(
+ Round(m_srcPoly[j].X + m_delta * (m_normals[j].X + m_normals[j].Y * dx)),
+ Round(m_srcPoly[j].Y + m_delta * (m_normals[j].Y - m_normals[j].X * dx))));
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::DoMiter(int j, int k, double r)
+{
+ double q = m_delta / r;
+ m_destPoly.push_back(IntPoint(Round(m_srcPoly[j].X + (m_normals[k].X + m_normals[j].X) * q),
+ Round(m_srcPoly[j].Y + (m_normals[k].Y + m_normals[j].Y) * q)));
+}
+//------------------------------------------------------------------------------
+
+void ClipperOffset::DoRound(int j, int k)
+{
+ double a = std::atan2(m_sinA,
+ m_normals[k].X * m_normals[j].X + m_normals[k].Y * m_normals[j].Y);
+ int steps = (int)Round(m_StepsPerRad * std::fabs(a));
+
+ double X = m_normals[k].X, Y = m_normals[k].Y, X2;
+ for (int i = 0; i < steps; ++i)
+ {
+ m_destPoly.push_back(IntPoint(
+ Round(m_srcPoly[j].X + X * m_delta),
+ Round(m_srcPoly[j].Y + Y * m_delta)));
+ X2 = X;
+ X = X * m_cos - m_sin * Y;
+ Y = X2 * m_sin + Y * m_cos;
+ }
+ m_destPoly.push_back(IntPoint(
+ Round(m_srcPoly[j].X + m_normals[j].X * m_delta),
+ Round(m_srcPoly[j].Y + m_normals[j].Y * m_delta)));
+}
+
+//------------------------------------------------------------------------------
+// Miscellaneous public functions
+//------------------------------------------------------------------------------
+
+void Clipper::DoSimplePolygons()
+{
+ PolyOutList::size_type i = 0;
+ while (i < m_PolyOuts.size())
+ {
+ OutRec* outrec = m_PolyOuts[i++];
+ OutPt* op = outrec->Pts;
+ if (!op) continue;
+ do //for each Pt in Polygon until duplicate found do ...
+ {
+ OutPt* op2 = op->Next;
+ while (op2 != outrec->Pts)
+ {
+ if ((op->Pt == op2->Pt) && op2->Next != op && op2->Prev != op)
+ {
+ //split the polygon into two ...
+ OutPt* op3 = op->Prev;
+ OutPt* op4 = op2->Prev;
+ op->Prev = op4;
+ op4->Next = op;
+ op2->Prev = op3;
+ op3->Next = op2;
+
+ outrec->Pts = op;
+ OutRec* outrec2 = CreateOutRec();
+ outrec2->Pts = op2;
+ UpdateOutPtIdxs(*outrec2);
+ if (Poly2ContainsPoly1(outrec2->Pts, outrec->Pts))
+ {
+ //OutRec2 is contained by OutRec1 ...
+ outrec2->IsHole = !outrec->IsHole;
+ outrec2->FirstLeft = outrec;
+ }
+ else
+ if (Poly2ContainsPoly1(outrec->Pts, outrec2->Pts))
+ {
+ //OutRec1 is contained by OutRec2 ...
+ outrec2->IsHole = outrec->IsHole;
+ outrec->IsHole = !outrec2->IsHole;
+ outrec2->FirstLeft = outrec->FirstLeft;
+ outrec->FirstLeft = outrec2;
+ } else
+ {
+ //the 2 polygons are separate ...
+ outrec2->IsHole = outrec->IsHole;
+ outrec2->FirstLeft = outrec->FirstLeft;
+ }
+ op2 = op; //ie get ready for the Next iteration
+ }
+ op2 = op2->Next;
+ }
+ op = op->Next;
+ }
+ while (op != outrec->Pts);
+ }
+}
+//------------------------------------------------------------------------------
+
+void ReversePath(Path& p)
+{
+ std::reverse(p.begin(), p.end());
+}
+//------------------------------------------------------------------------------
+
+void ReversePaths(Paths& p)
+{
+ for (Paths::size_type i = 0; i < p.size(); ++i)
+ ReversePath(p[i]);
+}
+//------------------------------------------------------------------------------
+
+void SimplifyPolygons(const Paths &in_polys, Paths &out_polys, PolyFillType fillType)
+{
+ Clipper c;
+ c.StrictlySimple(true);
+ c.AddPaths(in_polys, ptSubject, true);
+ c.Execute(ctUnion, out_polys, fillType, fillType);
+}
+//------------------------------------------------------------------------------
+
+void SimplifyPolygons(Paths &polys, PolyFillType fillType)
+{
+ SimplifyPolygons(polys, polys, fillType);
+}
+//------------------------------------------------------------------------------
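+
+//Illustrative sketch (not part of the original library): SimplifyPolygons
+//splits a self-intersecting "bow-tie" into simple polygons (here two
+//triangles under the even-odd fill rule). The name ExampleSimplifyBowtie is ours.
+static void ExampleSimplifyBowtie(Paths& out)
+{
+  Path bowtie;
+  bowtie << IntPoint(0, 0) << IntPoint(10, 10)
+    << IntPoint(10, 0) << IntPoint(0, 10);
+  SimplifyPolygons(Paths(1, bowtie), out, pftEvenOdd);
+}
+//------------------------------------------------------------------------------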
+
+inline double DistanceSqrd(const IntPoint& pt1, const IntPoint& pt2)
+{
+ double Dx = ((double)pt1.X - pt2.X);
+ double dy = ((double)pt1.Y - pt2.Y);
+ return (Dx*Dx + dy*dy);
+}
+//------------------------------------------------------------------------------
+
+double DistanceFromLineSqrd(
+ const IntPoint& pt, const IntPoint& ln1, const IntPoint& ln2)
+{
+ //The equation of a line in general form (Ax + By + C = 0)
+ //given 2 points (x¹,y¹) & (x²,y²) is ...
+ //(y¹ - y²)x + (x² - x¹)y + (y² - y¹)x¹ - (x² - x¹)y¹ = 0
+ //A = (y¹ - y²); B = (x² - x¹); C = (y² - y¹)x¹ - (x² - x¹)y¹
+ //perpendicular distance of point (x³,y³) = (Ax³ + By³ + C)/Sqrt(A² + B²)
+ //see http://en.wikipedia.org/wiki/Perpendicular_distance
+ double A = double(ln1.Y - ln2.Y);
+ double B = double(ln2.X - ln1.X);
+ double C = A * ln1.X + B * ln1.Y;
+ C = A * pt.X + B * pt.Y - C;
+ return (C * C) / (A * A + B * B);
+}
+//---------------------------------------------------------------------------
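+
+//Worked example (illustrative note): for pt = (3,4) and the segment
+//ln1 = (0,0), ln2 = (10,0): A = 0, B = 10, C = 0, so the function returns
+//(10 * 4)^2 / (0 + 100) = 16, the squared distance of a point 4 units above the X axis.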
+
+bool SlopesNearCollinear(const IntPoint& pt1,
+ const IntPoint& pt2, const IntPoint& pt3, double distSqrd)
+{
+ return DistanceFromLineSqrd(pt2, pt1, pt3) < distSqrd;
+}
+//------------------------------------------------------------------------------
+
+bool PointsAreClose(IntPoint pt1, IntPoint pt2, double distSqrd)
+{
+ double Dx = (double)pt1.X - pt2.X;
+ double dy = (double)pt1.Y - pt2.Y;
+ return ((Dx * Dx) + (dy * dy) <= distSqrd);
+}
+//------------------------------------------------------------------------------
+
+OutPt* ExcludeOp(OutPt* op)
+{
+ OutPt* result = op->Prev;
+ result->Next = op->Next;
+ op->Next->Prev = result;
+ result->Idx = 0;
+ return result;
+}
+//------------------------------------------------------------------------------
+
+void CleanPolygon(const Path& in_poly, Path& out_poly, double distance)
+{
+ //distance = proximity in units/pixels below which vertices
+ //will be stripped. Default ~= sqrt(2).
+
+ size_t size = in_poly.size();
+
+ if (size == 0)
+ {
+ out_poly.clear();
+ return;
+ }
+
+ OutPt* outPts = new OutPt[size];
+ for (size_t i = 0; i < size; ++i)
+ {
+ outPts[i].Pt = in_poly[i];
+ outPts[i].Next = &outPts[(i + 1) % size];
+ outPts[i].Next->Prev = &outPts[i];
+ outPts[i].Idx = 0;
+ }
+
+ double distSqrd = distance * distance;
+ OutPt* op = &outPts[0];
+ while (op->Idx == 0 && op->Next != op->Prev)
+ {
+ if (PointsAreClose(op->Pt, op->Prev->Pt, distSqrd))
+ {
+ op = ExcludeOp(op);
+ size--;
+ }
+ else if (PointsAreClose(op->Prev->Pt, op->Next->Pt, distSqrd))
+ {
+ ExcludeOp(op->Next);
+ op = ExcludeOp(op);
+ size -= 2;
+ }
+ else if (SlopesNearCollinear(op->Prev->Pt, op->Pt, op->Next->Pt, distSqrd))
+ {
+ op = ExcludeOp(op);
+ size--;
+ }
+ else
+ {
+ op->Idx = 1;
+ op = op->Next;
+ }
+ }
+
+ if (size < 3) size = 0;
+ out_poly.resize(size);
+ for (size_t i = 0; i < size; ++i)
+ {
+ out_poly[i] = op->Pt;
+ op = op->Next;
+ }
+ delete [] outPts;
+}
+//------------------------------------------------------------------------------
+
+void CleanPolygon(Path& poly, double distance)
+{
+ CleanPolygon(poly, poly, distance);
+}
+//------------------------------------------------------------------------------
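+
+//Illustrative sketch (not part of the original library): with the default
+//distance (1.415 ~= sqrt(2)) CleanPolygon drops the near-collinear vertex at
+//(50, 1), which lies only 1 unit from the line through its neighbours, so
+//'out' should end up with the 4 corner vertices. The name ExampleCleanPolygon is ours.
+static void ExampleCleanPolygon(Path& out)
+{
+  Path noisy;
+  noisy << IntPoint(0, 0) << IntPoint(50, 1) << IntPoint(100, 0)
+    << IntPoint(100, 100) << IntPoint(0, 100);
+  CleanPolygon(noisy, out);
+}
+//------------------------------------------------------------------------------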
+
+void CleanPolygons(const Paths& in_polys, Paths& out_polys, double distance)
+{
+ for (Paths::size_type i = 0; i < in_polys.size(); ++i)
+ CleanPolygon(in_polys[i], out_polys[i], distance);
+}
+//------------------------------------------------------------------------------
+
+void CleanPolygons(Paths& polys, double distance)
+{
+ CleanPolygons(polys, polys, distance);
+}
+//------------------------------------------------------------------------------
+
+void Minkowski(const Path& poly, const Path& path,
+ Paths& solution, bool isSum, bool isClosed)
+{
+ int delta = (isClosed ? 1 : 0);
+ size_t polyCnt = poly.size();
+ size_t pathCnt = path.size();
+ Paths pp;
+ pp.reserve(pathCnt);
+ if (isSum)
+ for (size_t i = 0; i < pathCnt; ++i)
+ {
+ Path p;
+ p.reserve(polyCnt);
+ for (size_t j = 0; j < poly.size(); ++j)
+ p.push_back(IntPoint(path[i].X + poly[j].X, path[i].Y + poly[j].Y));
+ pp.push_back(p);
+ }
+ else
+ for (size_t i = 0; i < pathCnt; ++i)
+ {
+ Path p;
+ p.reserve(polyCnt);
+ for (size_t j = 0; j < poly.size(); ++j)
+ p.push_back(IntPoint(path[i].X - poly[j].X, path[i].Y - poly[j].Y));
+ pp.push_back(p);
+ }
+
+ Paths quads;
+ quads.reserve((pathCnt + delta) * (polyCnt + 1));
+ for (size_t i = 0; i <= pathCnt - 2 + delta; ++i)
+ for (size_t j = 0; j <= polyCnt - 1; ++j)
+ {
+ Path quad;
+ quad.reserve(4);
+ quad.push_back(pp[i % pathCnt][j % polyCnt]);
+ quad.push_back(pp[(i + 1) % pathCnt][j % polyCnt]);
+ quad.push_back(pp[(i + 1) % pathCnt][(j + 1) % polyCnt]);
+ quad.push_back(pp[i % pathCnt][(j + 1) % polyCnt]);
+ if (!Orientation(quad)) ReversePath(quad);
+ quads.push_back(quad);
+ }
+
+ Clipper c;
+ c.AddPaths(quads, ptSubject, true);
+ c.Execute(ctUnion, solution, pftNonZero, pftNonZero);
+}
+//------------------------------------------------------------------------------
+
+void MinkowskiSum(const Path& poly, const Path& path, Paths& solution, bool isClosed)
+{
+ Minkowski(poly, path, solution, true, isClosed);
+}
+//------------------------------------------------------------------------------
+
+void MinkowskiDiff(const Path& poly, const Path& path, Paths& solution, bool isClosed)
+{
+ Minkowski(poly, path, solution, false, isClosed);
+}
+//------------------------------------------------------------------------------
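+
+//Illustrative sketch (not part of the original library): sweeping a small
+//2 x 2 square along an open 3-point track thickens the track into a closed
+//outline roughly 2 units wide. The name ExampleMinkowskiSum is ours.
+static void ExampleMinkowskiSum(Paths& out)
+{
+  Path pattern;
+  pattern << IntPoint(-1, -1) << IntPoint(1, -1) << IntPoint(1, 1) << IntPoint(-1, 1);
+  Path track;
+  track << IntPoint(0, 0) << IntPoint(50, 0) << IntPoint(50, 50);
+  MinkowskiSum(pattern, track, out, false); //false: 'track' is an open path
+}
+//------------------------------------------------------------------------------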
+
+enum NodeType {ntAny, ntOpen, ntClosed};
+
+void AddPolyNodeToPolygons(const PolyNode& polynode, NodeType nodetype, Paths& paths)
+{
+ bool match = true;
+ if (nodetype == ntClosed) match = !polynode.IsOpen();
+ else if (nodetype == ntOpen) return;
+
+ if (!polynode.Contour.empty() && match)
+ paths.push_back(polynode.Contour);
+ for (int i = 0; i < polynode.ChildCount(); ++i)
+ AddPolyNodeToPolygons(*polynode.Childs[i], nodetype, paths);
+}
+//------------------------------------------------------------------------------
+
+void PolyTreeToPaths(const PolyTree& polytree, Paths& paths)
+{
+ paths.resize(0);
+ paths.reserve(polytree.Total());
+ AddPolyNodeToPolygons(polytree, ntAny, paths);
+}
+//------------------------------------------------------------------------------
+
+void ClosedPathsFromPolyTree(const PolyTree& polytree, Paths& paths)
+{
+ paths.resize(0);
+ paths.reserve(polytree.Total());
+ AddPolyNodeToPolygons(polytree, ntClosed, paths);
+}
+//------------------------------------------------------------------------------
+
+void OpenPathsFromPolyTree(PolyTree& polytree, Paths& paths)
+{
+ paths.resize(0);
+ paths.reserve(polytree.Total());
+ //Open paths are top level only, so ...
+ for (int i = 0; i < polytree.ChildCount(); ++i)
+ if (polytree.Childs[i]->IsOpen())
+ paths.push_back(polytree.Childs[i]->Contour);
+}
+//------------------------------------------------------------------------------
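+
+//Illustrative note (not from the original sources): a PolyTree produced by
+//Clipper::Execute can be flattened back into plain Paths, e.g. (assuming
+//'clpr' is a Clipper that already has subject paths added):
+//  PolyTree tree;
+//  clpr.Execute(ctUnion, tree, pftNonZero, pftNonZero);
+//  Paths flat;
+//  PolyTreeToPaths(tree, flat); //outer contours and holes, in tree order
+//ClosedPathsFromPolyTree() does the same but skips any open paths.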
+
+std::ostream& operator <<(std::ostream &s, const IntPoint &p)
+{
+ s << "(" << p.X << "," << p.Y << ")";
+ return s;
+}
+//------------------------------------------------------------------------------
+
+std::ostream& operator <<(std::ostream &s, const Path &p)
+{
+ if (p.empty()) return s;
+ Path::size_type last = p.size() -1;
+ for (Path::size_type i = 0; i < last; i++)
+ s << "(" << p[i].X << "," << p[i].Y << "), ";
+ s << "(" << p[last].X << "," << p[last].Y << ")\n";
+ return s;
+}
+//------------------------------------------------------------------------------
+
+std::ostream& operator <<(std::ostream &s, const Paths &p)
+{
+ for (Paths::size_type i = 0; i < p.size(); i++)
+ s << p[i];
+ s << "\n";
+ return s;
+}
+//------------------------------------------------------------------------------
+
+#ifdef use_deprecated
+
+void OffsetPaths(const Paths &in_polys, Paths &out_polys,
+ double delta, JoinType jointype, EndType_ endtype, double limit)
+{
+ ClipperOffset co(limit, limit);
+ co.AddPaths(in_polys, jointype, (EndType)endtype);
+ co.Execute(out_polys, delta);
+}
+//------------------------------------------------------------------------------
+
+#endif
+
+
+} //ClipperLib namespace
diff --git a/clipper/clipper.hpp b/clipper/clipper.hpp
new file mode 100644
index 0000000..0d1bd19
--- /dev/null
+++ b/clipper/clipper.hpp
@@ -0,0 +1,396 @@
+/*******************************************************************************
+* *
+* Author : Angus Johnson *
+* Version : 6.1.2 *
+* Date : 15 December 2013 *
+* Website : http://www.angusj.com *
+* Copyright : Angus Johnson 2010-2013 *
+* *
+* License: *
+* Use, modification & distribution is subject to Boost Software License Ver 1. *
+* http://www.boost.org/LICENSE_1_0.txt *
+* *
+* Attributions: *
+* The code in this library is an extension of Bala Vatti's clipping algorithm: *
+* "A generic solution to polygon clipping" *
+* Communications of the ACM, Vol 35, Issue 7 (July 1992) pp 56-63. *
+* http://portal.acm.org/citation.cfm?id=129906 *
+* *
+* Computer graphics and geometric modeling: implementation and algorithms *
+* By Max K. Agoston *
+* Springer; 1 edition (January 4, 2005) *
+* http://books.google.com/books?q=vatti+clipping+agoston *
+* *
+* See also: *
+* "Polygon Offsetting by Computing Winding Numbers" *
+* Paper no. DETC2005-85513 pp. 565-575 *
+* ASME 2005 International Design Engineering Technical Conferences *
+* and Computers and Information in Engineering Conference (IDETC/CIE2005) *
+* September 24-28, 2005 , Long Beach, California, USA *
+* http://www.me.berkeley.edu/~mcmains/pubs/DAC05OffsetPolygon.pdf *
+* *
+*******************************************************************************/
+
+#ifndef clipper_hpp
+#define clipper_hpp
+
+#define CLIPPER_VERSION "6.1.2"
+
+//use_int32: When enabled, 32bit ints are used instead of 64bit ints. This
+//improves performance but coordinate values are limited to the range +/- 46340
+//#define use_int32
+
+//use_xyz: adds a Z member to IntPoint. Adds a minor cost to performance.
+//#define use_xyz
+
+//use_lines: Enables line clipping. Adds a very minor cost to performance.
+//#define use_lines
+
+//use_deprecated: Enables support for the obsolete OffsetPaths() function
+//which has been replaced by the ClipperOffset class.
+#define use_deprecated
+
+#include <vector>
+#include <set>
+#include <stdexcept>
+#include <cstring>
+#include <cstdlib>
+#include <ostream>
+#include <functional>
+
+namespace ClipperLib {
+
+enum ClipType { ctIntersection, ctUnion, ctDifference, ctXor };
+enum PolyType { ptSubject, ptClip };
+//By far the most widely used winding rules for polygon filling are
+//EvenOdd & NonZero (GDI, GDI+, XLib, OpenGL, Cairo, AGG, Quartz, SVG, Gr32)
+//Other rules include Positive, Negative and ABS_GTR_EQ_TWO (only in OpenGL);
+//see http://glprogramming.com/red/chapter11.html
+enum PolyFillType { pftEvenOdd, pftNonZero, pftPositive, pftNegative };
+
+#ifdef use_int32
+typedef int cInt;
+typedef unsigned int cUInt;
+#else
+typedef signed long long cInt;
+typedef unsigned long long cUInt;
+#endif
+
+struct IntPoint {
+ cInt X;
+ cInt Y;
+#ifdef use_xyz
+ cInt Z;
+ IntPoint(cInt x = 0, cInt y = 0, cInt z = 0): X(x), Y(y), Z(z) {};
+#else
+ IntPoint(cInt x = 0, cInt y = 0): X(x), Y(y) {};
+#endif
+
+ friend inline bool operator== (const IntPoint& a, const IntPoint& b)
+ {
+ return a.X == b.X && a.Y == b.Y;
+ }
+ friend inline bool operator!= (const IntPoint& a, const IntPoint& b)
+ {
+ return a.X != b.X || a.Y != b.Y;
+ }
+};
+//------------------------------------------------------------------------------
+
+typedef std::vector< IntPoint > Path;
+typedef std::vector< Path > Paths;
+
+inline Path& operator <<(Path& poly, const IntPoint& p) {poly.push_back(p); return poly;}
+inline Paths& operator <<(Paths& polys, const Path& p) {polys.push_back(p); return polys;}
+
+std::ostream& operator <<(std::ostream &s, const IntPoint &p);
+std::ostream& operator <<(std::ostream &s, const Path &p);
+std::ostream& operator <<(std::ostream &s, const Paths &p);
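+
+//Illustrative example (not from the original sources): the Path/Paths
+//operators above allow paths to be built inline, e.g.
+//  Path triangle;
+//  triangle << IntPoint(0, 0) << IntPoint(100, 0) << IntPoint(50, 80);
+//  Paths subject;
+//  subject << triangle;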
+
+struct DoublePoint
+{
+ double X;
+ double Y;
+ DoublePoint(double x = 0, double y = 0) : X(x), Y(y) {}
+ DoublePoint(IntPoint ip) : X((double)ip.X), Y((double)ip.Y) {}
+};
+//------------------------------------------------------------------------------
+
+#ifdef use_xyz
+typedef void (*TZFillCallback)(IntPoint& z1, IntPoint& z2, IntPoint& pt);
+#endif
+
+enum InitOptions {ioReverseSolution = 1, ioStrictlySimple = 2, ioPreserveCollinear = 4};
+enum JoinType {jtSquare, jtRound, jtMiter};
+enum EndType {etClosedPolygon, etClosedLine, etOpenButt, etOpenSquare, etOpenRound};
+#ifdef use_deprecated
+ enum EndType_ {etClosed, etButt = 2, etSquare, etRound};
+#endif
+
+class PolyNode;
+typedef std::vector< PolyNode* > PolyNodes;
+
+class PolyNode
+{
+public:
+ PolyNode();
+ Path Contour;
+ PolyNodes Childs;
+ PolyNode* Parent;
+ PolyNode* GetNext() const;
+ bool IsHole() const;
+ bool IsOpen() const;
+ int ChildCount() const;
+private:
+ unsigned Index; //node index in Parent.Childs
+ bool m_IsOpen;
+ JoinType m_jointype;
+ EndType m_endtype;
+ PolyNode* GetNextSiblingUp() const;
+ void AddChild(PolyNode& child);
+ friend class Clipper; //to access Index
+ friend class ClipperOffset;
+};
+
+class PolyTree: public PolyNode
+{
+public:
+ ~PolyTree(){Clear();};
+ PolyNode* GetFirst() const;
+ void Clear();
+ int Total() const;
+private:
+ PolyNodes AllNodes;
+ friend class Clipper; //to access AllNodes
+};
+
+bool Orientation(const Path &poly);
+double Area(const Path &poly);
+
+#ifdef use_deprecated
+ void OffsetPaths(const Paths &in_polys, Paths &out_polys,
+ double delta, JoinType jointype, EndType_ endtype, double limit = 0);
+#endif
+
+void SimplifyPolygon(const Path &in_poly, Paths &out_polys, PolyFillType fillType = pftEvenOdd);
+void SimplifyPolygons(const Paths &in_polys, Paths &out_polys, PolyFillType fillType = pftEvenOdd);
+void SimplifyPolygons(Paths &polys, PolyFillType fillType = pftEvenOdd);
+
+void CleanPolygon(const Path& in_poly, Path& out_poly, double distance = 1.415);
+void CleanPolygon(Path& poly, double distance = 1.415);
+void CleanPolygons(const Paths& in_polys, Paths& out_polys, double distance = 1.415);
+void CleanPolygons(Paths& polys, double distance = 1.415);
+
+void MinkowskiSum(const Path& poly, const Path& path, Paths& solution, bool isClosed);
+void MinkowskiDiff(const Path& poly, const Path& path, Paths& solution, bool isClosed);
+
+void PolyTreeToPaths(const PolyTree& polytree, Paths& paths);
+void ClosedPathsFromPolyTree(const PolyTree& polytree, Paths& paths);
+void OpenPathsFromPolyTree(PolyTree& polytree, Paths& paths);
+
+void ReversePath(Path& p);
+void ReversePaths(Paths& p);
+
+struct IntRect { cInt left; cInt top; cInt right; cInt bottom; };
+
+//enums that are used internally ...
+enum EdgeSide { esLeft = 1, esRight = 2};
+
+//forward declarations (for stuff used internally) ...
+struct TEdge;
+struct IntersectNode;
+struct LocalMinima;
+struct Scanbeam;
+struct OutPt;
+struct OutRec;
+struct Join;
+
+typedef std::vector < OutRec* > PolyOutList;
+typedef std::vector < TEdge* > EdgeList;
+typedef std::vector < Join* > JoinList;
+typedef std::vector < IntersectNode* > IntersectList;
+
+
+//------------------------------------------------------------------------------
+
+//ClipperBase is the ancestor to the Clipper class. It should not be
+//instantiated directly. This class simply abstracts the conversion of sets of
+//polygon coordinates into edge objects that are stored in a LocalMinima list.
+class ClipperBase
+{
+public:
+ ClipperBase();
+ virtual ~ClipperBase();
+ bool AddPath(const Path &pg, PolyType PolyTyp, bool Closed);
+ bool AddPaths(const Paths &ppg, PolyType PolyTyp, bool Closed);
+ virtual void Clear();
+ IntRect GetBounds();
+ bool PreserveCollinear() {return m_PreserveCollinear;};
+ void PreserveCollinear(bool value) {m_PreserveCollinear = value;};
+protected:
+ void DisposeLocalMinimaList();
+ TEdge* AddBoundsToLML(TEdge *e, bool IsClosed);
+ void PopLocalMinima();
+ virtual void Reset();
+ TEdge* ProcessBound(TEdge* E, bool IsClockwise);
+ void InsertLocalMinima(LocalMinima *newLm);
+ void DoMinimaLML(TEdge* E1, TEdge* E2, bool IsClosed);
+ TEdge* DescendToMin(TEdge *&E);
+ void AscendToMax(TEdge *&E, bool Appending, bool IsClosed);
+ LocalMinima *m_CurrentLM;
+ LocalMinima *m_MinimaList;
+ bool m_UseFullRange;
+ EdgeList m_edges;
+ bool m_PreserveCollinear;
+ bool m_HasOpenPaths;
+};
+//------------------------------------------------------------------------------
+
+class Clipper : public virtual ClipperBase
+{
+public:
+ Clipper(int initOptions = 0);
+ ~Clipper();
+ bool Execute(ClipType clipType,
+ Paths &solution,
+ PolyFillType subjFillType = pftEvenOdd,
+ PolyFillType clipFillType = pftEvenOdd);
+ bool Execute(ClipType clipType,
+ PolyTree &polytree,
+ PolyFillType subjFillType = pftEvenOdd,
+ PolyFillType clipFillType = pftEvenOdd);
+ void Clear();
+ bool ReverseSolution() {return m_ReverseOutput;};
+ void ReverseSolution(bool value) {m_ReverseOutput = value;};
+ bool StrictlySimple() {return m_StrictSimple;};
+ void StrictlySimple(bool value) {m_StrictSimple = value;};
+ //set the callback function for z value filling on intersections (otherwise Z is 0)
+#ifdef use_xyz
+ void ZFillFunction(TZFillCallback zFillFunc);
+#endif
+protected:
+ void Reset();
+ virtual bool ExecuteInternal();
+private:
+ PolyOutList m_PolyOuts;
+ JoinList m_Joins;
+ JoinList m_GhostJoins;
+ IntersectList m_IntersectList;
+ ClipType m_ClipType;
+ std::set< cInt, std::greater<cInt> > m_Scanbeam;
+ TEdge *m_ActiveEdges;
+ TEdge *m_SortedEdges;
+ bool m_ExecuteLocked;
+ PolyFillType m_ClipFillType;
+ PolyFillType m_SubjFillType;
+ bool m_ReverseOutput;
+ bool m_UsingPolyTree;
+ bool m_StrictSimple;
+#ifdef use_xyz
+ TZFillCallback m_ZFill; //custom callback
+#endif
+ void SetWindingCount(TEdge& edge);
+ bool IsEvenOddFillType(const TEdge& edge) const;
+ bool IsEvenOddAltFillType(const TEdge& edge) const;
+ void InsertScanbeam(const cInt Y);
+ cInt PopScanbeam();
+ void InsertLocalMinimaIntoAEL(const cInt botY);
+ void InsertEdgeIntoAEL(TEdge *edge, TEdge* startEdge);
+ void AddEdgeToSEL(TEdge *edge);
+ void CopyAELToSEL();
+ void DeleteFromSEL(TEdge *e);
+ void DeleteFromAEL(TEdge *e);
+ void UpdateEdgeIntoAEL(TEdge *&e);
+ void SwapPositionsInSEL(TEdge *edge1, TEdge *edge2);
+ bool IsContributing(const TEdge& edge) const;
+ bool IsTopHorz(const cInt XPos);
+ void SwapPositionsInAEL(TEdge *edge1, TEdge *edge2);
+ void DoMaxima(TEdge *e);
+ void PrepareHorzJoins(TEdge* horzEdge, bool isTopOfScanbeam);
+ void ProcessHorizontals(bool IsTopOfScanbeam);
+ void ProcessHorizontal(TEdge *horzEdge, bool isTopOfScanbeam);
+ void AddLocalMaxPoly(TEdge *e1, TEdge *e2, const IntPoint &pt);
+ OutPt* AddLocalMinPoly(TEdge *e1, TEdge *e2, const IntPoint &pt);
+ OutRec* GetOutRec(int idx);
+ void AppendPolygon(TEdge *e1, TEdge *e2);
+ void IntersectEdges(TEdge *e1, TEdge *e2,
+ const IntPoint &pt, bool protect = false);
+ OutRec* CreateOutRec();
+ OutPt* AddOutPt(TEdge *e, const IntPoint &pt);
+ void DisposeAllOutRecs();
+ void DisposeOutRec(PolyOutList::size_type index);
+ bool ProcessIntersections(const cInt botY, const cInt topY);
+ void BuildIntersectList(const cInt botY, const cInt topY);
+ void ProcessIntersectList();
+ void ProcessEdgesAtTopOfScanbeam(const cInt topY);
+ void BuildResult(Paths& polys);
+ void BuildResult2(PolyTree& polytree);
+ void SetHoleState(TEdge *e, OutRec *outrec);
+ void DisposeIntersectNodes();
+ bool FixupIntersectionOrder();
+ void FixupOutPolygon(OutRec &outrec);
+ bool IsHole(TEdge *e);
+ bool FindOwnerFromSplitRecs(OutRec &outRec, OutRec *&currOrfl);
+ void FixHoleLinkage(OutRec &outrec);
+ void AddJoin(OutPt *op1, OutPt *op2, const IntPoint offPt);
+ void ClearJoins();
+ void ClearGhostJoins();
+ void AddGhostJoin(OutPt *op, const IntPoint offPt);
+ bool JoinPoints(Join *j, OutRec* outRec1, OutRec* outRec2);
+ void JoinCommonEdges();
+ void DoSimplePolygons();
+ void FixupFirstLefts1(OutRec* OldOutRec, OutRec* NewOutRec);
+ void FixupFirstLefts2(OutRec* OldOutRec, OutRec* NewOutRec);
+#ifdef use_xyz
+ void SetZ(IntPoint& pt, TEdge& e);
+#endif
+};
+//------------------------------------------------------------------------------
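+
+//Illustrative usage sketch (not from the original sources): intersecting two
+//axis-aligned rectangles with the default even-odd fill rules.
+//  Path a, b;
+//  a << IntPoint(0, 0) << IntPoint(100, 0) << IntPoint(100, 100) << IntPoint(0, 100);
+//  b << IntPoint(50, 50) << IntPoint(150, 50) << IntPoint(150, 150) << IntPoint(50, 150);
+//  Clipper c;
+//  c.AddPath(a, ptSubject, true);
+//  c.AddPath(b, ptClip, true);
+//  Paths result;
+//  c.Execute(ctIntersection, result); //expected: the 50 x 50 overlap square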
+
+class ClipperOffset
+{
+public:
+ ClipperOffset(double miterLimit = 2.0, double roundPrecision = 0.25);
+ ~ClipperOffset();
+ void AddPath(const Path& path, JoinType joinType, EndType endType);
+ void AddPaths(const Paths& paths, JoinType joinType, EndType endType);
+ void Execute(Paths& solution, double delta);
+ void Execute(PolyTree& solution, double delta);
+ void Clear();
+ double MiterLimit;
+ double ArcTolerance;
+private:
+ Paths m_destPolys;
+ Path m_srcPoly;
+ Path m_destPoly;
+ std::vector<DoublePoint> m_normals;
+ double m_delta, m_sinA, m_sin, m_cos;
+ double m_miterLim, m_StepsPerRad;
+ IntPoint m_lowest;
+ PolyNode m_polyNodes;
+
+ void FixOrientations();
+ void DoOffset(double delta);
+ void OffsetPoint(int j, int& k, JoinType jointype);
+ void DoSquare(int j, int k);
+ void DoMiter(int j, int k, double r);
+ void DoRound(int j, int k);
+};
+//------------------------------------------------------------------------------
+
+class clipperException : public std::exception
+{
+ public:
+ clipperException(const char* description): m_descr(description) {}
+ virtual ~clipperException() throw() {}
+ virtual const char* what() const throw() {return m_descr.c_str();}
+ private:
+ std::string m_descr;
+};
+//------------------------------------------------------------------------------
+
+} //ClipperLib namespace
+
+#endif //clipper_hpp
+
+
diff --git a/comb.cpp b/comb.cpp
new file mode 100644
index 0000000..141b8b6
--- /dev/null
+++ b/comb.cpp
@@ -0,0 +1,256 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "comb.h"
+
+bool Comb::preTest(Point startPoint, Point endPoint)
+{
+ return collisionTest(startPoint, endPoint);
+}
+
+bool Comb::collisionTest(Point startPoint, Point endPoint)
+{
+ Point diff = endPoint - startPoint;
+
+ matrix = PointMatrix(diff);
+ sp = matrix.apply(startPoint);
+ ep = matrix.apply(endPoint);
+
+ for(unsigned int n=0; n<boundery.size(); n++)
+ {
+ if (boundery[n].size() < 1)
+ continue;
+ Point p0 = matrix.apply(boundery[n][boundery[n].size()-1]);
+ for(unsigned int i=0; i<boundery[n].size(); i++)
+ {
+ Point p1 = matrix.apply(boundery[n][i]);
+ if ((p0.Y > sp.Y && p1.Y < sp.Y) || (p1.Y > sp.Y && p0.Y < sp.Y))
+ {
+ int64_t x = p0.X + (p1.X - p0.X) * (sp.Y - p0.Y) / (p1.Y - p0.Y);
+
+ if (x > sp.X && x < ep.X)
+ return true;
+ }
+ p0 = p1;
+ }
+ }
+ return false;
+}
+
+void Comb::calcMinMax()
+{
+ for(unsigned int n=0; n<boundery.size(); n++)
+ {
+ minX[n] = LLONG_MAX;
+ maxX[n] = LLONG_MIN;
+ Point p0 = matrix.apply(boundery[n][boundery[n].size()-1]);
+ for(unsigned int i=0; i<boundery[n].size(); i++)
+ {
+ Point p1 = matrix.apply(boundery[n][i]);
+ if ((p0.Y > sp.Y && p1.Y < sp.Y) || (p1.Y > sp.Y && p0.Y < sp.Y))
+ {
+ int64_t x = p0.X + (p1.X - p0.X) * (sp.Y - p0.Y) / (p1.Y - p0.Y);
+
+ if (x >= sp.X && x <= ep.X)
+ {
+ if (x < minX[n]) { minX[n] = x; minIdx[n] = i; }
+ if (x > maxX[n]) { maxX[n] = x; maxIdx[n] = i; }
+ }
+ }
+ p0 = p1;
+ }
+ }
+}
+
+unsigned int Comb::getPolygonAbove(int64_t x)
+{
+ int64_t min = LLONG_MAX;
+ unsigned int ret = UINT_MAX;
+ for(unsigned int n=0; n<boundery.size(); n++)
+ {
+ if (minX[n] > x && minX[n] < min)
+ {
+ min = minX[n];
+ ret = n;
+ }
+ }
+ return ret;
+}
+
+Point Comb::getBounderyPointWithOffset(unsigned int polygonNr, unsigned int idx)
+{
+ Point p0 = boundery[polygonNr][(idx > 0) ? (idx - 1) : (boundery[polygonNr].size() - 1)];
+ Point p1 = boundery[polygonNr][idx];
+ Point p2 = boundery[polygonNr][(idx < (boundery[polygonNr].size() - 1)) ? (idx + 1) : (0)];
+
+ Point off0 = crossZ(normal(p1 - p0, 1000));
+ Point off1 = crossZ(normal(p2 - p1, 1000));
+ Point n = normal(off0 + off1, 200);
+
+ return p1 + n;
+}
+
+Comb::Comb(Polygons& _boundery)
+: boundery(_boundery)
+{
+ minX = new int64_t[boundery.size()];
+ maxX = new int64_t[boundery.size()];
+ minIdx = new unsigned int[boundery.size()];
+ maxIdx = new unsigned int[boundery.size()];
+}
+
+Comb::~Comb()
+{
+ delete[] minX;
+ delete[] maxX;
+ delete[] minIdx;
+ delete[] maxIdx;
+}
+
+bool Comb::checkInside(Point p)
+{
+    //Check if we are inside the comb boundary. We do this by tracing a ray from the point towards positive X (the code below counts crossings with x >= p.X);
+    //  every boundary segment we cross increments the crossings counter. If we end up with an even number of crossings we are not inside the boundary.
+ int crossings = 0;
+ for(unsigned int n=0; n<boundery.size(); n++)
+ {
+ if (boundery[n].size() < 1)
+ continue;
+ Point p0 = boundery[n][boundery[n].size()-1];
+ for(unsigned int i=0; i<boundery[n].size(); i++)
+ {
+ Point p1 = boundery[n][i];
+
+ if ((p0.Y >= p.Y && p1.Y < p.Y) || (p1.Y > p.Y && p0.Y <= p.Y))
+ {
+ int64_t x = p0.X + (p1.X - p0.X) * (p.Y - p0.Y) / (p1.Y - p0.Y);
+ if (x >= p.X)
+ crossings ++;
+ }
+ p0 = p1;
+ }
+ }
+ if ((crossings % 2) == 0)
+ return false;
+ return true;
+}
+
+bool Comb::moveInside(Point* p, int distance)
+{
+ Point ret = *p;
+ int64_t bestDist = 2000LL * 2000LL;
+ for(unsigned int n=0; n<boundery.size(); n++)
+ {
+ if (boundery[n].size() < 1)
+ continue;
+ Point p0 = boundery[n][boundery[n].size()-1];
+ for(unsigned int i=0; i<boundery[n].size(); i++)
+ {
+ Point p1 = boundery[n][i];
+
+ //Q = A + Normal( B - A ) * ((( B - A ) dot ( P - A )) / VSize( A - B ));
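+            //i.e. Q is the projection of *p onto the segment p0-p1, clamped a little
+            //(10 units) away from both endpoints before the distance comparison below.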
+ Point pDiff = p1 - p0;
+ int64_t lineLength = vSize(pDiff);
+ int64_t distOnLine = dot(pDiff, *p - p0) / lineLength;
+ if (distOnLine < 10)
+ distOnLine = 10;
+ if (distOnLine > lineLength - 10)
+ distOnLine = lineLength - 10;
+ Point q = p0 + pDiff * distOnLine / lineLength;
+
+ int64_t dist = vSize2(q - *p);
+ if (dist < bestDist)
+ {
+ bestDist = dist;
+ ret = q + crossZ(normal(p1 - p0, distance));
+ }
+
+ p0 = p1;
+ }
+ }
+ if (bestDist < 2000LL * 2000LL)
+ {
+ *p = ret;
+ return true;
+ }
+ return false;
+}
+
+bool Comb::calc(Point startPoint, Point endPoint, vector<Point>& combPoints)
+{
+ if (shorterThen(endPoint - startPoint, 1500))
+ return true;
+
+ bool addEndpoint = false;
+ //Check if we are inside the comb boundaries
+ if (!checkInside(startPoint))
+ {
+ if (!moveInside(&startPoint)) //If we fail to move the point inside the comb boundary we need to retract.
+ return false;
+ combPoints.push_back(startPoint);
+ }
+ if (!checkInside(endPoint))
+ {
+ if (!moveInside(&endPoint)) //If we fail to move the point inside the comb boundary we need to retract.
+ return false;
+ addEndpoint = true;
+ }
+
+    //Check if we are crossing any boundaries, and pre-calculate some values.
+ if (!preTest(startPoint, endPoint))
+ {
+ //We're not crossing any boundaries. So skip the comb generation.
+ if (!addEndpoint && combPoints.size() == 0) //Only skip if we didn't move the start and end point.
+ return true;
+ }
+
+ //Calculate the minimum and maximum positions where we cross the comb boundary
+ calcMinMax();
+
+ int64_t x = sp.X;
+ vector<Point> pointList;
+    //Now walk through the crossings: for every boundary we cross, find the initial cross point and the exit point. Then add all the points in between
+ // to the pointList and continue with the next boundary we will cross, until there are no more boundaries to cross.
+ // This gives a path from the start to finish curved around the holes that it encounters.
+ while(true)
+ {
+ unsigned int n = getPolygonAbove(x);
+ if (n == UINT_MAX) break;
+
+ pointList.push_back(matrix.unapply(Point(minX[n] - 200, sp.Y)));
+ if ( (minIdx[n] - maxIdx[n] + boundery[n].size()) % boundery[n].size() > (maxIdx[n] - minIdx[n] + boundery[n].size()) % boundery[n].size())
+ {
+ for(unsigned int i=minIdx[n]; i != maxIdx[n]; i = (i < boundery[n].size() - 1) ? (i + 1) : (0))
+ {
+ pointList.push_back(getBounderyPointWithOffset(n, i));
+ }
+ }else{
+ minIdx[n]--;
+ if (minIdx[n] == UINT_MAX) minIdx[n] = boundery[n].size() - 1;
+ maxIdx[n]--;
+ if (maxIdx[n] == UINT_MAX) maxIdx[n] = boundery[n].size() - 1;
+ for(unsigned int i=minIdx[n]; i != maxIdx[n]; i = (i > 0) ? (i - 1) : (boundery[n].size() - 1))
+ {
+ pointList.push_back(getBounderyPointWithOffset(n, i));
+ }
+ }
+ pointList.push_back(matrix.unapply(Point(maxX[n] + 200, sp.Y)));
+
+ x = maxX[n];
+ }
+ pointList.push_back(endPoint);
+
+    //Optimize the pointList: skip each point that we can already reach without crossing a boundary. This smooths out the path and skips any unneeded corners.
+ Point p0 = startPoint;
+ for(unsigned int n=1; n<pointList.size(); n++)
+ {
+ if (collisionTest(p0, pointList[n]))
+ {
+ if (collisionTest(p0, pointList[n-1]))
+ return false;
+ p0 = pointList[n-1];
+ combPoints.push_back(p0);
+ }
+ }
+ if (addEndpoint)
+ combPoints.push_back(endPoint);
+ return true;
+}
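+
+//Illustrative usage sketch (not part of the original sources), assuming
+//'boundary' holds the comb Polygons of the current layer and travelStart /
+//travelEnd are the travel endpoints:
+//  Comb comb(boundary);
+//  vector<Point> combPoints;
+//  if (comb.calc(travelStart, travelEnd, combPoints))
+//  {
+//      //travel via combPoints stays inside the boundary; otherwise retract.
+//  }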
diff --git a/comb.h b/comb.h
new file mode 100644
index 0000000..e4b5e2e
--- /dev/null
+++ b/comb.h
@@ -0,0 +1,41 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef COMB_H
+#define COMB_H
+
+#include "utils/polygon.h"
+
+class Comb
+{
+private:
+ Polygons& boundery;
+
+ int64_t* minX;
+ int64_t* maxX;
+ unsigned int* minIdx;
+ unsigned int* maxIdx;
+
+ PointMatrix matrix;
+ Point sp;
+ Point ep;
+
+ bool preTest(Point startPoint, Point endPoint);
+ bool collisionTest(Point startPoint, Point endPoint);
+
+ void calcMinMax();
+
+ unsigned int getPolygonAbove(int64_t x);
+
+ Point getBounderyPointWithOffset(unsigned int polygonNr, unsigned int idx);
+
+public:
+ Comb(Polygons& _boundery);
+ ~Comb();
+
+ bool checkInside(Point p);
+
+ bool moveInside(Point* p, int distance = 100);
+
+ bool calc(Point startPoint, Point endPoint, vector<Point>& combPoints);
+};
+
+#endif//COMB_H
diff --git a/fffProcessor.h b/fffProcessor.h
new file mode 100644
index 0000000..b7a94c1
--- /dev/null
+++ b/fffProcessor.h
@@ -0,0 +1,525 @@
+#ifndef FFF_PROCESSOR_H
+#define FFF_PROCESSOR_H
+
+//FusedFilamentFabrication processor.
+class fffProcessor
+{
+private:
+ int maxObjectHeight;
+ int fileNr;
+ GCodeExport gcode;
+ ConfigSettings& config;
+ TimeKeeper timeKeeper;
+
+ GCodePathConfig skirtConfig;
+ GCodePathConfig inset0Config;
+ GCodePathConfig inset1Config;
+ GCodePathConfig fillConfig;
+ GCodePathConfig supportConfig;
+public:
+ fffProcessor(ConfigSettings& config)
+ : config(config)
+ {
+ fileNr = 1;
+ maxObjectHeight = 0;
+ }
+
+ bool setTargetFile(const char* filename)
+ {
+ gcode.setFilename(filename);
+ if (gcode.isValid())
+ gcode.addComment("Generated with Cura_SteamEngine %s", VERSION);
+ return gcode.isValid();
+ }
+
+ bool processFile(const char* input_filename)
+ {
+ if (!gcode.isValid())
+ return false;
+
+ TimeKeeper timeKeeperTotal;
+ SliceDataStorage storage;
+ preSetup();
+ if (!prepareModel(storage, input_filename))
+ return false;
+
+ processSliceData(storage);
+ writeGCode(storage);
+
+ logProgress("process", 1, 1);
+ log("Total time elapsed %5.2fs.\n", timeKeeperTotal.restart());
+
+ return true;
+ }
+
+ void finalize()
+ {
+ if (!gcode.isValid())
+ return;
+
+ gcode.addFanCommand(0);
+ gcode.addRetraction();
+ gcode.setZ(maxObjectHeight + 5000);
+ gcode.addMove(gcode.getPositionXY(), config.moveSpeed, 0);
+ gcode.addCode(config.endCode);
+ log("Print time: %d\n", int(gcode.getTotalPrintTime()));
+ log("Filament: %d\n", int(gcode.getTotalFilamentUsed(0)));
+ log("Filament2: %d\n", int(gcode.getTotalFilamentUsed(1)));
+
+ if (gcode.getFlavor() == GCODE_FLAVOR_ULTIGCODE)
+ {
+ char numberString[16];
+ sprintf(numberString, "%d", int(gcode.getTotalPrintTime()));
+ gcode.replaceTagInStart("<__TIME__>", numberString);
+ sprintf(numberString, "%d", int(gcode.getTotalFilamentUsed(0)));
+ gcode.replaceTagInStart("<FILAMENT>", numberString);
+ sprintf(numberString, "%d", int(gcode.getTotalFilamentUsed(1)));
+ gcode.replaceTagInStart("<FILAMEN2>", numberString);
+ }
+ }
+
+private:
+ void preSetup()
+ {
+ skirtConfig.setData(config.printSpeed, config.extrusionWidth, "SKIRT");
+ inset0Config.setData(config.printSpeed, config.extrusionWidth, "WALL-OUTER");
+ inset1Config.setData(config.printSpeed, config.extrusionWidth, "WALL-INNER");
+ fillConfig.setData(config.infillSpeed, config.extrusionWidth, "FILL");
+ supportConfig.setData(config.printSpeed, config.extrusionWidth, "SUPPORT");
+
+ for(unsigned int n=1; n<MAX_EXTRUDERS;n++)
+ gcode.setExtruderOffset(n, config.extruderOffset[n].p());
+ gcode.setFlavor(config.gcodeFlavor);
+ gcode.setRetractionSettings(config.retractionAmount, config.retractionSpeed, config.retractionAmountExtruderSwitch, config.minimalExtrusionBeforeRetraction);
+ }
+
+ bool prepareModel(SliceDataStorage& storage, const char* input_filename)
+ {
+ timeKeeper.restart();
+ log("Loading %s from disk...\n", input_filename);
+ SimpleModel* m = loadModel(input_filename, config.matrix);
+ if (!m)
+ {
+ log("Failed to load model: %s\n", input_filename);
+ return false;
+ }
+ log("Loaded from disk in %5.3fs\n", timeKeeper.restart());
+ log("Analyzing and optimizing model...\n");
+ OptimizedModel* om = new OptimizedModel(m, Point3(config.objectPosition.X, config.objectPosition.Y, -config.objectSink));
+ for(unsigned int v = 0; v < m->volumes.size(); v++)
+ {
+ log(" Face counts: %i -> %i %0.1f%%\n", (int)m->volumes[v].faces.size(), (int)om->volumes[v].faces.size(), float(om->volumes[v].faces.size()) / float(m->volumes[v].faces.size()) * 100);
+ log(" Vertex counts: %i -> %i %0.1f%%\n", (int)m->volumes[v].faces.size() * 3, (int)om->volumes[v].points.size(), float(om->volumes[v].points.size()) / float(m->volumes[v].faces.size() * 3) * 100);
+ }
+ delete m;
+ log("Optimize model %5.3fs \n", timeKeeper.restart());
+ //om->saveDebugSTL("c:\\models\\output.stl");
+
+ log("Slicing model...\n");
+ vector<Slicer*> slicerList;
+ for(unsigned int volumeIdx=0; volumeIdx < om->volumes.size(); volumeIdx++)
+ {
+ slicerList.push_back(new Slicer(&om->volumes[volumeIdx], config.initialLayerThickness / 2, config.layerThickness, config.fixHorrible & FIX_HORRIBLE_KEEP_NONE_CLOSED, config.fixHorrible & FIX_HORRIBLE_EXTENSIVE_STITCHING));
+ //slicerList[volumeIdx]->dumpSegmentsToHTML("C:\\models\\output.html");
+ }
+ log("Sliced model in %5.3fs\n", timeKeeper.restart());
+
+ fprintf(stdout,"Generating support map...\n");
+ generateSupportGrid(storage.support, om, config.supportAngle, config.supportEverywhere > 0, config.supportXYDistance, config.supportZDistance);
+
+ storage.modelSize = om->modelSize;
+ storage.modelMin = om->vMin;
+ storage.modelMax = om->vMax;
+ delete om;
+
+ log("Generating layer parts...\n");
+ for(unsigned int volumeIdx=0; volumeIdx < slicerList.size(); volumeIdx++)
+ {
+ storage.volumes.push_back(SliceVolumeStorage());
+ createLayerParts(storage.volumes[volumeIdx], slicerList[volumeIdx], config.fixHorrible & (FIX_HORRIBLE_UNION_ALL_TYPE_A | FIX_HORRIBLE_UNION_ALL_TYPE_B | FIX_HORRIBLE_UNION_ALL_TYPE_C));
+ delete slicerList[volumeIdx];
+ }
+ log("Generated layer parts in %5.3fs\n", timeKeeper.restart());
+ return true;
+ }
+
+ void processSliceData(SliceDataStorage& storage)
+ {
+ //carveMultipleVolumes(storage.volumes);
+ generateMultipleVolumesOverlap(storage.volumes, config.multiVolumeOverlap);
+ //dumpLayerparts(storage, "c:/models/output.html");
+
+ const unsigned int totalLayers = storage.volumes[0].layers.size();
+ for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
+ {
+ for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
+ {
+ int insetCount = config.insetCount;
+                if (config.spiralizeMode && int(layerNr) < config.downSkinCount && layerNr % 2 == 1)//Add extra insets every 2 layers when spiralizing; this makes the bottoms of cups watertight.
+ insetCount += 5;
+ generateInsets(&storage.volumes[volumeIdx].layers[layerNr], config.extrusionWidth, insetCount);
+ }
+ logProgress("inset",layerNr+1,totalLayers);
+ }
+ if (config.enableOozeShield)
+ {
+ for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
+ {
+ Polygons oozeShield;
+ for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
+ {
+ for(unsigned int partNr=0; partNr<storage.volumes[volumeIdx].layers[layerNr].parts.size(); partNr++)
+ {
+ oozeShield = oozeShield.unionPolygons(storage.volumes[volumeIdx].layers[layerNr].parts[partNr].outline.offset(2000));
+ }
+ }
+ storage.oozeShield.push_back(oozeShield);
+ }
+
+ for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
+ storage.oozeShield[layerNr] = storage.oozeShield[layerNr].offset(-1000).offset(1000);
+ int offsetAngle = tan(60.0*M_PI/180) * config.layerThickness;//Allow for a 60deg angle in the oozeShield.
+ for(unsigned int layerNr=1; layerNr<totalLayers; layerNr++)
+ storage.oozeShield[layerNr] = storage.oozeShield[layerNr].unionPolygons(storage.oozeShield[layerNr-1].offset(-offsetAngle));
+ for(unsigned int layerNr=totalLayers-1; layerNr>0; layerNr--)
+ storage.oozeShield[layerNr-1] = storage.oozeShield[layerNr-1].unionPolygons(storage.oozeShield[layerNr].offset(-offsetAngle));
+ }
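+        //Illustrative note (assuming micron units, as elsewhere in the config): offsetAngle =
+        //tan(60deg) * layerThickness, so with 100 micron layers each shield contour may shrink
+        //by at most ~173 micron per layer relative to its neighbours, keeping the shield wall
+        //within a 60 degree overhang.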
+ log("Generated inset in %5.3fs\n", timeKeeper.restart());
+
+ for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
+ {
+            if (!config.spiralizeMode || int(layerNr) < config.downSkinCount) //Only generate up/downskin and infill for the first X layers when spiralize is chosen.
+ {
+ for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
+ {
+ generateSkins(layerNr, storage.volumes[volumeIdx], config.extrusionWidth, config.downSkinCount, config.upSkinCount, config.infillOverlap);
+ generateSparse(layerNr, storage.volumes[volumeIdx], config.extrusionWidth, config.downSkinCount, config.upSkinCount);
+ }
+ }
+ logProgress("skin",layerNr+1,totalLayers);
+ }
+ log("Generated up/down skin in %5.3fs\n", timeKeeper.restart());
+
+ if (config.wipeTowerSize > 0)
+ {
+ PolygonRef p = storage.wipeTower.newPoly();
+ p.add(Point(storage.modelMin.x - 3000, storage.modelMax.y + 3000));
+ p.add(Point(storage.modelMin.x - 3000, storage.modelMax.y + 3000 + config.wipeTowerSize));
+ p.add(Point(storage.modelMin.x - 3000 - config.wipeTowerSize, storage.modelMax.y + 3000 + config.wipeTowerSize));
+ p.add(Point(storage.modelMin.x - 3000 - config.wipeTowerSize, storage.modelMax.y + 3000));
+
+ storage.wipePoint = Point(storage.modelMin.x - 3000 - config.wipeTowerSize / 2, storage.modelMax.y + 3000 + config.wipeTowerSize / 2);
+ }
+
+ generateSkirt(storage, config.skirtDistance, config.extrusionWidth, config.skirtLineCount, config.skirtMinLength, config.initialLayerThickness);
+ generateRaft(storage, config.raftMargin);
+
+ for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
+ {
+ for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
+ {
+ for(unsigned int partNr=0; partNr<storage.volumes[volumeIdx].layers[layerNr].parts.size(); partNr++)
+ {
+ if (layerNr > 0)
+ storage.volumes[volumeIdx].layers[layerNr].parts[partNr].bridgeAngle = bridgeAngle(&storage.volumes[volumeIdx].layers[layerNr].parts[partNr], &storage.volumes[volumeIdx].layers[layerNr-1]);
+ else
+ storage.volumes[volumeIdx].layers[layerNr].parts[partNr].bridgeAngle = -1;
+ }
+ }
+ }
+ }
+
+ void writeGCode(SliceDataStorage& storage)
+ {
+ if (fileNr == 1)
+ {
+ if (gcode.getFlavor() == GCODE_FLAVOR_ULTIGCODE)
+ {
+ gcode.addCode(";FLAVOR:UltiGCode");
+ gcode.addCode(";TIME:<__TIME__>");
+ gcode.addCode(";MATERIAL:<FILAMENT>");
+ gcode.addCode(";MATERIAL2:<FILAMEN2>");
+ }
+ gcode.addCode(config.startCode);
+ }else{
+ gcode.addFanCommand(0);
+ gcode.resetExtrusionValue();
+ gcode.addRetraction();
+ gcode.setZ(maxObjectHeight + 5000);
+ gcode.addMove(Point(storage.modelMin.x, storage.modelMin.y), config.moveSpeed, 0);
+ }
+ fileNr++;
+
+ unsigned int totalLayers = storage.volumes[0].layers.size();
+ gcode.addComment("Layer count: %d", totalLayers);
+
+ if (config.raftBaseThickness > 0 && config.raftInterfaceThickness > 0)
+ {
+ GCodePathConfig raftBaseConfig(config.initialLayerSpeed, config.raftBaseLinewidth, "SUPPORT");
+ GCodePathConfig raftInterfaceConfig(config.initialLayerSpeed, config.raftInterfaceLinewidth, "SUPPORT");
+ {
+ gcode.addComment("LAYER:-2");
+ gcode.addComment("RAFT");
+ GCodePlanner gcodeLayer(gcode, config.moveSpeed, config.retractionMinimalDistance);
+ gcode.setZ(config.raftBaseThickness);
+ gcode.setExtrusion(config.raftBaseThickness, config.filamentDiameter, config.filamentFlow);
+ gcodeLayer.addPolygonsByOptimizer(storage.raftOutline, &raftBaseConfig);
+
+ Polygons raftLines;
+ generateLineInfill(storage.raftOutline, raftLines, config.raftBaseLinewidth, config.raftLineSpacing, config.infillOverlap, 0);
+ gcodeLayer.addPolygonsByOptimizer(raftLines, &raftBaseConfig);
+
+ gcodeLayer.writeGCode(false, config.raftBaseThickness);
+ }
+
+ {
+ gcode.addComment("LAYER:-1");
+ gcode.addComment("RAFT");
+ GCodePlanner gcodeLayer(gcode, config.moveSpeed, config.retractionMinimalDistance);
+ gcode.setZ(config.raftBaseThickness + config.raftInterfaceThickness);
+ gcode.setExtrusion(config.raftInterfaceThickness, config.filamentDiameter, config.filamentFlow);
+
+ Polygons raftLines;
+ generateLineInfill(storage.raftOutline, raftLines, config.raftInterfaceLinewidth, config.raftLineSpacing, config.infillOverlap, 90);
+ gcodeLayer.addPolygonsByOptimizer(raftLines, &raftInterfaceConfig);
+
+ gcodeLayer.writeGCode(false, config.raftInterfaceThickness);
+ }
+ }
+
+ int volumeIdx = 0;
+ for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
+ {
+ logProgress("export", layerNr+1, totalLayers);
+
+ gcode.addComment("LAYER:%d", layerNr);
+ if (layerNr == 0)
+ gcode.setExtrusion(config.initialLayerThickness, config.filamentDiameter, config.filamentFlow);
+ else
+ gcode.setExtrusion(config.layerThickness, config.filamentDiameter, config.filamentFlow);
+
+ GCodePlanner gcodeLayer(gcode, config.moveSpeed, config.retractionMinimalDistance);
+ int32_t z = config.initialLayerThickness + layerNr * config.layerThickness;
+ z += config.raftBaseThickness + config.raftInterfaceThickness;
+ gcode.setZ(z);
+
+ bool printSupportFirst = (storage.support.generated && config.supportExtruder > 0 && config.supportExtruder == gcodeLayer.getExtruder());
+ if (printSupportFirst)
+ addSupportToGCode(storage, gcodeLayer, layerNr);
+
+ for(unsigned int volumeCnt = 0; volumeCnt < storage.volumes.size(); volumeCnt++)
+ {
+ if (volumeCnt > 0)
+ volumeIdx = (volumeIdx + 1) % storage.volumes.size();
+ addVolumeLayerToGCode(storage, gcodeLayer, volumeIdx, layerNr);
+ }
+ if (!printSupportFirst)
+ addSupportToGCode(storage, gcodeLayer, layerNr);
+
+ //Finish the layer by applying speed corrections for minimal layer times and slowdown for the initial layer.
+ if (int(layerNr) < config.initialSpeedupLayers)
+ {
+ int n = config.initialSpeedupLayers;
+ int layer0Factor = config.initialLayerSpeed * 100 / config.printSpeed;
+ gcodeLayer.setExtrudeSpeedFactor((layer0Factor * (n - layerNr) + 100 * (layerNr)) / n);
+ if (layerNr == 0)//On the first layer, also slow down the travel
+ gcodeLayer.setTravelSpeedFactor(layer0Factor);
+ }
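+            //Illustrative note: the factor ramps linearly from layer0Factor at layer 0 up to
+            //100% at layer initialSpeedupLayers. E.g. with initialLayerSpeed=20, printSpeed=50
+            //and n=4, layer 1 extrudes at (40*3 + 100*1)/4 = 55% of normal speed.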
+ gcodeLayer.forceMinimalLayerTime(config.minimalLayerTime, config.minimalFeedrate);
+
+ int fanSpeed = config.fanSpeedMin;
+ if (gcodeLayer.getExtrudeSpeedFactor() <= 50)
+ {
+ fanSpeed = config.fanSpeedMax;
+ }else{
+ int n = gcodeLayer.getExtrudeSpeedFactor() - 50;
+ fanSpeed = config.fanSpeedMin * n / 50 + config.fanSpeedMax * (50 - n) / 50;
+ }
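+            //Illustrative note: this interpolates the fan linearly between fanSpeedMax at a
+            //50% extrude-speed factor and fanSpeedMin at 100%. E.g. with min=40, max=100 and
+            //a factor of 75%, n = 25 and fanSpeed = 40*25/50 + 100*25/50 = 70.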
+ if (int(layerNr) < config.fanFullOnLayerNr)
+ {
+ //Slow down the fan on the layers below the [fanFullOnLayerNr], where layer 0 is speed 0.
+ fanSpeed = fanSpeed * layerNr / config.fanFullOnLayerNr;
+ }
+ gcode.addFanCommand(fanSpeed);
+
+ gcodeLayer.writeGCode(config.coolHeadLift > 0, int(layerNr) > 0 ? config.layerThickness : config.initialLayerThickness);
+ }
+
+ /* support debug
+ for(int32_t y=0; y<storage.support.gridHeight; y++)
+ {
+ for(int32_t x=0; x<storage.support.gridWidth; x++)
+ {
+ unsigned int n = x+y*storage.support.gridWidth;
+ if (storage.support.grid[n].size() < 1) continue;
+ int32_t z = storage.support.grid[n][0].z;
+ gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, 0), 0);
+ gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, z), z);
+ gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, 0), 0);
+ }
+ }
+ //*/
+
+ log("Wrote layers in %5.2fs.\n", timeKeeper.restart());
+ gcode.tellFileSize();
+ gcode.addFanCommand(0);
+
+ //Store the object height for when we are printing multiple objects, as we need to clear every one of them when moving to the next position.
+ maxObjectHeight = std::max(maxObjectHeight, storage.modelSize.z);
+ }
+
+ //Add a single layer from a single mesh-volume to the GCode
+ void addVolumeLayerToGCode(SliceDataStorage& storage, GCodePlanner& gcodeLayer, int volumeIdx, int layerNr)
+ {
+ int prevExtruder = gcodeLayer.getExtruder();
+ bool extruderChanged = gcodeLayer.setExtruder(volumeIdx);
+ if (layerNr == 0 && volumeIdx == 0)
+ gcodeLayer.addPolygonsByOptimizer(storage.skirt, &skirtConfig);
+
+ SliceLayer* layer = &storage.volumes[volumeIdx].layers[layerNr];
+ if (extruderChanged)
+ addWipeTower(storage, gcodeLayer, layerNr, prevExtruder);
+
+ if (storage.oozeShield.size() > 0 && storage.volumes.size() > 1)
+ {
+ gcodeLayer.setAlwaysRetract(true);
+ gcodeLayer.addPolygonsByOptimizer(storage.oozeShield[layerNr], &skirtConfig);
+ gcodeLayer.setAlwaysRetract(!config.enableCombing);
+ }
+
+ PathOrderOptimizer partOrderOptimizer(gcode.getPositionXY());
+ for(unsigned int partNr=0; partNr<layer->parts.size(); partNr++)
+ {
+ partOrderOptimizer.addPolygon(layer->parts[partNr].insets[0][0]);
+ }
+ partOrderOptimizer.optimize();
+
+ for(unsigned int partCounter=0; partCounter<partOrderOptimizer.polyOrder.size(); partCounter++)
+ {
+ SliceLayerPart* part = &layer->parts[partOrderOptimizer.polyOrder[partCounter]];
+
+ if (config.enableCombing)
+ gcodeLayer.setCombBoundary(&part->combBoundery);
+ else
+ gcodeLayer.setAlwaysRetract(true);
+
+ if (config.insetCount > 0)
+ {
+ if (config.spiralizeMode)
+ {
+ if (int(layerNr) >= config.downSkinCount)
+ inset0Config.spiralize = true;
+ if (int(layerNr) == config.downSkinCount && part->insets.size() > 0)
+ gcodeLayer.addPolygonsByOptimizer(part->insets[0], &inset1Config);
+ }
+ for(int insetNr=part->insets.size()-1; insetNr>-1; insetNr--)
+ {
+ if (insetNr == 0)
+ gcodeLayer.addPolygonsByOptimizer(part->insets[insetNr], &inset0Config);
+ else
+ gcodeLayer.addPolygonsByOptimizer(part->insets[insetNr], &inset1Config);
+ }
+ }
+
+ Polygons fillPolygons;
+ int fillAngle = 45;
+ if (layerNr & 1)
+ fillAngle += 90;
+ //int sparseSteps[1] = {config.extrusionWidth};
+ //generateConcentricInfill(part->skinOutline, fillPolygons, sparseSteps, 1);
+ generateLineInfill(part->skinOutline, fillPolygons, config.extrusionWidth, config.extrusionWidth, config.infillOverlap, (part->bridgeAngle > -1) ? part->bridgeAngle : fillAngle);
+ //int sparseSteps[2] = {config.extrusionWidth*5, config.extrusionWidth * 0.8};
+ //generateConcentricInfill(part->sparseOutline, fillPolygons, sparseSteps, 2);
+ if (config.sparseInfillLineDistance > 0)
+ {
+ if (config.sparseInfillLineDistance > config.extrusionWidth * 4)
+ {
+ generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance * 2, config.infillOverlap, 45);
+ generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance * 2, config.infillOverlap, 45 + 90);
+ }
+ else
+ {
+ generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance, config.infillOverlap, fillAngle);
+ }
+ }
+
+ gcodeLayer.addPolygonsByOptimizer(fillPolygons, &fillConfig);
+
+ //After a layer part, make sure the nozzle is inside the comb boundary, so we do not retract on the perimeter.
+ if (!config.spiralizeMode || int(layerNr) < config.downSkinCount)
+ gcodeLayer.moveInsideCombBoundary(config.extrusionWidth * 2);
+ }
+ gcodeLayer.setCombBoundary(NULL);
+ }
+
+ void addSupportToGCode(SliceDataStorage& storage, GCodePlanner& gcodeLayer, int layerNr)
+ {
+ if (!storage.support.generated)
+ return;
+
+ if (config.supportExtruder > -1)
+ {
+ int prevExtruder = gcodeLayer.getExtruder();
+ if (gcodeLayer.setExtruder(config.supportExtruder))
+ addWipeTower(storage, gcodeLayer, layerNr, prevExtruder);
+
+ if (storage.oozeShield.size() > 0 && storage.volumes.size() == 1)
+ {
+ gcodeLayer.setAlwaysRetract(true);
+ gcodeLayer.addPolygonsByOptimizer(storage.oozeShield[layerNr], &skirtConfig);
+ gcodeLayer.setAlwaysRetract(!config.enableCombing);
+ }
+ }
+ int32_t z = config.initialLayerThickness + layerNr * config.layerThickness;
+ SupportPolyGenerator supportGenerator(storage.support, z);
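+ //Keep the support clear of the model in the XY plane by subtracting every part outline, grown by supportXYDistance, from the generated support polygons.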
+ for(unsigned int volumeCnt = 0; volumeCnt < storage.volumes.size(); volumeCnt++)
+ {
+ SliceLayer* layer = &storage.volumes[volumeCnt].layers[layerNr];
+ for(unsigned int n=0; n<layer->parts.size(); n++)
+ supportGenerator.polygons = supportGenerator.polygons.difference(layer->parts[n].outline.offset(config.supportXYDistance));
+ }
+ //Contract and expand the support polygons so small sections are removed and the final polygon is smoothed a bit.
+ supportGenerator.polygons = supportGenerator.polygons.offset(-config.extrusionWidth * 3);
+ supportGenerator.polygons = supportGenerator.polygons.offset(config.extrusionWidth * 3);
+
+ vector<Polygons> supportIslands = supportGenerator.polygons.splitIntoParts();
+
+ for(unsigned int n=0; n<supportIslands.size(); n++)
+ {
+ Polygons supportLines;
+ if (config.supportLineDistance > 0)
+ {
+ if (config.supportLineDistance > config.extrusionWidth * 4)
+ {
+ generateLineInfill(supportIslands[n], supportLines, config.extrusionWidth, config.supportLineDistance*2, config.infillOverlap, 0);
+ generateLineInfill(supportIslands[n], supportLines, config.extrusionWidth, config.supportLineDistance*2, config.infillOverlap, 90);
+ }else{
+ generateLineInfill(supportIslands[n], supportLines, config.extrusionWidth, config.supportLineDistance, config.infillOverlap, (layerNr & 1) ? 0 : 90);
+ }
+ }
+
+ gcodeLayer.forceRetract();
+ if (config.enableCombing)
+ gcodeLayer.setCombBoundary(&supportIslands[n]);
+ gcodeLayer.addPolygonsByOptimizer(supportIslands[n], &supportConfig);
+ gcodeLayer.addPolygonsByOptimizer(supportLines, &supportConfig);
+ gcodeLayer.setCombBoundary(NULL);
+ }
+ }
+
+ void addWipeTower(SliceDataStorage& storage, GCodePlanner& gcodeLayer, int layerNr, int prevExtruder)
+ {
+ if (config.wipeTowerSize < 1)
+ return;
+ //If we changed extruder, print the wipe/prime tower for this nozzle.
+ gcodeLayer.addPolygonsByOptimizer(storage.wipeTower, &supportConfig);
+ Polygons fillPolygons;
+ generateLineInfill(storage.wipeTower, fillPolygons, config.extrusionWidth, config.extrusionWidth, config.infillOverlap, 45 + 90 * (layerNr % 2));
+ gcodeLayer.addPolygonsByOptimizer(fillPolygons, &supportConfig);
+
+ //Make sure we wipe the old extruder on the wipe tower.
+ gcodeLayer.addTravel(storage.wipePoint - config.extruderOffset[prevExtruder].p() + config.extruderOffset[gcodeLayer.getExtruder()].p());
+ }
+};
+
+#endif//FFF_PROCESSOR_H
diff --git a/gcodeExport.cpp b/gcodeExport.cpp
new file mode 100644
index 0000000..452c67a
--- /dev/null
+++ b/gcodeExport.cpp
@@ -0,0 +1,578 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include <stdarg.h>
+
+#include "gcodeExport.h"
+#include "pathOrderOptimizer.h"
+#include "timeEstimate.h"
+#include "settings.h"
+
+#if defined(__APPLE__) && defined(__MACH__)
+//On MacOS the file offset functions are always 64bit.
+#define off64_t off_t
+#define ftello64 ftello
+#define fseeko64 fseeko
+#endif
+
+GCodeExport::GCodeExport()
+: currentPosition(0,0,0)
+{
+ extrusionAmount = 0;
+ extrusionPerMM = 0;
+ retractionAmount = 4.5;
+ minimalExtrusionBeforeRetraction = 0.0;
+ extrusionAmountAtPreviousRetraction = -10000;
+ extruderSwitchRetraction = 14.5;
+ extruderNr = 0;
+ currentFanSpeed = -1;
+
+ totalPrintTime = 0.0;
+ for(unsigned int e=0; e<MAX_EXTRUDERS; e++)
+ totalFilament[e] = 0.0;
+
+ currentSpeed = 0;
+ retractionSpeed = 45;
+ isRetracted = true;
+ memset(extruderOffset, 0, sizeof(extruderOffset));
+ f = stdout;
+}
+
+GCodeExport::~GCodeExport()
+{
+ if (f)
+ fclose(f);
+}
+
+void GCodeExport::replaceTagInStart(const char* tag, const char* replaceValue)
+{
+ off64_t oldPos = ftello64(f);
+
+ char buffer[1024];
+ fseeko64(f, 0, SEEK_SET);
+ fread(buffer, 1024, 1, f);
+
+ char* c = strstr(buffer, tag);
+ if (c)//Only replace the tag if it was actually found in the first kilobyte of the file.
+ {
+ memset(c, ' ', strlen(tag));
+ memcpy(c, replaceValue, strlen(replaceValue));
+ }
+
+ fseeko64(f, 0, SEEK_SET);
+ fwrite(buffer, 1024, 1, f);
+
+ fseeko64(f, oldPos, SEEK_SET);
+}
+
+void GCodeExport::setExtruderOffset(int id, Point p)
+{
+ extruderOffset[id] = p;
+}
+
+void GCodeExport::setFlavor(int flavor)
+{
+ this->flavor = flavor;
+}
+int GCodeExport::getFlavor()
+{
+ return this->flavor;
+}
+
+void GCodeExport::setFilename(const char* filename)
+{
+ f = fopen(filename, "w+");
+}
+
+bool GCodeExport::isValid()
+{
+ return f != NULL;
+}
+
+void GCodeExport::setExtrusion(int layerThickness, int filamentDiameter, int flow)
+{
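+ //layerThickness and filamentDiameter are given in micron. The resulting extrusionPerMM is the E value needed per mm^2 of printed line footprint (line width * line length):
+ //for UltiGCode that E value is volumetric (mm3 of material), for the other flavors it is mm of filament.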
+ double filamentArea = M_PI * (double(filamentDiameter) / 1000.0 / 2.0) * (double(filamentDiameter) / 1000.0 / 2.0);
+ if (flavor == GCODE_FLAVOR_ULTIGCODE)//UltiGCode uses volume extrusion as E value, and thus does not need the filamentArea in the mix.
+ extrusionPerMM = double(layerThickness) / 1000.0;
+ else
+ extrusionPerMM = double(layerThickness) / 1000.0 / filamentArea * double(flow) / 100.0;
+}
+
+void GCodeExport::setRetractionSettings(int retractionAmount, int retractionSpeed, int extruderSwitchRetraction, int minimalExtrusionBeforeRetraction)
+{
+ this->retractionAmount = double(retractionAmount) / 1000.0;
+ this->retractionSpeed = retractionSpeed;
+ this->extruderSwitchRetraction = double(extruderSwitchRetraction) / 1000.0;
+ this->minimalExtrusionBeforeRetraction = double(minimalExtrusionBeforeRetraction) / 1000.0;
+}
+
+void GCodeExport::setZ(int z)
+{
+ this->zPos = z;
+}
+
+Point GCodeExport::getPositionXY()
+{
+ return Point(currentPosition.x, currentPosition.y);
+}
+
+int GCodeExport::getPositionZ()
+{
+ return currentPosition.z;
+}
+
+int GCodeExport::getExtruderNr()
+{
+ return extruderNr;
+}
+
+double GCodeExport::getTotalFilamentUsed(int e)
+{
+ if (e == extruderNr)
+ return totalFilament[e] + extrusionAmount;
+ return totalFilament[e];
+}
+
+double GCodeExport::getTotalPrintTime()
+{
+ return totalPrintTime;
+}
+
+void GCodeExport::updateTotalPrintTime()
+{
+ totalPrintTime += estimateCalculator.calculate();
+ estimateCalculator.reset();
+}
+
+void GCodeExport::addComment(const char* comment, ...)
+{
+ va_list args;
+ va_start(args, comment);
+ fprintf(f, ";");
+ vfprintf(f, comment, args);
+ fprintf(f, "\n");
+ va_end(args);
+}
+
+void GCodeExport::addLine(const char* line, ...)
+{
+ va_list args;
+ va_start(args, line);
+ vfprintf(f, line, args);
+ fprintf(f, "\n");
+ va_end(args);
+}
+
+void GCodeExport::resetExtrusionValue()
+{
+ if (extrusionAmount != 0.0)
+ {
+ fprintf(f, "G92 E0\n");
+ totalFilament[extruderNr] += extrusionAmount;
+ extrusionAmountAtPreviousRetraction -= extrusionAmount;
+ extrusionAmount = 0.0;
+ }
+}
+
+void GCodeExport::addDelay(double timeAmount)
+{
+ fprintf(f, "G4 P%d\n", int(timeAmount * 1000));
+ totalPrintTime += timeAmount;
+}
+
+void GCodeExport::addMove(Point p, int speed, int lineWidth)
+{
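+ //A lineWidth of 0 makes this a travel move (G0); any other value makes it an extrusion move (G1) whose E value is advanced by extrusionPerMM * lineWidth * travelled distance.
+ //If the head is currently retracted, the filament is primed back before extruding.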
+ if (lineWidth != 0)
+ {
+ Point diff = p - getPositionXY();
+ if (isRetracted)
+ {
+ if (flavor == GCODE_FLAVOR_ULTIGCODE)
+ {
+ fprintf(f, "G11\n");
+ }else{
+ fprintf(f, "G1 F%i E%0.5lf\n", retractionSpeed * 60, extrusionAmount);
+ currentSpeed = retractionSpeed;
+ estimateCalculator.plan(TimeEstimateCalculator::Position(double(p.X) / 1000.0, (p.Y) / 1000.0, double(zPos) / 1000.0, extrusionAmount), currentSpeed);
+ }
+ if (extrusionAmount > 10000.0) //According to https://github.com/Ultimaker/CuraEngine/issues/14 having more than 21m of extrusion causes inaccuracies, so reset it every 10m just to be sure.
+ resetExtrusionValue();
+ isRetracted = false;
+ }
+ extrusionAmount += extrusionPerMM * double(lineWidth) / 1000.0 * vSizeMM(diff);
+ fprintf(f, "G1");
+ }else{
+ fprintf(f, "G0");
+ }
+
+ if (currentSpeed != speed)
+ {
+ fprintf(f, " F%i", speed * 60);
+ currentSpeed = speed;
+ }
+ fprintf(f, " X%0.2f Y%0.2f", float(p.X - extruderOffset[extruderNr].X)/1000, float(p.Y - extruderOffset[extruderNr].Y)/1000);
+ if (zPos != currentPosition.z)
+ fprintf(f, " Z%0.2f", float(zPos)/1000);
+ if (lineWidth != 0)
+ fprintf(f, " E%0.5lf", extrusionAmount);
+ fprintf(f, "\n");
+
+ currentPosition = Point3(p.X, p.Y, zPos);
+ estimateCalculator.plan(TimeEstimateCalculator::Position(double(currentPosition.x) / 1000.0, (currentPosition.y) / 1000.0, double(currentPosition.z) / 1000.0, extrusionAmount), currentSpeed);
+}
+
+void GCodeExport::addRetraction()
+{
+ if (retractionAmount > 0 && !isRetracted && extrusionAmountAtPreviousRetraction + minimalExtrusionBeforeRetraction < extrusionAmount)
+ {
+ if (flavor == GCODE_FLAVOR_ULTIGCODE)
+ {
+ fprintf(f, "G10\n");
+ }else{
+ fprintf(f, "G1 F%i E%0.5lf\n", retractionSpeed * 60, extrusionAmount - retractionAmount);
+ estimateCalculator.plan(TimeEstimateCalculator::Position(double(currentPosition.x) / 1000.0, (currentPosition.y) / 1000.0, double(currentPosition.z) / 1000.0, extrusionAmount - retractionAmount), currentSpeed);
+ currentSpeed = retractionSpeed;
+ }
+ extrusionAmountAtPreviousRetraction = extrusionAmount;
+ isRetracted = true;
+ }
+}
+
+void GCodeExport::switchExtruder(int newExtruder)
+{
+ if (extruderNr == newExtruder)
+ return;
+
+ resetExtrusionValue();
+ extruderNr = newExtruder;
+
+ if (flavor == GCODE_FLAVOR_ULTIGCODE)
+ {
+ fprintf(f, "G10 S1\n");
+ }else{
+ fprintf(f, "G1 F%i E%0.4lf\n", retractionSpeed * 60, extrusionAmount - extruderSwitchRetraction);
+ currentSpeed = retractionSpeed;
+ }
+ isRetracted = true;
+ fprintf(f, "T%i\n", extruderNr);
+}
+
+void GCodeExport::addCode(const char* str)
+{
+ fprintf(f, "%s\n", str);
+}
+
+void GCodeExport::addFanCommand(int speed)
+{
+ if (currentFanSpeed == speed)
+ return;
+ if (speed > 0)
+ {
+ if (flavor == GCODE_FLAVOR_MAKERBOT)
+ fprintf(f, "M126 T0 ; value = %d\n", speed * 255 / 100);
+ else
+ fprintf(f, "M106 S%d\n", speed * 255 / 100);
+ }
+ else
+ {
+ if (flavor == GCODE_FLAVOR_MAKERBOT)
+ fprintf(f, "M127 T0\n");
+ else
+ fprintf(f, "M107\n");
+ }
+ currentFanSpeed = speed;
+}
+
+int GCodeExport::getFileSize(){
+ return ftell(f);
+}
+void GCodeExport::tellFileSize() {
+ float fsize = (float) ftell(f);
+ if(fsize > 1024*1024) {
+ fsize /= 1024.0*1024.0;
+ fprintf(stdout, "Wrote %5.1f MB.\n",fsize);
+ }
+ if(fsize > 1024) {
+ fsize /= 1024.0;
+ fprintf(stdout, "Wrote %5.1f kilobytes.\n",fsize);
+ }
+}
+
+GCodePath* GCodePlanner::getLatestPathWithConfig(GCodePathConfig* config)
+{
+ if (paths.size() > 0 && paths[paths.size()-1].config == config && !paths[paths.size()-1].done)
+ return &paths[paths.size()-1];
+ paths.push_back(GCodePath());
+ GCodePath* ret = &paths[paths.size()-1];
+ ret->retract = false;
+ ret->config = config;
+ ret->extruder = currentExtruder;
+ ret->done = false;
+ return ret;
+}
+void GCodePlanner::forceNewPathStart()
+{
+ if (paths.size() > 0)
+ paths[paths.size()-1].done = true;
+}
+
+GCodePlanner::GCodePlanner(GCodeExport& gcode, int travelSpeed, int retractionMinimalDistance)
+: gcode(gcode), travelConfig(travelSpeed, 0, "travel")
+{
+ lastPosition = gcode.getPositionXY();
+ comb = NULL;
+ extrudeSpeedFactor = 100;
+ travelSpeedFactor = 100;
+ extraTime = 0.0;
+ totalPrintTime = 0.0;
+ forceRetraction = false;
+ alwaysRetract = false;
+ currentExtruder = gcode.getExtruderNr();
+ this->retractionMinimalDistance = retractionMinimalDistance;
+}
+GCodePlanner::~GCodePlanner()
+{
+ if (comb)
+ delete comb;
+}
+
+void GCodePlanner::addTravel(Point p)
+{
+ GCodePath* path = getLatestPathWithConfig(&travelConfig);
+ if (forceRetraction)
+ {
+ if (!shorterThen(lastPosition - p, retractionMinimalDistance))
+ {
+ path->retract = true;
+ }
+ forceRetraction = false;
+ }else if (comb != NULL)
+ {
+ vector<Point> pointList;
+ if (comb->calc(lastPosition, p, pointList))
+ {
+ for(unsigned int n=0; n<pointList.size(); n++)
+ {
+ path->points.push_back(pointList[n]);
+ }
+ }else{
+ if (!shorterThen(lastPosition - p, retractionMinimalDistance))
+ path->retract = true;
+ }
+ }else if (alwaysRetract)
+ {
+ if (!shorterThen(lastPosition - p, retractionMinimalDistance))
+ path->retract = true;
+ }
+ path->points.push_back(p);
+ lastPosition = p;
+}
+
+void GCodePlanner::addExtrusionMove(Point p, GCodePathConfig* config)
+{
+ getLatestPathWithConfig(config)->points.push_back(p);
+ lastPosition = p;
+}
+
+void GCodePlanner::moveInsideCombBoundary(int distance)
+{
+ if (!comb || comb->checkInside(lastPosition)) return;
+ Point p = lastPosition;
+ if (comb->moveInside(&p, distance))
+ {
+ //Move inside again, so we move out of tight 90deg corners
+ comb->moveInside(&p, distance);
+ if (comb->checkInside(p))
+ {
+ addTravel(p);
+ //Make sure that any retraction happens after this move, not before it, by starting a new move path.
+ forceNewPathStart();
+ }
+ }
+}
+
+void GCodePlanner::addPolygon(PolygonRef polygon, int startIdx, GCodePathConfig* config)
+{
+ Point p0 = polygon[startIdx];
+ addTravel(p0);
+ for(unsigned int i=1; i<polygon.size(); i++)
+ {
+ Point p1 = polygon[(startIdx + i) % polygon.size()];
+ addExtrusionMove(p1, config);
+ p0 = p1;
+ }
+ if (polygon.size() > 2)
+ addExtrusionMove(polygon[startIdx], config);
+}
+
+void GCodePlanner::addPolygonsByOptimizer(Polygons& polygons, GCodePathConfig* config)
+{
+ PathOrderOptimizer orderOptimizer(lastPosition);
+ for(unsigned int i=0;i<polygons.size();i++)
+ orderOptimizer.addPolygon(polygons[i]);
+ orderOptimizer.optimize();
+ for(unsigned int i=0;i<orderOptimizer.polyOrder.size();i++)
+ {
+ int nr = orderOptimizer.polyOrder[i];
+ addPolygon(polygons[nr], orderOptimizer.polyStart[nr], config);
+ }
+}
+
+void GCodePlanner::forceMinimalLayerTime(double minTime, int minimalSpeed)
+{
+ Point p0 = gcode.getPositionXY();
+ double travelTime = 0.0;
+ double extrudeTime = 0.0;
+ for(unsigned int n=0; n<paths.size(); n++)
+ {
+ GCodePath* path = &paths[n];
+ for(unsigned int i=0; i<path->points.size(); i++)
+ {
+ double thisTime = vSizeMM(p0 - path->points[i]) / double(path->config->speed);
+ if (path->config->lineWidth != 0)
+ extrudeTime += thisTime;
+ else
+ travelTime += thisTime;
+ p0 = path->points[i];
+ }
+ }
+ double totalTime = extrudeTime + travelTime;
+ if (totalTime < minTime && extrudeTime > 0.0)
+ {
+ double minExtrudeTime = minTime - travelTime;
+ if (minExtrudeTime < 1)
+ minExtrudeTime = 1;
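+ //Scale all extrusion speeds down by this factor (< 1) so the extrusion time stretches to at least minExtrudeTime, but never slow any path below minimalSpeed.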
+ double factor = extrudeTime / minExtrudeTime;
+ for(unsigned int n=0; n<paths.size(); n++)
+ {
+ GCodePath* path = &paths[n];
+ if (path->config->lineWidth == 0)
+ continue;
+ int speed = path->config->speed * factor;
+ if (speed < minimalSpeed)
+ factor = double(minimalSpeed) / double(path->config->speed);
+ }
+
+ //Only slow down with the minimal time if that will be slower than a factor already set. First layer slowdown also sets the speed factor.
+ if (factor * 100 < getExtrudeSpeedFactor())
+ setExtrudeSpeedFactor(factor * 100);
+ else
+ factor = getExtrudeSpeedFactor() / 100.0;
+
+ if (minTime - (extrudeTime / factor) - travelTime > 0.1)
+ {
+ //TODO: Use up this extra time (circle around the print?)
+ this->extraTime = minTime - (extrudeTime / factor) - travelTime;
+ }
+ this->totalPrintTime = (extrudeTime / factor) + travelTime;
+ }else{
+ this->totalPrintTime = totalTime;
+ }
+}
+
+void GCodePlanner::writeGCode(bool liftHeadIfNeeded, int layerThickness)
+{
+ GCodePathConfig* lastConfig = NULL;
+ int extruder = gcode.getExtruderNr();
+
+ for(unsigned int n=0; n<paths.size(); n++)
+ {
+ GCodePath* path = &paths[n];
+ if (extruder != path->extruder)
+ {
+ extruder = path->extruder;
+ gcode.switchExtruder(extruder);
+ }else if (path->retract)
+ {
+ gcode.addRetraction();
+ }
+ if (path->config != &travelConfig && lastConfig != path->config)
+ {
+ gcode.addComment("TYPE:%s", path->config->name);
+ lastConfig = path->config;
+ }
+ int speed = path->config->speed;
+
+ if (path->config->lineWidth != 0)// Only apply the extrudeSpeedFactor to extrusion moves
+ speed = speed * extrudeSpeedFactor / 100;
+ else
+ speed = speed * travelSpeedFactor / 100;
+
+ if (path->points.size() == 1 && path->config != &travelConfig && shorterThen(gcode.getPositionXY() - path->points[0], path->config->lineWidth * 2))
+ {
+ //Check for lots of small moves and combine them into one large line
+ Point p0 = path->points[0];
+ unsigned int i = n + 1;
+ while(i < paths.size() && paths[i].points.size() == 1 && shorterThen(p0 - paths[i].points[0], path->config->lineWidth * 2))
+ {
+ p0 = paths[i].points[0];
+ i ++;
+ }
+ if (paths[i-1].config == &travelConfig)
+ i --;
+ if (i > n + 2)
+ {
+ p0 = gcode.getPositionXY();
+ for(unsigned int x=n; x<i-1; x+=2)
+ {
+ int64_t oldLen = vSize(p0 - paths[x].points[0]);
+ Point newPoint = (paths[x].points[0] + paths[x+1].points[0]) / 2;
+ int64_t newLen = vSize(gcode.getPositionXY() - newPoint);
+ if (newLen > 0)
+ gcode.addMove(newPoint, speed, path->config->lineWidth * oldLen / newLen);
+
+ p0 = paths[x+1].points[0];
+ }
+ gcode.addMove(paths[i-1].points[0], speed, path->config->lineWidth);
+ n = i - 1;
+ continue;
+ }
+ }
+
+ bool spiralize = path->config->spiralize;
+ if (spiralize)
+ {
+ //Check if we are the last spiralize path in the list, if not, do not spiralize.
+ for(unsigned int m=n+1; m<paths.size(); m++)
+ {
+ if (paths[m].config->spiralize)
+ spiralize = false;
+ }
+ }
+ if (spiralize)
+ {
+ //If we need to spiralize then raise the head slowly by 1 layer as this path progresses.
+ float totalLength = 0.0;
+ int z = gcode.getPositionZ();
+ Point p0 = gcode.getPositionXY();
+ for(unsigned int i=0; i<path->points.size(); i++)
+ {
+ Point p1 = path->points[i];
+ totalLength += vSizeMM(p0 - p1);
+ p0 = p1;
+ }
+
+ float length = 0.0;
+ p0 = gcode.getPositionXY();
+ for(unsigned int i=0; i<path->points.size(); i++)
+ {
+ Point p1 = path->points[i];
+ length += vSizeMM(p0 - p1);
+ p0 = p1;
+ gcode.setZ(z + layerThickness * length / totalLength);
+ gcode.addMove(path->points[i], speed, path->config->lineWidth);
+ }
+ }else{
+ for(unsigned int i=0; i<path->points.size(); i++)
+ {
+ gcode.addMove(path->points[i], speed, path->config->lineWidth);
+ }
+ }
+ }
+
+ gcode.updateTotalPrintTime();
+ if (liftHeadIfNeeded && extraTime > 0.0)
+ {
+ gcode.addComment("Small layer, adding delay of %f", extraTime);
+ gcode.addRetraction();
+ gcode.setZ(gcode.getPositionZ() + 3000);
+ gcode.addMove(gcode.getPositionXY(), travelConfig.speed, 0);
+ gcode.addMove(gcode.getPositionXY() - Point(-20000, 0), travelConfig.speed, 0);
+ gcode.addDelay(extraTime);
+ }
+}
diff --git a/gcodeExport.h b/gcodeExport.h
new file mode 100644
index 0000000..cf1236e
--- /dev/null
+++ b/gcodeExport.h
@@ -0,0 +1,212 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef GCODEEXPORT_H
+#define GCODEEXPORT_H
+
+#include <stdio.h>
+
+#include "settings.h"
+#include "comb.h"
+#include "utils/intpoint.h"
+#include "utils/polygon.h"
+#include "timeEstimate.h"
+
+class GCodeExport
+{
+private:
+ FILE* f;
+ double extrusionAmount;
+ double extrusionPerMM;
+ double retractionAmount;
+ double extruderSwitchRetraction;
+ double minimalExtrusionBeforeRetraction;
+ double extrusionAmountAtPreviousRetraction;
+ Point3 currentPosition;
+ Point extruderOffset[MAX_EXTRUDERS];
+ int currentSpeed, retractionSpeed;
+ int zPos;
+ bool isRetracted;
+ int extruderNr;
+ int currentFanSpeed;
+ int flavor;
+
+ double totalFilament[MAX_EXTRUDERS];
+ double totalPrintTime;
+ TimeEstimateCalculator estimateCalculator;
+public:
+
+ GCodeExport();
+
+ ~GCodeExport();
+
+ void replaceTagInStart(const char* tag, const char* replaceValue);
+
+ void setExtruderOffset(int id, Point p);
+
+ void setFlavor(int flavor);
+ int getFlavor();
+
+ void setFilename(const char* filename);
+
+ bool isValid();
+
+ void setExtrusion(int layerThickness, int filamentDiameter, int flow);
+
+ void setRetractionSettings(int retractionAmount, int retractionSpeed, int extruderSwitchRetraction, int minimalExtrusionBeforeRetraction);
+
+ void setZ(int z);
+
+ Point getPositionXY();
+
+ int getPositionZ();
+
+ int getExtruderNr();
+
+ double getTotalFilamentUsed(int e);
+
+ double getTotalPrintTime();
+ void updateTotalPrintTime();
+
+ void addComment(const char* comment, ...);
+
+ void addLine(const char* line, ...);
+
+ void resetExtrusionValue();
+
+ void addDelay(double timeAmount);
+
+ void addMove(Point p, int speed, int lineWidth);
+
+ void addRetraction();
+
+ void switchExtruder(int newExtruder);
+
+ void addCode(const char* str);
+
+ void addFanCommand(int speed);
+
+ int getFileSize();
+ void tellFileSize();
+};
+
+class GCodePathConfig
+{
+public:
+ int speed;
+ int lineWidth;
+ const char* name;
+ bool spiralize;
+
+ GCodePathConfig() : speed(0), lineWidth(0), name(NULL), spiralize(false) {}
+ GCodePathConfig(int speed, int lineWidth, const char* name) : speed(speed), lineWidth(lineWidth), name(name), spiralize(false) {}
+
+ void setData(int speed, int lineWidth, const char* name)
+ {
+ this->speed = speed;
+ this->lineWidth = lineWidth;
+ this->name = name;
+ }
+};
+
+class GCodePath
+{
+public:
+ GCodePathConfig* config;
+ bool retract;
+ int extruder;
+ vector<Point> points;
+ bool done;//Path is finished; no more moves should be added to it, and a new path should be started instead of appending to this one.
+};
+
+class GCodePlanner
+{
+private:
+ GCodeExport& gcode;
+
+ Point lastPosition;
+ vector<GCodePath> paths;
+ Comb* comb;
+
+ GCodePathConfig travelConfig;
+ int extrudeSpeedFactor;
+ int travelSpeedFactor;
+ int currentExtruder;
+ int retractionMinimalDistance;
+ bool forceRetraction;
+ bool alwaysRetract;
+ double extraTime;
+ double totalPrintTime;
+private:
+ GCodePath* getLatestPathWithConfig(GCodePathConfig* config);
+ void forceNewPathStart();
+public:
+ GCodePlanner(GCodeExport& gcode, int travelSpeed, int retractionMinimalDistance);
+ ~GCodePlanner();
+
+ bool setExtruder(int extruder)
+ {
+ if (extruder == currentExtruder)
+ return false;
+ currentExtruder = extruder;
+ return true;
+ }
+
+ int getExtruder()
+ {
+ return currentExtruder;
+ }
+
+ void setCombBoundary(Polygons* polygons)
+ {
+ if (comb)
+ delete comb;
+ if (polygons)
+ comb = new Comb(*polygons);
+ else
+ comb = NULL;
+ }
+
+ void setAlwaysRetract(bool alwaysRetract)
+ {
+ this->alwaysRetract = alwaysRetract;
+ }
+
+ void forceRetract()
+ {
+ forceRetraction = true;
+ }
+
+ void setExtrudeSpeedFactor(int speedFactor)
+ {
+ if (speedFactor < 1) speedFactor = 1;
+ this->extrudeSpeedFactor = speedFactor;
+ }
+ int getExtrudeSpeedFactor()
+ {
+ return this->extrudeSpeedFactor;
+ }
+ void setTravelSpeedFactor(int speedFactor)
+ {
+ if (speedFactor < 1) speedFactor = 1;
+ this->travelSpeedFactor = speedFactor;
+ }
+ int getTravelSpeedFactor()
+ {
+ return this->travelSpeedFactor;
+ }
+
+ void addTravel(Point p);
+
+ void addExtrusionMove(Point p, GCodePathConfig* config);
+
+ void moveInsideCombBoundary(int distance);
+
+ void addPolygon(PolygonRef polygon, int startIdx, GCodePathConfig* config);
+
+ void addPolygonsByOptimizer(Polygons& polygons, GCodePathConfig* config);
+
+ void forceMinimalLayerTime(double minTime, int minimalSpeed);
+
+ void writeGCode(bool liftHeadIfNeeded, int layerThickness);
+};
+
+#endif//GCODEEXPORT_H
diff --git a/infill.cpp b/infill.cpp
new file mode 100644
index 0000000..abed2d9
--- /dev/null
+++ b/infill.cpp
@@ -0,0 +1,79 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "infill.h"
+
+void generateConcentricInfill(Polygons outline, Polygons& result, int offsets[], int offsetsSize)
+{
+ int step = 0;
+ while(1)
+ {
+ for(unsigned int polygonNr=0; polygonNr<outline.size(); polygonNr++)
+ result.add(outline[polygonNr]);
+ outline = outline.offset(-offsets[step]);
+ if (outline.size() < 1)
+ break;
+ step = (step + 1) % offsetsSize;
+ }
+}
+
+int compare_int64_t(const void* a, const void* b)
+{
+ int64_t n = (*(int64_t*)a) - (*(int64_t*)b);
+ if (n < 0) return -1;
+ if (n > 0) return 1;
+ return 0;
+}
+
+void generateLineInfill(const Polygons& in_outline, Polygons& result, int extrusionWidth, int lineSpacing, int infillOverlap, double rotation)
+{
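+ //Line infill: rotate the outline so the requested infill angle lines up with the axes, then intersect evenly spaced scanlines (lineSpacing apart, at constant X in the rotated frame) with the polygon edges.
+ //The sorted intersection points on each scanline are paired up into infill line segments, which are rotated back into the original frame with matrix.unapply().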
+ Polygons outline = in_outline.offset(extrusionWidth * infillOverlap / 100);
+ PointMatrix matrix(rotation);
+
+ outline.applyMatrix(matrix);
+
+ AABB boundary(outline);
+
+ boundary.min.X = ((boundary.min.X / lineSpacing) - 1) * lineSpacing;
+ int lineCount = (boundary.max.X - boundary.min.X + (lineSpacing - 1)) / lineSpacing;
+ vector<vector<int64_t> > cutList;
+ for(int n=0; n<lineCount; n++)
+ cutList.push_back(vector<int64_t>());
+
+ for(unsigned int polyNr=0; polyNr < outline.size(); polyNr++)
+ {
+ Point p1 = outline[polyNr][outline[polyNr].size()-1];
+ for(unsigned int i=0; i < outline[polyNr].size(); i++)
+ {
+ Point p0 = outline[polyNr][i];
+ int idx0 = (p0.X - boundary.min.X) / lineSpacing;
+ int idx1 = (p1.X - boundary.min.X) / lineSpacing;
+ int64_t xMin = p0.X, xMax = p1.X;
+ if (p0.X > p1.X) { xMin = p1.X; xMax = p0.X; }
+ if (idx0 > idx1) { int tmp = idx0; idx0 = idx1; idx1 = tmp; }
+ for(int idx = idx0; idx<=idx1; idx++)
+ {
+ int x = (idx * lineSpacing) + boundary.min.X + lineSpacing / 2;
+ if (x < xMin) continue;
+ if (x >= xMax) continue;
+ int y = p0.Y + (p1.Y - p0.Y) * (x - p0.X) / (p1.X - p0.X);
+ cutList[idx].push_back(y);
+ }
+ p1 = p0;
+ }
+ }
+
+ int idx = 0;
+ for(int64_t x = boundary.min.X + lineSpacing / 2; x < boundary.max.X; x += lineSpacing)
+ {
+ qsort(cutList[idx].data(), cutList[idx].size(), sizeof(int64_t), compare_int64_t);
+ for(unsigned int i = 0; i + 1 < cutList[idx].size(); i+=2)
+ {
+ if (cutList[idx][i+1] - cutList[idx][i] < extrusionWidth / 5)
+ continue;
+ PolygonRef p = result.newPoly();
+ p.add(matrix.unapply(Point(x, cutList[idx][i])));
+ p.add(matrix.unapply(Point(x, cutList[idx][i+1])));
+ }
+ idx += 1;
+ }
+}
+
diff --git a/infill.h b/infill.h
new file mode 100644
index 0000000..ab8edab
--- /dev/null
+++ b/infill.h
@@ -0,0 +1,10 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef INFILL_H
+#define INFILL_H
+
+#include "utils/polygon.h"
+
+void generateConcentricInfill(Polygons outline, Polygons& result, int offsets[], int offsetsSize);
+void generateLineInfill(const Polygons& in_outline, Polygons& result, int extrusionWidth, int lineSpacing, int infillOverlap, double rotation);
+
+#endif//INFILL_H
diff --git a/inset.cpp b/inset.cpp
new file mode 100644
index 0000000..b491ebe
--- /dev/null
+++ b/inset.cpp
@@ -0,0 +1,45 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "inset.h"
+#include "polygonOptimizer.h"
+
+void generateInsets(SliceLayerPart* part, int offset, int insetCount)
+{
+ part->combBoundery = part->outline.offset(-offset);
+ if (insetCount == 0)
+ {
+ part->insets.push_back(part->outline);
+ return;
+ }
+
+ for(int i=0; i<insetCount; i++)
+ {
+ part->insets.push_back(Polygons());
+ part->insets[i] = part->outline.offset(-offset * i - offset/2);
+ optimizePolygons(part->insets[i]);
+ if (part->insets[i].size() < 1)
+ {
+ part->insets.pop_back();
+ break;
+ }
+ }
+}
+
+void generateInsets(SliceLayer* layer, int offset, int insetCount)
+{
+ for(unsigned int partNr = 0; partNr < layer->parts.size(); partNr++)
+ {
+ generateInsets(&layer->parts[partNr], offset, insetCount);
+ }
+
+ //Remove the parts which did not generate an inset, as these parts are too small to print.
+ // Later code can then assume that there is always at least 1 inset line.
+ for(unsigned int partNr = 0; partNr < layer->parts.size(); partNr++)
+ {
+ if (layer->parts[partNr].insets.size() < 1)
+ {
+ layer->parts.erase(layer->parts.begin() + partNr);
+ partNr -= 1;
+ }
+ }
+}
+
diff --git a/inset.h b/inset.h
new file mode 100644
index 0000000..5316813
--- /dev/null
+++ b/inset.h
@@ -0,0 +1,11 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef INSET_H
+#define INSET_H
+
+#include "sliceDataStorage.h"
+
+void generateInsets(SliceLayerPart* part, int offset, int insetCount);
+
+void generateInsets(SliceLayer* layer, int offset, int insetCount);
+
+#endif//INSET_H
diff --git a/layerPart.cpp b/layerPart.cpp
new file mode 100644
index 0000000..a824456
--- /dev/null
+++ b/layerPart.cpp
@@ -0,0 +1,90 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include <stdio.h>
+
+#include "layerPart.h"
+#include "settings.h"
+
+/*
+The layer-part creation step is the first step in creating actual useful data for 3D printing.
+It takes the result of the Slice step, which is an unordered list of polygons, and groups those polygons.
+Each of these groups is called a "part", sometimes also known as an "island". These parts represent
+isolated areas in the 2D layer with possible holes.
+
+Creating "parts" is an important step, as all elements in a single part should be printed before moving on to another part,
+and every bit inside a single part can be printed without the nozzle leaving the boundary of that part.
+
+It's also the first step that stores the result in the "data storage" so all other steps can access it.
+*/
+
+
+void createLayerWithParts(SliceLayer& storageLayer, SlicerLayer* layer, int unionAllType)
+{
+ if (unionAllType & FIX_HORRIBLE_UNION_ALL_TYPE_B)
+ {
+ for(unsigned int i=0; i<layer->polygonList.size(); i++)
+ {
+ if (layer->polygonList[i].orientation())
+ layer->polygonList[i].reverse();
+ }
+ }
+
+ vector<Polygons> result;
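+ //FIX_HORRIBLE_UNION_ALL_TYPE_C expands the polygons by 1mm before splitting them into parts (and shrinks the resulting outlines again below), so polygons that barely touch can end up in the same part.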
+ if (unionAllType & FIX_HORRIBLE_UNION_ALL_TYPE_C)
+ result = layer->polygonList.offset(1000).splitIntoParts(unionAllType);
+ else
+ result = layer->polygonList.splitIntoParts(unionAllType);
+ for(unsigned int i=0; i<result.size(); i++)
+ {
+ storageLayer.parts.push_back(SliceLayerPart());
+ if (unionAllType & FIX_HORRIBLE_UNION_ALL_TYPE_C)
+ {
+ storageLayer.parts[i].outline.add(result[i][0]);
+ storageLayer.parts[i].outline = storageLayer.parts[i].outline.offset(-1000);
+ }else
+ storageLayer.parts[i].outline = result[i];
+ storageLayer.parts[i].boundaryBox.calculate(storageLayer.parts[i].outline);
+ }
+}
+
+void createLayerParts(SliceVolumeStorage& storage, Slicer* slicer, int unionAllType)
+{
+ for(unsigned int layerNr = 0; layerNr < slicer->layers.size(); layerNr++)
+ {
+ storage.layers.push_back(SliceLayer());
+ createLayerWithParts(storage.layers[layerNr], &slicer->layers[layerNr], unionAllType);
+ }
+}
+
+void dumpLayerparts(SliceDataStorage& storage, const char* filename)
+{
+ FILE* out = fopen(filename, "w");
+ fprintf(out, "<!DOCTYPE html><html><body>");
+ Point3 modelSize = storage.modelSize;
+ Point3 modelMin = storage.modelMin;
+
+ for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
+ {
+ for(unsigned int layerNr=0;layerNr<storage.volumes[volumeIdx].layers.size(); layerNr++)
+ {
+ fprintf(out, "<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\" style=\"width: 500px; height:500px\">\n");
+ SliceLayer* layer = &storage.volumes[volumeIdx].layers[layerNr];
+ for(unsigned int i=0;i<layer->parts.size();i++)
+ {
+ SliceLayerPart* part = &layer->parts[i];
+ for(unsigned int j=0;j<part->outline.size();j++)
+ {
+ fprintf(out, "<polygon points=\"");
+ for(unsigned int k=0;k<part->outline[j].size();k++)
+ fprintf(out, "%f,%f ", float(part->outline[j][k].X - modelMin.x)/modelSize.x*500, float(part->outline[j][k].Y - modelMin.y)/modelSize.y*500);
+ if (j == 0)
+ fprintf(out, "\" style=\"fill:gray; stroke:black;stroke-width:1\" />\n");
+ else
+ fprintf(out, "\" style=\"fill:red; stroke:black;stroke-width:1\" />\n");
+ }
+ }
+ fprintf(out, "</svg>\n");
+ }
+ }
+ fprintf(out, "</body></html>");
+ fclose(out);
+}
diff --git a/layerPart.h b/layerPart.h
new file mode 100644
index 0000000..45542eb
--- /dev/null
+++ b/layerPart.h
@@ -0,0 +1,27 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef LAYERPART_H
+#define LAYERPART_H
+
+#include "sliceDataStorage.h"
+#include "slicer.h"
+
+/*
+The layer-part creation step is the first step in creating actual useful data for 3D printing.
+It takes the result of the Slice step, which is an unordered list of polygons, and groups those polygons.
+Each of these groups is called a "part", sometimes also known as an "island". These parts represent
+isolated areas in the 2D layer with possible holes.
+
+Creating "parts" is an important step, as all elements in a single part should be printed before moving on to another part,
+and every bit inside a single part can be printed without the nozzle leaving the boundary of that part.
+
+It's also the first step that stores the result in the "data storage" so all other steps can access it.
+*/
+
+
+void createLayerWithParts(SliceLayer& storageLayer, SlicerLayer* layer, int unionAllType);
+
+void createLayerParts(SliceVolumeStorage& storage, Slicer* slicer, int unionAllType);
+
+void dumpLayerparts(SliceDataStorage& storage, const char* filename);
+
+#endif//LAYERPART_H
diff --git a/main.cpp b/main.cpp
new file mode 100644
index 0000000..9d19394
--- /dev/null
+++ b/main.cpp
@@ -0,0 +1,199 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <signal.h>
+#if defined(__linux__) || (defined(__APPLE__) && defined(__MACH__))
+#include <execinfo.h>
+#include <sys/resource.h>
+#endif
+#include <stddef.h>
+
+#include "utils/gettime.h"
+#include "utils/logoutput.h"
+#include "sliceDataStorage.h"
+
+#include "modelFile/modelFile.h"
+#include "settings.h"
+#include "optimizedModel.h"
+#include "multiVolumes.h"
+#include "polygonOptimizer.h"
+#include "slicer.h"
+#include "layerPart.h"
+#include "inset.h"
+#include "skin.h"
+#include "infill.h"
+#include "bridge.h"
+#include "support.h"
+#include "pathOrderOptimizer.h"
+#include "skirt.h"
+#include "raft.h"
+#include "comb.h"
+#include "gcodeExport.h"
+#include "fffProcessor.h"
+
+void print_usage()
+{
+ printf("usage: CuraEngine [-h] [-v] [-m 3x3matrix] [-s <settingkey>=<value>] -o <output.gcode> <model.stl>\n");
+}
+
+void signal_FPE(int n)
+{
+ (void)n;
+ printf("Arithmetic exception.\n");
+ exit(1);
+}
+
+int main(int argc, char **argv)
+{
+#if defined(__linux__) || (defined(__APPLE__) && defined(__MACH__))
+ //Lower the process priority on linux and mac.
+ setpriority(PRIO_PROCESS, 0, 10);
+#endif
+ signal(SIGFPE, signal_FPE);
+
+ ConfigSettings config;
+ fffProcessor processor(config);
+
+ config.filamentDiameter = 2890;
+ config.filamentFlow = 100;
+ config.initialLayerThickness = 300;
+ config.layerThickness = 100;
+ config.extrusionWidth = 400;
+ config.insetCount = 2;
+ config.downSkinCount = 6;
+ config.upSkinCount = 6;
+ config.initialSpeedupLayers = 4;
+ config.initialLayerSpeed = 20;
+ config.printSpeed = 50;
+ config.infillSpeed = 50;
+ config.moveSpeed = 200;
+ config.fanFullOnLayerNr = 2;
+ config.skirtDistance = 6000;
+ config.skirtLineCount = 1;
+ config.skirtMinLength = 0;
+ config.sparseInfillLineDistance = 100 * config.extrusionWidth / 20;
+ config.infillOverlap = 15;
+ config.objectPosition.X = 102500;
+ config.objectPosition.Y = 102500;
+ config.objectSink = 0;
+ config.supportAngle = -1;
+ config.supportEverywhere = 0;
+ config.supportLineDistance = config.sparseInfillLineDistance;
+ config.supportExtruder = -1;
+ config.supportXYDistance = 700;
+ config.supportZDistance = 150;
+ config.retractionAmount = 4500;
+ config.retractionSpeed = 45;
+ config.retractionAmountExtruderSwitch = 14500;
+ config.retractionMinimalDistance = 1500;
+ config.minimalExtrusionBeforeRetraction = 100;
+ config.enableOozeShield = 0;
+ config.enableCombing = 1;
+ config.wipeTowerSize = 0;
+ config.multiVolumeOverlap = 0;
+
+ config.minimalLayerTime = 5;
+ config.minimalFeedrate = 10;
+ config.coolHeadLift = 1;
+ config.fanSpeedMin = 100;
+ config.fanSpeedMax = 100;
+
+ config.raftMargin = 5000;
+ config.raftLineSpacing = 1000;
+ config.raftBaseThickness = 0;
+ config.raftBaseLinewidth = 0;
+ config.raftInterfaceThickness = 0;
+ config.raftInterfaceLinewidth = 0;
+
+ config.spiralizeMode = 0;
+ config.fixHorrible = 0;
+ config.gcodeFlavor = GCODE_FLAVOR_REPRAP;
+ memset(config.extruderOffset, 0, sizeof(config.extruderOffset));
+
+ config.startCode =
+ "M109 S210 ;Heatup to 210C\n"
+ "G21 ;metric values\n"
+ "G90 ;absolute positioning\n"
+ "G28 ;Home\n"
+ "G1 Z15.0 F300 ;move the platform down 15mm\n"
+ "G92 E0 ;zero the extruded length\n"
+ "G1 F200 E5 ;extrude 5mm of feed stock\n"
+ "G92 E0 ;zero the extruded length again\n";
+ config.endCode =
+ "M104 S0 ;extruder heater off\n"
+ "M140 S0 ;heated bed heater off (if you have it)\n"
+ "G91 ;relative positioning\n"
+ "G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure\n"
+ "G1 Z+0.5 E-5 X-20 Y-20 F9000 ;move Z up a bit and retract filament even more\n"
+ "G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way\n"
+ "M84 ;steppers off\n"
+ "G90 ;absolute positioning\n";
+
+ fprintf(stdout,"Cura_SteamEngine version %s\n", VERSION);
+
+ for(int argn = 1; argn < argc; argn++)
+ {
+ char* str = argv[argn];
+ if (str[0] == '-')
+ {
+ for(str++; *str; str++)
+ {
+ switch(*str)
+ {
+ case 'h':
+ print_usage();
+ exit(1);
+ case 'v':
+ verbose_level++;
+ break;
+ case 'b':
+ argn++;
+ binaryMeshBlob = fopen(argv[argn], "rb");
+ break;
+ case 'o':
+ argn++;
+ if (!processor.setTargetFile(argv[argn]))
+ {
+ logError("Failed to open %s for output.\n", argv[argn]);
+ exit(1);
+ }
+ break;
+ case 's':
+ {
+ argn++;
+ char* valuePtr = strchr(argv[argn], '=');
+ if (valuePtr)
+ {
+ *valuePtr++ = '\0';
+
+ if (!config.setSetting(argv[argn], valuePtr))
+ printf("Setting not found: %s %s\n", argv[argn], valuePtr);
+ }
+ }
+ break;
+ case 'm':
+ argn++;
+ sscanf(argv[argn], "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf",
+ &config.matrix.m[0][0], &config.matrix.m[0][1], &config.matrix.m[0][2],
+ &config.matrix.m[1][0], &config.matrix.m[1][1], &config.matrix.m[1][2],
+ &config.matrix.m[2][0], &config.matrix.m[2][1], &config.matrix.m[2][2]);
+ break;
+ default:
+ logError("Unknown option: %c\n", *str);
+ break;
+ }
+ }
+ }else{
+ try {
+ processor.processFile(argv[argn]);
+ }catch(...){
+ printf("Unknown exception\n");
+ exit(1);
+ }
+ }
+ }
+
+ processor.finalize();
+}
diff --git a/modelFile/modelFile.cpp b/modelFile/modelFile.cpp
new file mode 100644
index 0000000..ad1a269
--- /dev/null
+++ b/modelFile/modelFile.cpp
@@ -0,0 +1,177 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include <string.h>
+#include <stdio.h>
+
+#include "modelFile/modelFile.h"
+
+FILE* binaryMeshBlob = NULL;
+
+/* Custom fgets function to support Mac line-ends in Ascii STL files. OpenSCAD produces this when used on Mac */
+void* fgets_(char* ptr, size_t len, FILE* f)
+{
+ while(len && fread(ptr, 1, 1, f) > 0)
+ {
+ if (*ptr == '\n' || *ptr == '\r')
+ {
+ *ptr = '\0';
+ return ptr;
+ }
+ ptr++;
+ len--;
+ }
+ return NULL;
+}
+
+SimpleModel* loadModelSTL_ascii(const char* filename, FMatrix3x3& matrix)
+{
+ SimpleModel* m = new SimpleModel();
+ m->volumes.push_back(SimpleVolume());
+ SimpleVolume* vol = &m->volumes[0];
+ FILE* f = fopen(filename, "rt");
+ char buffer[1024];
+ FPoint3 vertex;
+ int n = 0;
+ Point3 v0(0,0,0), v1(0,0,0), v2(0,0,0);
+ while(fgets_(buffer, sizeof(buffer), f))
+ {
+ if (sscanf(buffer, " vertex %lf %lf %lf", &vertex.x, &vertex.y, &vertex.z) == 3)
+ {
+ n++;
+ switch(n)
+ {
+ case 1:
+ v0 = matrix.apply(vertex);
+ break;
+ case 2:
+ v1 = matrix.apply(vertex);
+ break;
+ case 3:
+ v2 = matrix.apply(vertex);
+ vol->addFace(v0, v1, v2);
+ n = 0;
+ break;
+ }
+ }
+ }
+ fclose(f);
+ return m;
+}
+
+SimpleModel* loadModelSTL_binary(const char* filename, FMatrix3x3& matrix)
+{
+ FILE* f = fopen(filename, "rb");
+ char buffer[80];
+ uint32_t faceCount;
+ //Skip the header
+ if (fread(buffer, 80, 1, f) != 1)
+ {
+ fclose(f);
+ return NULL;
+ }
+ //Read the face count
+ if (fread(&faceCount, sizeof(uint32_t), 1, f) != 1)
+ {
+ fclose(f);
+ return NULL;
+ }
+ //For each face read:
+ //float(x,y,z) = normal, float(X,Y,Z)*3 = vertexes, uint16_t = flags
+ SimpleModel* m = new SimpleModel();
+ m->volumes.push_back(SimpleVolume());
+ SimpleVolume* vol = &m->volumes[0];
+ if(vol == NULL)
+ {
+ fclose(f);
+ return NULL;
+ }
+
+ for(unsigned int i=0;i<faceCount;i++)
+ {
+ if (fread(buffer, sizeof(float) * 3, 1, f) != 1)
+ {
+ fclose(f);
+ return NULL;
+ }
+ float v[9];
+ if (fread(v, sizeof(float) * 9, 1, f) != 1)
+ {
+ fclose(f);
+ return NULL;
+ }
+ Point3 v0 = matrix.apply(FPoint3(v[0], v[1], v[2]));
+ Point3 v1 = matrix.apply(FPoint3(v[3], v[4], v[5]));
+ Point3 v2 = matrix.apply(FPoint3(v[6], v[7], v[8]));
+ vol->addFace(v0, v1, v2);
+ if (fread(buffer, sizeof(uint16_t), 1, f) != 1)
+ {
+ fclose(f);
+ return NULL;
+ }
+ }
+ fclose(f);
+ return m;
+}
+
+SimpleModel* loadModelSTL(const char* filename, FMatrix3x3& matrix)
+{
+ FILE* f = fopen(filename, "r");
+ char buffer[6];
+ if (f == NULL)
+ return NULL;
+
+ if (fread(buffer, 5, 1, f) != 1)
+ {
+ fclose(f);
+ return NULL;
+ }
+ fclose(f);
+
+ buffer[5] = '\0';
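+ //Ascii STL files start with the keyword "solid"; anything else is treated as a binary STL.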
+ if (strcasecmp(buffer, "SOLID") == 0)
+ {
+ return loadModelSTL_ascii(filename, matrix);
+ }
+ return loadModelSTL_binary(filename, matrix);
+}
+
+SimpleModel* loadModel(const char* filename, FMatrix3x3& matrix)
+{
+ const char* ext = strrchr(filename, '.');
+ if (ext && strcasecmp(ext, ".stl") == 0)
+ {
+ return loadModelSTL(filename, matrix);
+ }
+ if (filename[0] == '#' && binaryMeshBlob != NULL)
+ {
+ SimpleModel* m = new SimpleModel();
+
+ while(*filename == '#')
+ {
+ filename++;
+
+ m->volumes.push_back(SimpleVolume());
+ SimpleVolume* vol = &m->volumes[m->volumes.size()-1];
+ int32_t n, pNr = 0;
+ if (fread(&n, 1, sizeof(int32_t), binaryMeshBlob) < 1)
+ return NULL;
+ printf("Reading mesh from binary blob with %i vertexes\n", n);
+ Point3 v[3];
+ while(n)
+ {
+ float f[3];
+ if (fread(f, 3, sizeof(float), binaryMeshBlob) < 1)
+ return NULL;
+ FPoint3 fp(f[0], f[1], f[2]);
+ v[pNr++] = matrix.apply(fp);
+ if (pNr == 3)
+ {
+ vol->addFace(v[0], v[1], v[2]);
+ pNr = 0;
+ }
+ n--;
+ }
+ }
+ return m;
+ }
+ return NULL;
+}
diff --git a/modelFile/modelFile.h b/modelFile/modelFile.h
new file mode 100644
index 0000000..80397af
--- /dev/null
+++ b/modelFile/modelFile.h
@@ -0,0 +1,110 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef MODELFILE_H
+#define MODELFILE_H
+/**
+modelFile contains the model loaders for the slicer. The model loader turns any format that it can read into a list of triangles with 3 X/Y/Z points.
+
+The format returned is a Model class with an array of faces, which have integer points with a resolution of 1 micron, giving a maximum object size of 4 meters.
+**/
+
+#include <vector>
+using std::vector;
+#include "utils/intpoint.h"
+#include "utils/floatpoint.h"
+
+extern FILE* binaryMeshBlob;
+
+#define SET_MIN(n, m) do { if ((m) < (n)) n = m; } while(0)
+#define SET_MAX(n, m) do { if ((m) > (n)) n = m; } while(0)
+
+/* A SimpleFace is a 3 dimensional model triangle with 3 points. These points are already converted to integers */
+class SimpleFace
+{
+public:
+ Point3 v[3];
+
+ SimpleFace(Point3& v0, Point3& v1, Point3& v2) { v[0] = v0; v[1] = v1; v[2] = v2; }
+};
+
+/* A SimpleVolume is the most basic representation of a 3D model. It contains all the faces as SimpleFaces, with nothing fancy. */
+class SimpleVolume
+{
+public:
+ vector<SimpleFace> faces;
+
+ void addFace(Point3& v0, Point3& v1, Point3& v2)
+ {
+ faces.push_back(SimpleFace(v0, v1, v2));
+ }
+
+ Point3 min()
+ {
+ Point3 ret = faces[0].v[0];
+ for(unsigned int i=0; i<faces.size(); i++)
+ {
+ SET_MIN(ret.x, faces[i].v[0].x);
+ SET_MIN(ret.y, faces[i].v[0].y);
+ SET_MIN(ret.z, faces[i].v[0].z);
+ SET_MIN(ret.x, faces[i].v[1].x);
+ SET_MIN(ret.y, faces[i].v[1].y);
+ SET_MIN(ret.z, faces[i].v[1].z);
+ SET_MIN(ret.x, faces[i].v[2].x);
+ SET_MIN(ret.y, faces[i].v[2].y);
+ SET_MIN(ret.z, faces[i].v[2].z);
+ }
+ return ret;
+ }
+ Point3 max()
+ {
+ Point3 ret = faces[0].v[0];
+ for(unsigned int i=0; i<faces.size(); i++)
+ {
+ SET_MAX(ret.x, faces[i].v[0].x);
+ SET_MAX(ret.y, faces[i].v[0].y);
+ SET_MAX(ret.z, faces[i].v[0].z);
+ SET_MAX(ret.x, faces[i].v[1].x);
+ SET_MAX(ret.y, faces[i].v[1].y);
+ SET_MAX(ret.z, faces[i].v[1].z);
+ SET_MAX(ret.x, faces[i].v[2].x);
+ SET_MAX(ret.y, faces[i].v[2].y);
+ SET_MAX(ret.z, faces[i].v[2].z);
+ }
+ return ret;
+ }
+};
+
+//A SimpleModel is a 3D model with 1 or more 3D volumes.
+class SimpleModel
+{
+public:
+ vector<SimpleVolume> volumes;
+
+ Point3 min()
+ {
+ Point3 ret = volumes[0].min();
+ for(unsigned int i=0; i<volumes.size(); i++)
+ {
+ Point3 v = volumes[i].min();
+ SET_MIN(ret.x, v.x);
+ SET_MIN(ret.y, v.y);
+ SET_MIN(ret.z, v.z);
+ }
+ return ret;
+ }
+ Point3 max()
+ {
+ Point3 ret = volumes[0].max();
+ for(unsigned int i=0; i<volumes.size(); i++)
+ {
+ Point3 v = volumes[i].max();
+ SET_MAX(ret.x, v.x);
+ SET_MAX(ret.y, v.y);
+ SET_MAX(ret.z, v.z);
+ }
+ return ret;
+ }
+};
+
+SimpleModel* loadModel(const char* filename, FMatrix3x3& matrix);
+
+#endif//MODELFILE_H
diff --git a/multiVolumes.h b/multiVolumes.h
new file mode 100644
index 0000000..8f0900f
--- /dev/null
+++ b/multiVolumes.h
@@ -0,0 +1,59 @@
+#ifndef MULTIVOLUMES_H
+#define MULTIVOLUMES_H
+
+/* This file contains code to help fix up and change layers that are built from multiple volumes. */
+
+void carveMultipleVolumes(vector<SliceVolumeStorage> &volumes)
+{
+ //Go through all the volumes and remove the previous volumes' outlines from our own outline, so we never have overlapping areas.
+ for(unsigned int idx=0; idx < volumes.size(); idx++)
+ {
+ for(unsigned int idx2=0; idx2<idx; idx2++)
+ {
+ for(unsigned int layerNr=0; layerNr < volumes[idx].layers.size(); layerNr++)
+ {
+ SliceLayer* layer1 = &volumes[idx].layers[layerNr];
+ SliceLayer* layer2 = &volumes[idx2].layers[layerNr];
+ for(unsigned int p1 = 0; p1 < layer1->parts.size(); p1++)
+ {
+ for(unsigned int p2 = 0; p2 < layer2->parts.size(); p2++)
+ {
+ layer1->parts[p1].outline = layer1->parts[p1].outline.difference(layer2->parts[p2].outline);
+ }
+ }
+ }
+ }
+ }
+}
+
+//Expand each layer a bit and keep only the extra area that overlaps with other volumes.
+//This generates some overlap in dual extrusion, for better bonding of the touching parts.
+void generateMultipleVolumesOverlap(vector<SliceVolumeStorage> &volumes, int overlap)
+{
+ if (volumes.size() < 2 || overlap <= 0) return;
+
+ for(unsigned int layerNr=0; layerNr < volumes[0].layers.size(); layerNr++)
+ {
+ Polygons fullLayer;
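+ //fullLayer becomes the union of all volumes in this layer, expanded and then shrunk by 20 micron so volumes that (almost) touch are merged into one area.
+ //Each part is then grown by half the overlap and clipped against fullLayer, so extra material is only added where another volume is actually nearby.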
+ for(unsigned int volIdx = 0; volIdx < volumes.size(); volIdx++)
+ {
+ SliceLayer* layer1 = &volumes[volIdx].layers[layerNr];
+ for(unsigned int p1 = 0; p1 < layer1->parts.size(); p1++)
+ {
+ fullLayer = fullLayer.unionPolygons(layer1->parts[p1].outline.offset(20));
+ }
+ }
+ fullLayer = fullLayer.offset(-20);
+
+ for(unsigned int volIdx = 0; volIdx < volumes.size(); volIdx++)
+ {
+ SliceLayer* layer1 = &volumes[volIdx].layers[layerNr];
+ for(unsigned int p1 = 0; p1 < layer1->parts.size(); p1++)
+ {
+ layer1->parts[p1].outline = fullLayer.intersection(layer1->parts[p1].outline.offset(overlap / 2));
+ }
+ }
+ }
+}
+
+#endif//MULTIVOLUMES_H
diff --git a/optimizedModel.cpp b/optimizedModel.cpp
new file mode 100644
index 0000000..a85bc23
--- /dev/null
+++ b/optimizedModel.cpp
@@ -0,0 +1,141 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include <stdio.h>
+
+#include "utils/gettime.h"
+#include "utils/logoutput.h"
+#include "optimizedModel.h"
+
+#define MELD_DIST 30
+OptimizedVolume::OptimizedVolume(SimpleVolume* volume, OptimizedModel* model)
+: model(model)
+{
+ points.reserve(volume->faces.size() * 3);
+ faces.reserve(volume->faces.size());
+
+ std::map<uint32_t, std::vector<uint32_t> > indexMap;
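+ //indexMap maps a spatial hash of the quantized vertex position to the indices of points already stored near that location,
+ //so vertices that lie within MELD_DIST (30 micron) of each other can be merged into a single point.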
+
+ double t = getTime();
+ for(uint32_t i=0; i<volume->faces.size(); i++)
+ {
+ OptimizedFace f;
+ if((i%1000==0) && (getTime()-t)>2.0) logProgress("optimized", i + 1, volume->faces.size());
+ for(uint32_t j=0; j<3; j++)
+ {
+ Point3 p = volume->faces[i].v[j];
+ int hash = ((p.x + MELD_DIST/2) / MELD_DIST) ^ (((p.y + MELD_DIST/2) / MELD_DIST) << 10) ^ (((p.z + MELD_DIST/2) / MELD_DIST) << 20);
+ uint32_t idx;
+ bool add = true;
+ for(unsigned int n = 0; n < indexMap[hash].size(); n++)
+ {
+ if ((points[indexMap[hash][n]].p - p).testLength(MELD_DIST))
+ {
+ idx = indexMap[hash][n];
+ add = false;
+ break;
+ }
+ }
+ if (add)
+ {
+ indexMap[hash].push_back(points.size());
+ idx = points.size();
+ points.push_back(p);
+ }
+ f.index[j] = idx;
+ }
+ if (f.index[0] != f.index[1] && f.index[0] != f.index[2] && f.index[1] != f.index[2])
+ {
+ //Check if there is a face with the same points
+ bool duplicate = false;
+ for(unsigned int _idx0 = 0; _idx0 < points[f.index[0]].faceIndexList.size(); _idx0++)
+ {
+ for(unsigned int _idx1 = 0; _idx1 < points[f.index[1]].faceIndexList.size(); _idx1++)
+ {
+ for(unsigned int _idx2 = 0; _idx2 < points[f.index[2]].faceIndexList.size(); _idx2++)
+ {
+ if (points[f.index[0]].faceIndexList[_idx0] == points[f.index[1]].faceIndexList[_idx1] && points[f.index[0]].faceIndexList[_idx0] == points[f.index[2]].faceIndexList[_idx2])
+ duplicate = true;
+ }
+ }
+ }
+ if (!duplicate)
+ {
+ points[f.index[0]].faceIndexList.push_back(faces.size());
+ points[f.index[1]].faceIndexList.push_back(faces.size());
+ points[f.index[2]].faceIndexList.push_back(faces.size());
+ faces.push_back(f);
+ }
+ }
+ }
+ //fprintf(stdout, "\rAll faces are optimized in %5.1fs.\n",timeElapsed(t));
+
+ int openFacesCount = 0;
+ for(unsigned int i=0;i<faces.size();i++)
+ {
+ OptimizedFace* f = &faces[i];
+ f->touching[0] = getFaceIdxWithPoints(f->index[0], f->index[1], i);
+ f->touching[1] = getFaceIdxWithPoints(f->index[1], f->index[2], i);
+ f->touching[2] = getFaceIdxWithPoints(f->index[2], f->index[0], i);
+ if (f->touching[0] == -1)
+ openFacesCount++;
+ if (f->touching[1] == -1)
+ openFacesCount++;
+ if (f->touching[2] == -1)
+ openFacesCount++;
+ }
+ //fprintf(stdout, " Number of open faces: %i\n", openFacesCount);
+}
+
+
+void OptimizedModel::saveDebugSTL(const char* filename)
+{
+ char buffer[80] = "Cura_Engine_STL_export";
+ uint32_t n;
+ uint16_t s;
+ float flt;
+ OptimizedVolume* vol = &volumes[0];
+ FILE* f = fopen(filename, "wb");
+ fwrite(buffer, 80, 1, f);
+ n = vol->faces.size();
+ fwrite(&n, sizeof(n), 1, f);
+ for(unsigned int i=0;i<vol->faces.size();i++)
+ {
+ flt = 0;
+ s = 0;
+ fwrite(&flt, sizeof(flt), 1, f);
+ fwrite(&flt, sizeof(flt), 1, f);
+ fwrite(&flt, sizeof(flt), 1, f);
+
+ flt = vol->points[vol->faces[i].index[0]].p.x / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[0]].p.y / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[0]].p.z / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[1]].p.x / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[1]].p.y / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[1]].p.z / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[2]].p.x / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[2]].p.y / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+ flt = vol->points[vol->faces[i].index[2]].p.z / 1000.0; fwrite(&flt, sizeof(flt), 1, f);
+
+ fwrite(&s, sizeof(s), 1, f);
+ }
+ fclose(f);
+ //Export the open faces so you can view them with Cura (hacky)
+ /*
+ char gcodeFilename[1024];
+ strcpy(gcodeFilename, filename);
+ strcpy(strchr(gcodeFilename, '.'), ".gcode");
+ f = fopen(gcodeFilename, "w");
+ for(unsigned int i=0;i<faces.size();i++)
+ {
+ for(int j=0;j<3;j++)
+ {
+ if (faces[i].touching[j] == -1)
+ {
+ Point3 p0 = points[faces[i].index[j]].p;
+ Point3 p1 = points[faces[i].index[(j+1)%3]].p;
+ fprintf(f, ";Model error(open face): (%f, %f, %f) (%f, %f, %f)\n", p0.x / 1000.0, p0.y / 1000.0, p0.z / 1000.0, p1.x / 1000.0, p1.y / 1000.0, p1.z / 1000.0);
+ }
+ }
+ }
+ fclose(f);
+ */
+}
diff --git a/optimizedModel.h b/optimizedModel.h
new file mode 100644
index 0000000..e6996de
--- /dev/null
+++ b/optimizedModel.h
@@ -0,0 +1,77 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef OPTIMIZED_MODEL_H
+#define OPTIMIZED_MODEL_H
+
+#include <map>
+#include "modelFile/modelFile.h"
+
+class OptimizedFace
+{
+public:
+ int index[3];
+ int touching[3];
+};
+class OptimizedPoint3
+{
+public:
+ Point3 p;
+ vector<uint32_t> faceIndexList;
+
+ OptimizedPoint3(Point3 p): p(p) {}
+};
+
+class OptimizedModel;
+class OptimizedVolume
+{
+public:
+ OptimizedModel* model;
+ vector<OptimizedPoint3> points;
+ vector<OptimizedFace> faces;
+
+ OptimizedVolume(SimpleVolume* volume, OptimizedModel* model);
+
+ int getFaceIdxWithPoints(int idx0, int idx1, int notFaceIdx)
+ {
+ for(unsigned int i=0;i<points[idx0].faceIndexList.size();i++)
+ {
+ int f0 = points[idx0].faceIndexList[i];
+ if (f0 == notFaceIdx) continue;
+ for(unsigned int j=0;j<points[idx1].faceIndexList.size();j++)
+ {
+ int f1 = points[idx1].faceIndexList[j];
+ if (f1 == notFaceIdx) continue;
+ if (f0 == f1) return f0;
+ }
+ }
+ return -1;
+ }
+};
+class OptimizedModel
+{
+public:
+ vector<OptimizedVolume> volumes;
+ Point3 modelSize;
+ Point3 vMin, vMax;
+
+ OptimizedModel(SimpleModel* model, Point3 center)
+ {
+ for(unsigned int i=0; i<model->volumes.size(); i++)
+ volumes.push_back(OptimizedVolume(&model->volumes[i], this));
+ vMin = model->min();
+ vMax = model->max();
+
+ Point3 vOffset((vMin.x + vMax.x) / 2, (vMin.y + vMax.y) / 2, vMin.z);
+ vOffset -= center;
+ for(unsigned int i=0; i<volumes.size(); i++)
+ for(unsigned int n=0; n<volumes[i].points.size(); n++)
+ volumes[i].points[n].p -= vOffset;
+
+ modelSize = vMax - vMin;
+ vMin -= vOffset;
+ vMax -= vOffset;
+ }
+
+ void saveDebugSTL(const char* filename);
+};
+
+#endif//OPTIMIZED_MODEL_H
diff --git a/pathOrderOptimizer.cpp b/pathOrderOptimizer.cpp
new file mode 100644
index 0000000..eb453ca
--- /dev/null
+++ b/pathOrderOptimizer.cpp
@@ -0,0 +1,95 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "pathOrderOptimizer.h"
+
+void PathOrderOptimizer::optimize()
+{
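+ //Greedy nearest-neighbour ordering in three passes:
+ // 1. for every polygon, pick the vertex closest to startPoint as its candidate start point;
+ // 2. repeatedly pick the closest not-yet-visited polygon (2-point "polygons" are open lines, either end may become the start);
+ // 3. with the final order known, re-pick each polygon's start vertex as the one closest to where the previous polygon ends.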
+ std::vector<bool> picked;
+ for(unsigned int i=0;i<polygons.size(); i++)
+ {
+ int best = -1;
+ float bestDist = 0xFFFFFFFFFFFFFFFFLL;
+ PolygonRef poly = polygons[i];
+ for(unsigned int j=0; j<poly.size(); j++)
+ {
+ float dist = vSize2f(poly[j] - startPoint);
+ if (dist < bestDist)
+ {
+ best = j;
+ bestDist = dist;
+ }
+ }
+ polyStart.push_back(best);
+ picked.push_back(false);
+ }
+
+ Point p0 = startPoint;
+ for(unsigned int n=0; n<polygons.size(); n++)
+ {
+ int best = -1;
+ float bestDist = 0xFFFFFFFFFFFFFFFFLL;
+ for(unsigned int i=0;i<polygons.size(); i++)
+ {
+ if (picked[i] || polygons[i].size() < 1)
+ continue;
+ if (polygons[i].size() == 2)
+ {
+ float dist = vSize2f(polygons[i][0] - p0);
+ if (dist < bestDist)
+ {
+ best = i;
+ bestDist = dist;
+ polyStart[i] = 0;
+ }
+ dist = vSize2f(polygons[i][1] - p0);
+ if (dist < bestDist)
+ {
+ best = i;
+ bestDist = dist;
+ polyStart[i] = 1;
+ }
+ }else{
+ float dist = vSize2f(polygons[i][polyStart[i]] - p0);
+ if (dist < bestDist)
+ {
+ best = i;
+ bestDist = dist;
+ }
+ }
+ }
+ if (best > -1)
+ {
+ if (polygons[best].size() == 2)
+ {
+ p0 = polygons[best][(polyStart[best] + 1) % 2];
+ }else{
+ p0 = polygons[best][polyStart[best]];
+ }
+ picked[best] = true;
+ polyOrder.push_back(best);
+ }
+ }
+
+ p0 = startPoint;
+ for(unsigned int n=0; n<polyOrder.size(); n++)
+ {
+ int nr = polyOrder[n];
+ int best = -1;
+ float bestDist = 0xFFFFFFFFFFFFFFFFLL;
+ for(unsigned int i=0;i<polygons[nr].size(); i++)
+ {
+ float dist = vSize2f(polygons[nr][i] - p0);
+ if (dist < bestDist)
+ {
+ best = i;
+ bestDist = dist;
+ }
+ }
+ polyStart[nr] = best;
+ if (polygons[nr].size() <= 2)
+ {
+ p0 = polygons[nr][(best + 1) % 2];
+ }else{
+ p0 = polygons[nr][best];
+ }
+ }
+}
diff --git a/pathOrderOptimizer.h b/pathOrderOptimizer.h
new file mode 100644
index 0000000..e8a323e
--- /dev/null
+++ b/pathOrderOptimizer.h
@@ -0,0 +1,35 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef PATHOPTIMIZER_H
+#define PATHOPTIMIZER_H
+
+#include <stdint.h>
+#include "utils/polygon.h"
+
+class PathOrderOptimizer
+{
+public:
+ Point startPoint;
+ vector<PolygonRef> polygons;
+ vector<int> polyStart;
+ vector<int> polyOrder;
+
+ PathOrderOptimizer(Point startPoint)
+ {
+ this->startPoint = startPoint;
+ }
+
+ void addPolygon(PolygonRef polygon)
+ {
+ this->polygons.push_back(polygon);
+ }
+
+ void addPolygons(Polygons& polygons)
+ {
+ for(unsigned int i=0;i<polygons.size(); i++)
+ this->polygons.push_back(polygons[i]);
+ }
+
+ void optimize();
+};
+
+#endif//PATHOPTIMIZER_H
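
The optimize() pass above is a greedy nearest-neighbour ordering: it first picks, for every polygon, the vertex closest to startPoint, then repeatedly visits the not-yet-picked polygon whose chosen start vertex is closest to the current position (two-point "polygons", i.e. open lines, may be entered from either end), and finally re-picks each start vertex against the point the head actually arrives from. A minimal calling sketch, where 'insets' and 'lastPosition' are assumed to come from the surrounding export code:

    PathOrderOptimizer orderOptimizer(lastPosition);
    orderOptimizer.addPolygons(insets);                    // or addPolygon() per PolygonRef
    orderOptimizer.optimize();
    for(unsigned int n = 0; n < orderOptimizer.polyOrder.size(); n++)
    {
        int polyIdx  = orderOptimizer.polyOrder[n];        // which polygon to print next
        int startIdx = orderOptimizer.polyStart[polyIdx];  // which vertex to start it at
        PolygonRef poly = orderOptimizer.polygons[polyIdx];
        // emit poly[(startIdx + i) % poly.size()] for i = 0 .. poly.size() - 1
    }
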
diff --git a/polygonOptimizer.cpp b/polygonOptimizer.cpp
new file mode 100644
index 0000000..87f1231
--- /dev/null
+++ b/polygonOptimizer.cpp
@@ -0,0 +1,47 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "polygonOptimizer.h"
+
+void optimizePolygon(PolygonRef poly)
+{
+ Point p0 = poly[poly.size()-1];
+ for(unsigned int i=0;i<poly.size();i++)
+ {
+ Point p1 = poly[i];
+ if (shorterThen(p0 - p1, 10))
+ {
+ poly.remove(i);
+ i --;
+ }else{
+ Point p2;
+ if (i < poly.size() - 1)
+ p2 = poly[i+1];
+ else
+ p2 = poly[0];
+
+ Point diff0 = normal(p1 - p0, 1000000);
+ Point diff2 = normal(p1 - p2, 1000000);
+
+ int64_t d = dot(diff0, diff2);
+ if (d < -999999000000LL)
+ {
+ poly.remove(i);
+ i --;
+ }else{
+ p0 = p1;
+ }
+ }
+ }
+}
+
+void optimizePolygons(Polygons& polys)
+{
+ for(unsigned int n=0;n<polys.size();n++)
+ {
+ optimizePolygon(polys[n]);
+ if (polys[n].size() < 3)
+ {
+ polys.remove(n);
+ n--;
+ }
+ }
+}
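
To read the two thresholds in optimizePolygon(): shorterThen(p0 - p1, 10) drops any vertex within 10 microns of its predecessor, and the dot-product test (assuming normal(v, len) rescales v to length len, as declared in utils/intpoint.h) works out as

    dot(diff0, diff2) = 1000000 * 1000000 * cos(theta) = 1e12 * cos(theta)
    d < -999999000000   <=>   cos(theta) < -0.999999   <=>   theta within ~0.08 degrees of 180

where theta is the angle at p1 between the edges towards p0 and p2, so a vertex is only removed when the two edges meeting at it are almost exactly collinear.
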
diff --git a/polygonOptimizer.h b/polygonOptimizer.h
new file mode 100644
index 0000000..9fc8798
--- /dev/null
+++ b/polygonOptimizer.h
@@ -0,0 +1,11 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef POLYGON_OPTIMIZER_H
+#define POLYGON_OPTIMIZER_H
+
+#include "utils/polygon.h"
+
+void optimizePolygon(PolygonRef poly);
+
+void optimizePolygons(Polygons& polys);
+
+#endif//POLYGON_OPTIMIZER_H
diff --git a/raft.cpp b/raft.cpp
new file mode 100644
index 0000000..4af213b
--- /dev/null
+++ b/raft.cpp
@@ -0,0 +1,19 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "raft.h"
+#include "support.h"
+
+void generateRaft(SliceDataStorage& storage, int distance)
+{
+ for(unsigned int volumeIdx = 0; volumeIdx < storage.volumes.size(); volumeIdx++)
+ {
+ if (storage.volumes[volumeIdx].layers.size() < 1) continue;
+ SliceLayer* layer = &storage.volumes[volumeIdx].layers[0];
+ for(unsigned int i=0; i<layer->parts.size(); i++)
+ {
+ storage.raftOutline = storage.raftOutline.unionPolygons(layer->parts[i].outline.offset(distance));
+ }
+ }
+
+ SupportPolyGenerator supportGenerator(storage.support, 0);
+ storage.raftOutline = storage.raftOutline.unionPolygons(supportGenerator.polygons);
+}
diff --git a/raft.h b/raft.h
new file mode 100644
index 0000000..a171f37
--- /dev/null
+++ b/raft.h
@@ -0,0 +1,9 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef RAFT_H
+#define RAFT_H
+
+#include "sliceDataStorage.h"
+
+void generateRaft(SliceDataStorage& storage, int distance);
+
+#endif//RAFT_H
diff --git a/settings.cpp b/settings.cpp
new file mode 100644
index 0000000..a074b15
--- /dev/null
+++ b/settings.cpp
@@ -0,0 +1,101 @@
+#include <stdio.h>
+
+#include "settings.h"
+
+#define STRINGIFY(_s) #_s
+#define SETTING(name) _index.push_back(_ConfigSettingIndex(STRINGIFY(name), &name))
+#define SETTING2(name, altName) _index.push_back(_ConfigSettingIndex(STRINGIFY(name), &name)); _index.push_back(_ConfigSettingIndex(STRINGIFY(altName), &name))
+
+ConfigSettings::ConfigSettings()
+{
+ SETTING(layerThickness);
+ SETTING(initialLayerThickness);
+ SETTING(filamentDiameter);
+ SETTING(filamentFlow);
+ SETTING(extrusionWidth);
+ SETTING(insetCount);
+ SETTING(downSkinCount);
+ SETTING(upSkinCount);
+ SETTING(sparseInfillLineDistance);
+ SETTING(infillOverlap);
+ SETTING(skirtDistance);
+ SETTING(skirtLineCount);
+ SETTING(skirtMinLength);
+
+ SETTING(initialSpeedupLayers);
+ SETTING(initialLayerSpeed);
+ SETTING(printSpeed);
+ SETTING(infillSpeed);
+ SETTING(moveSpeed);
+ SETTING(fanFullOnLayerNr);
+
+ SETTING(supportAngle);
+ SETTING(supportEverywhere);
+ SETTING(supportLineDistance);
+ SETTING(supportXYDistance);
+ SETTING(supportZDistance);
+ SETTING(supportExtruder);
+
+ SETTING(retractionAmount);
+ SETTING(retractionSpeed);
+ SETTING(retractionAmountExtruderSwitch);
+ SETTING(retractionMinimalDistance);
+ SETTING(minimalExtrusionBeforeRetraction);
+ SETTING(enableCombing);
+ SETTING(enableOozeShield);
+ SETTING(wipeTowerSize);
+ SETTING(multiVolumeOverlap);
+ SETTING2(objectPosition.X, posx);
+ SETTING2(objectPosition.Y, posy);
+ SETTING(objectSink);
+
+ SETTING(raftMargin);
+ SETTING(raftLineSpacing);
+ SETTING(raftBaseThickness);
+ SETTING(raftBaseLinewidth);
+ SETTING(raftInterfaceThickness);
+ SETTING(raftInterfaceLinewidth);
+
+ SETTING(minimalLayerTime);
+ SETTING(minimalFeedrate);
+ SETTING(coolHeadLift);
+ SETTING(fanSpeedMin);
+ SETTING(fanSpeedMax);
+
+ SETTING(fixHorrible);
+ SETTING(spiralizeMode);
+ SETTING(gcodeFlavor);
+
+ SETTING(extruderOffset[1].X);
+ SETTING(extruderOffset[1].Y);
+ SETTING(extruderOffset[2].X);
+ SETTING(extruderOffset[2].Y);
+ SETTING(extruderOffset[3].X);
+ SETTING(extruderOffset[3].Y);
+}
+
+#undef STRINGIFY
+#undef SETTING
+
+bool ConfigSettings::setSetting(const char* key, const char* value)
+{
+ for(unsigned int n=0; n < _index.size(); n++)
+ {
+ if (strcasecmp(key, _index[n].key) == 0)
+ {
+ *_index[n].ptr = atoi(value);
+ return true;
+ }
+ }
+ if (strcasecmp(key, "startCode") == 0)
+ {
+ this->startCode = value;
+ return true;
+ }
+ if (strcasecmp(key, "endCode") == 0)
+ {
+ this->endCode = value;
+ return true;
+ }
+ return false;
+}
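
A minimal sketch of driving the key/value interface above; in this code base the usual caller is the command-line parsing in main.cpp, which is not shown in this section, and the concrete values below are illustrative only. Keys are matched case-insensitively against the names registered with SETTING()/SETTING2(); integer values go through atoi, while startCode/endCode are stored as raw strings.

    ConfigSettings config;
    config.setSetting("layerThickness", "100");              // microns, via SETTING(layerThickness)
    config.setSetting("posx", "102500");                     // alias for objectPosition.X, via SETTING2
    config.setSetting("startCode", "G28\nG1 Z15.0 F300\n");  // stored verbatim
    if (!config.setSetting("noSuchKey", "1"))
    {
        // unknown keys are reported by returning false
    }
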
diff --git a/settings.h b/settings.h
new file mode 100644
index 0000000..8d73d1a
--- /dev/null
+++ b/settings.h
@@ -0,0 +1,130 @@
+#ifndef SETTINGS_H
+#define SETTINGS_H
+
+#include <utils/floatpoint.h>
+#include <vector>
+
+#define VERSION "13.12"
+
+#define FIX_HORRIBLE_UNION_ALL_TYPE_A 0x01
+#define FIX_HORRIBLE_UNION_ALL_TYPE_B 0x02
+#define FIX_HORRIBLE_EXTENSIVE_STITCHING 0x04
+#define FIX_HORRIBLE_UNION_ALL_TYPE_C 0x08
+#define FIX_HORRIBLE_KEEP_NONE_CLOSED 0x10
+
+/**
+ * RepRap flavored GCode is Marlin/Sprinter/Repetier based GCode.
+ * This is the most commonly used GCode set.
+ * G0 for moves, G1 for extrusion.
+ * E values give mm of filament extrusion.
+ * Retraction is done on E values with G1. Start/end code is added.
+ * M106 Sxxx and M107 are used to turn the fan on/off.
+ **/
+#define GCODE_FLAVOR_REPRAP 0
+/**
+ * UltiGCode flavored GCode is Marlin based GCode.
+ * UltiGCode uses fewer settings in the slicer and puts more settings in the firmware. This makes for more hardware/material independent GCode.
+ * G0 for moves, G1 for extrusion.
+ * E values give mm^3 of filament extrusion. Ignores the filament diameter setting.
+ * Retraction is done with G10 and G11. Retraction settings are ignored. G10 S1 is used for multi-extruder switch retraction.
+ * Start/end code is not added.
+ * M106 Sxxx and M107 are used to turn the fan on/off.
+ **/
+#define GCODE_FLAVOR_ULTIGCODE 1
+/**
+ * Makerbot flavored GCode.
+ * Looks a lot like RepRap GCode with a few changes. Requires MakerWare to convert to X3G files.
+ * Heating needs to be done with M104 Sxxx T0
+ * No G21 or G90
+ * Fan ON is M126 T0 (No fan strength control?)
+ * Fan OFF is M127 T0
+ * Homing is done with G162 X Y F2000
+ **/
+#define GCODE_FLAVOR_MAKERBOT 2
+
+#define MAX_EXTRUDERS 16
+
+class _ConfigSettingIndex
+{
+public:
+ const char* key;
+ int* ptr;
+
+ _ConfigSettingIndex(const char* key, int* ptr) : key(key), ptr(ptr) {}
+};
+
+class ConfigSettings
+{
+private:
+ std::vector<_ConfigSettingIndex> _index;
+public:
+ int layerThickness;
+ int initialLayerThickness;
+ int filamentDiameter;
+ int filamentFlow;
+ int extrusionWidth;
+ int insetCount;
+ int downSkinCount;
+ int upSkinCount;
+ int sparseInfillLineDistance;
+ int infillOverlap;
+ int skirtDistance;
+ int skirtLineCount;
+ int skirtMinLength;
+ int retractionAmount;
+ int retractionAmountExtruderSwitch;
+ int retractionSpeed;
+ int retractionMinimalDistance;
+ int minimalExtrusionBeforeRetraction;
+ int enableCombing;
+ int enableOozeShield;
+ int wipeTowerSize;
+ int multiVolumeOverlap;
+
+ int initialSpeedupLayers;
+ int initialLayerSpeed;
+ int printSpeed;
+ int infillSpeed;
+ int moveSpeed;
+ int fanFullOnLayerNr;
+
+ //Support material
+ int supportAngle;
+ int supportEverywhere;
+ int supportLineDistance;
+ int supportXYDistance;
+ int supportZDistance;
+ int supportExtruder;
+
+ //Cool settings
+ int minimalLayerTime;
+ int minimalFeedrate;
+ int coolHeadLift;
+ int fanSpeedMin;
+ int fanSpeedMax;
+
+ //Raft settings
+ int raftMargin;
+ int raftLineSpacing;
+ int raftBaseThickness;
+ int raftBaseLinewidth;
+ int raftInterfaceThickness;
+ int raftInterfaceLinewidth;
+
+ FMatrix3x3 matrix;
+ IntPoint objectPosition;
+ int objectSink;
+
+ int fixHorrible;
+ int spiralizeMode;
+ int gcodeFlavor;
+
+ IntPoint extruderOffset[MAX_EXTRUDERS];
+ const char* startCode;
+ const char* endCode;
+
+ ConfigSettings();
+ bool setSetting(const char* key, const char* value);
+};
+
+#endif//SETTINGS_H
diff --git a/skin.cpp b/skin.cpp
new file mode 100644
index 0000000..c5ee2cd
--- /dev/null
+++ b/skin.cpp
@@ -0,0 +1,116 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "skin.h"
+
+void generateSkins(int layerNr, SliceVolumeStorage& storage, int extrusionWidth, int downSkinCount, int upSkinCount, int infillOverlap)
+{
+ SliceLayer* layer = &storage.layers[layerNr];
+
+ for(unsigned int partNr=0; partNr<layer->parts.size(); partNr++)
+ {
+ SliceLayerPart* part = &layer->parts[partNr];
+
+ Polygons upskin = part->insets[part->insets.size() - 1].offset(-extrusionWidth/2);
+ Polygons downskin = upskin;
+
+ if (part->insets.size() > 1)
+ {
+ //Add thin wall filling by taking the area between the insets.
+ Polygons thinWalls = part->insets[0].offset(-extrusionWidth / 2 - extrusionWidth * infillOverlap / 100).difference(part->insets[1].offset(extrusionWidth * 6 / 10));
+ upskin.add(thinWalls);
+ downskin.add(thinWalls);
+ }
+ if (int(layerNr - downSkinCount) >= 0)
+ {
+ SliceLayer* layer2 = &storage.layers[layerNr - downSkinCount];
+ for(unsigned int partNr2=0; partNr2<layer2->parts.size(); partNr2++)
+ {
+ if (part->boundaryBox.hit(layer2->parts[partNr2].boundaryBox))
+ downskin = downskin.difference(layer2->parts[partNr2].insets[layer2->parts[partNr2].insets.size() - 1]);
+ }
+ }
+ if (int(layerNr + upSkinCount) < (int)storage.layers.size())
+ {
+ SliceLayer* layer2 = &storage.layers[layerNr + upSkinCount];
+ for(unsigned int partNr2=0; partNr2<layer2->parts.size(); partNr2++)
+ {
+ if (part->boundaryBox.hit(layer2->parts[partNr2].boundaryBox))
+ upskin = upskin.difference(layer2->parts[partNr2].insets[layer2->parts[partNr2].insets.size() - 1]);
+ }
+ }
+
+ part->skinOutline = upskin.unionPolygons(downskin);
+
+ double minAreaSize = (2 * M_PI * (double(extrusionWidth) / 1000.0) * (double(extrusionWidth) / 1000.0)) * 0.3;
+ for(unsigned int i=0; i<part->skinOutline.size(); i++)
+ {
+ double area = fabs(part->skinOutline[i].area()) / 1000.0 / 1000.0;
+ if (area < minAreaSize) // Only create an up/down skin if the area is large enough. So you do not create tiny blobs of "trying to fill"
+ {
+ part->skinOutline.remove(i);
+ i -= 1;
+ }
+ }
+ }
+}
+
+void generateSparse(int layerNr, SliceVolumeStorage& storage, int extrusionWidth, int downSkinCount, int upSkinCount)
+{
+ SliceLayer* layer = &storage.layers[layerNr];
+
+ for(unsigned int partNr=0; partNr<layer->parts.size(); partNr++)
+ {
+ SliceLayerPart* part = &layer->parts[partNr];
+
+ Polygons sparse = part->insets[part->insets.size() - 1].offset(-extrusionWidth/2);
+ Polygons downskin = sparse;
+ Polygons upskin = sparse;
+
+ if (int(layerNr - downSkinCount) >= 0)
+ {
+ SliceLayer* layer2 = &storage.layers[layerNr - downSkinCount];
+ for(unsigned int partNr2=0; partNr2<layer2->parts.size(); partNr2++)
+ {
+ if (part->boundaryBox.hit(layer2->parts[partNr2].boundaryBox))
+ {
+ if (layer2->parts[partNr2].insets.size() > 1)
+ {
+ downskin = downskin.difference(layer2->parts[partNr2].insets[layer2->parts[partNr2].insets.size() - 2]);
+ }else{
+ downskin = downskin.difference(layer2->parts[partNr2].insets[layer2->parts[partNr2].insets.size() - 1]);
+ }
+ }
+ }
+ }
+ if (int(layerNr + upSkinCount) < (int)storage.layers.size())
+ {
+ SliceLayer* layer2 = &storage.layers[layerNr + upSkinCount];
+ for(unsigned int partNr2=0; partNr2<layer2->parts.size(); partNr2++)
+ {
+ if (part->boundaryBox.hit(layer2->parts[partNr2].boundaryBox))
+ {
+ if (layer2->parts[partNr2].insets.size() > 1)
+ {
+ upskin = upskin.difference(layer2->parts[partNr2].insets[layer2->parts[partNr2].insets.size() - 2]);
+ }else{
+ upskin = upskin.difference(layer2->parts[partNr2].insets[layer2->parts[partNr2].insets.size() - 1]);
+ }
+ }
+ }
+ }
+
+ Polygons result = upskin.unionPolygons(downskin);
+
+ double minAreaSize = 3.0;//(2 * M_PI * (double(config.extrusionWidth) / 1000.0) * (double(config.extrusionWidth) / 1000.0)) * 3;
+ for(unsigned int i=0; i<result.size(); i++)
+ {
+ double area = fabs(result[i].area()) / 1000.0 / 1000.0;
+ if (area < minAreaSize) /* Only create an up/down skin if the area is large enough. So you do not create tiny blobs of "trying to fill" */
+ {
+ result.remove(i);
+ i -= 1;
+ }
+ }
+
+ part->sparseOutline = sparse.difference(result);
+ }
+}
diff --git a/skin.h b/skin.h
new file mode 100644
index 0000000..0ced1a9
--- /dev/null
+++ b/skin.h
@@ -0,0 +1,10 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef SKIN_H
+#define SKIN_H
+
+#include "sliceDataStorage.h"
+
+void generateSkins(int layerNr, SliceVolumeStorage& storage, int extrusionWidth, int downSkinCount, int upSkinCount, int infillOverlap);
+void generateSparse(int layerNr, SliceVolumeStorage& storage, int extrusionWidth, int downSkinCount, int upSkinCount);
+
+#endif//SKIN_H
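
Both functions above assume the insets of every SliceLayerPart have already been generated (they index part->insets), and all distances are in microns. A minimal per-layer calling sketch, with 'config' values assumed to come from ConfigSettings; the real driver lives in fffProcessor.h and is not shown in this section.

    SliceVolumeStorage& volume = storage.volumes[volumeIdx];
    for(unsigned int layerNr = 0; layerNr < volume.layers.size(); layerNr++)
    {
        generateSkins(layerNr, volume, config.extrusionWidth,
                      config.downSkinCount, config.upSkinCount, config.infillOverlap);
        generateSparse(layerNr, volume, config.extrusionWidth,
                       config.downSkinCount, config.upSkinCount);
    }
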
diff --git a/skirt.cpp b/skirt.cpp
new file mode 100644
index 0000000..d1b67e0
--- /dev/null
+++ b/skirt.cpp
@@ -0,0 +1,39 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "skirt.h"
+#include "support.h"
+
+void generateSkirt(SliceDataStorage& storage, int distance, int extrusionWidth, int count, int minLength, int initialLayerHeight)
+{
+ for(int skirtNr=0; skirtNr<count;skirtNr++)
+ {
+ int offsetDistance = distance + extrusionWidth * skirtNr + extrusionWidth / 2;
+
+ Polygons skirtPolygons(storage.wipeTower.offset(offsetDistance));
+ for(unsigned int volumeIdx = 0; volumeIdx < storage.volumes.size(); volumeIdx++)
+ {
+ if (storage.volumes[volumeIdx].layers.size() < 1) continue;
+ SliceLayer* layer = &storage.volumes[volumeIdx].layers[0];
+ for(unsigned int i=0; i<layer->parts.size(); i++)
+ {
+ skirtPolygons = skirtPolygons.unionPolygons(layer->parts[i].outline.offset(offsetDistance));
+ }
+ }
+
+ SupportPolyGenerator supportGenerator(storage.support, initialLayerHeight);
+ skirtPolygons = skirtPolygons.unionPolygons(supportGenerator.polygons.offset(offsetDistance));
+
+        //Remove small inner skirt holes. Holes have a negative area; remove anything smaller than 100x the extrusion width squared.
+ for(unsigned int n=0; n<skirtPolygons.size(); n++)
+ {
+ double area = skirtPolygons[n].area();
+ if (area < 0 && area > -extrusionWidth * extrusionWidth * 100)
+ skirtPolygons.remove(n--);
+ }
+
+ storage.skirt.add(skirtPolygons);
+
+        int length = storage.skirt.polygonLength();
+        if (skirtNr + 1 >= count && length > 0 && length < minLength)
+ count++;
+ }
+}
diff --git a/skirt.h b/skirt.h
new file mode 100644
index 0000000..f732ae2
--- /dev/null
+++ b/skirt.h
@@ -0,0 +1,9 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef SKIRT_H
+#define SKIRT_H
+
+#include "sliceDataStorage.h"
+
+void generateSkirt(SliceDataStorage& storage, int distance, int extrusionWidth, int count, int minLength, int initialLayerHeight);
+
+#endif//SKIRT_H
diff --git a/sliceDataStorage.h b/sliceDataStorage.h
new file mode 100644
index 0000000..c25ce4a
--- /dev/null
+++ b/sliceDataStorage.h
@@ -0,0 +1,83 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef SLICE_DATA_STORAGE_H
+#define SLICE_DATA_STORAGE_H
+
+#include "utils/intpoint.h"
+#include "utils/polygon.h"
+
+/*
+SliceData
++ Layers[]
+ + LayerParts[]
+ + OutlinePolygons[]
+ + Insets[]
+ + Polygons[]
+ + SkinPolygons[]
+*/
+
+class SliceLayerPart
+{
+public:
+ AABB boundaryBox;
+ Polygons outline;
+ Polygons combBoundery;
+ vector<Polygons> insets;
+ Polygons skinOutline;
+ Polygons sparseOutline;
+ int bridgeAngle;
+};
+
+class SliceLayer
+{
+public:
+ vector<SliceLayerPart> parts;
+};
+
+/******************/
+class SupportPoint
+{
+public:
+ int32_t z;
+ double cosAngle;
+
+ SupportPoint(int32_t z, double cosAngle) : z(z), cosAngle(cosAngle) {}
+};
+class SupportStorage
+{
+public:
+ bool generated;
+ int angle;
+ bool everywhere;
+ int XYDistance;
+ int ZDistance;
+
+ Point gridOffset;
+ int32_t gridScale;
+ int32_t gridWidth, gridHeight;
+ vector<SupportPoint>* grid;
+ SupportStorage(){grid = NULL;}
+ ~SupportStorage(){if(grid) delete [] grid;}
+};
+/******************/
+
+class SliceVolumeStorage
+{
+public:
+ vector<SliceLayer> layers;
+};
+
+class SliceDataStorage
+{
+public:
+ Point3 modelSize, modelMin, modelMax;
+ Polygons skirt;
+ Polygons raftOutline;
+ vector<Polygons> oozeShield;
+ vector<SliceVolumeStorage> volumes;
+
+ SupportStorage support;
+ Polygons wipeTower;
+ Point wipePoint;
+};
+
+#endif//SLICE_DATA_STORAGE_H
diff --git a/slicer.cpp b/slicer.cpp
new file mode 100644
index 0000000..929a5c9
--- /dev/null
+++ b/slicer.cpp
@@ -0,0 +1,405 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include <stdio.h>
+
+#include "utils/gettime.h"
+
+#include "slicer.h"
+#include "polygonOptimizer.h"
+
+void SlicerLayer::makePolygons(OptimizedVolume* ov, bool keepNoneClosed, bool extensiveStitching)
+{
+ for(unsigned int startSegment=0; startSegment < segmentList.size(); startSegment++)
+ {
+ if (segmentList[startSegment].addedToPolygon)
+ continue;
+
+ Polygons tmpPolygons;
+ PolygonRef poly = tmpPolygons.newPoly();
+ poly.add(segmentList[startSegment].start);
+
+ unsigned int segmentIndex = startSegment;
+ bool canClose;
+ while(true)
+ {
+ canClose = false;
+ segmentList[segmentIndex].addedToPolygon = true;
+ Point p0 = segmentList[segmentIndex].end;
+ poly.add(p0);
+ int nextIndex = -1;
+ OptimizedFace* face = &ov->faces[segmentList[segmentIndex].faceIndex];
+ for(unsigned int i=0;i<3;i++)
+ {
+ if (face->touching[i] > -1 && faceToSegmentIndex.find(face->touching[i]) != faceToSegmentIndex.end())
+ {
+ Point p1 = segmentList[faceToSegmentIndex[face->touching[i]]].start;
+ Point diff = p0 - p1;
+ if (shorterThen(diff, 10))
+ {
+ if (faceToSegmentIndex[face->touching[i]] == (int)startSegment)
+ canClose = true;
+ if (segmentList[faceToSegmentIndex[face->touching[i]]].addedToPolygon)
+ continue;
+ nextIndex = faceToSegmentIndex[face->touching[i]];
+ }
+ }
+ }
+ if (nextIndex == -1)
+ break;
+ segmentIndex = nextIndex;
+ }
+ if (canClose)
+ polygonList.add(poly);
+ else
+ openPolygonList.add(poly);
+ }
+ //Clear the segmentList to save memory, it is no longer needed after this point.
+ segmentList.clear();
+
+    //Connect polygons that are not closed yet; as models are not always perfectly manifold, we need to link segments up to get proper polygons.
+ //First link up polygon ends that are within 2 microns.
+ for(unsigned int i=0;i<openPolygonList.size();i++)
+ {
+ if (openPolygonList[i].size() < 1) continue;
+ for(unsigned int j=0;j<openPolygonList.size();j++)
+ {
+ if (openPolygonList[j].size() < 1) continue;
+
+ Point diff = openPolygonList[i][openPolygonList[i].size()-1] - openPolygonList[j][0];
+ int64_t distSquared = vSize2(diff);
+
+ if (distSquared < 2 * 2)
+ {
+ if (i == j)
+ {
+ polygonList.add(openPolygonList[i]);
+ openPolygonList[i].clear();
+ break;
+ }else{
+ for(unsigned int n=0; n<openPolygonList[j].size(); n++)
+ openPolygonList[i].add(openPolygonList[j][n]);
+
+ openPolygonList[j].clear();
+ }
+ }
+ }
+ }
+
+ //Next link up all the missing ends, closing up the smallest gaps first. This is an inefficient implementation which can run in O(n*n*n) time.
+ while(1)
+ {
+ int64_t bestScore = 10000 * 10000;
+ unsigned int bestA = -1;
+ unsigned int bestB = -1;
+ bool reversed = false;
+ for(unsigned int i=0;i<openPolygonList.size();i++)
+ {
+ if (openPolygonList[i].size() < 1) continue;
+ for(unsigned int j=0;j<openPolygonList.size();j++)
+ {
+ if (openPolygonList[j].size() < 1) continue;
+
+ Point diff = openPolygonList[i][openPolygonList[i].size()-1] - openPolygonList[j][0];
+ int64_t distSquared = vSize2(diff);
+ if (distSquared < bestScore)
+ {
+ bestScore = distSquared;
+ bestA = i;
+ bestB = j;
+ reversed = false;
+ }
+
+ if (i != j)
+ {
+ Point diff = openPolygonList[i][openPolygonList[i].size()-1] - openPolygonList[j][openPolygonList[j].size()-1];
+ int64_t distSquared = vSize2(diff);
+ if (distSquared < bestScore)
+ {
+ bestScore = distSquared;
+ bestA = i;
+ bestB = j;
+ reversed = true;
+ }
+ }
+ }
+ }
+
+ if (bestScore >= 10000 * 10000)
+ break;
+
+ if (bestA == bestB)
+ {
+ polygonList.add(openPolygonList[bestA]);
+ openPolygonList[bestA].clear();
+ }else{
+ if (reversed)
+ {
+ for(unsigned int n=openPolygonList[bestB].size()-1; int(n)>=0; n--)
+ openPolygonList[bestA].add(openPolygonList[bestB][n]);
+ }else{
+ for(unsigned int n=0; n<openPolygonList[bestB].size(); n++)
+ openPolygonList[bestA].add(openPolygonList[bestB][n]);
+ }
+
+ openPolygonList[bestB].clear();
+ }
+ }
+
+ if (extensiveStitching)
+ {
+        //For extensive stitching, find 2 open polygon ends that are touching the same closed polygon.
+        // Then find the shortest path over that closed polygon that can be used to connect the open polygons,
+        // and generate a path over this shortest bit to link up the 2 open polygons.
+        // (If these 2 open ends belong to the same open polygon, then the final result is a closed polygon.)
+
+ while(1)
+ {
+ unsigned int bestA = -1;
+ unsigned int bestB = -1;
+ gapCloserResult bestResult;
+ bestResult.len = LLONG_MAX;
+ bestResult.polygonIdx = -1;
+ bestResult.pointIdxA = -1;
+ bestResult.pointIdxB = -1;
+
+ for(unsigned int i=0; i<openPolygonList.size(); i++)
+ {
+ if (openPolygonList[i].size() < 1) continue;
+
+ {
+ gapCloserResult res = findPolygonGapCloser(openPolygonList[i][0], openPolygonList[i][openPolygonList[i].size()-1]);
+ if (res.len > 0 && res.len < bestResult.len)
+ {
+ bestA = i;
+ bestB = i;
+ bestResult = res;
+ }
+ }
+
+ for(unsigned int j=0; j<openPolygonList.size(); j++)
+ {
+ if (openPolygonList[j].size() < 1 || i == j) continue;
+
+ gapCloserResult res = findPolygonGapCloser(openPolygonList[i][0], openPolygonList[j][openPolygonList[j].size()-1]);
+ if (res.len > 0 && res.len < bestResult.len)
+ {
+ bestA = i;
+ bestB = j;
+ bestResult = res;
+ }
+ }
+ }
+
+ if (bestResult.len < LLONG_MAX)
+ {
+ if (bestA == bestB)
+ {
+ if (bestResult.pointIdxA == bestResult.pointIdxB)
+ {
+ polygonList.add(openPolygonList[bestA]);
+ openPolygonList[bestA].clear();
+ }
+ else if (bestResult.AtoB)
+ {
+ PolygonRef poly = polygonList.newPoly();
+ for(unsigned int j = bestResult.pointIdxA; j != bestResult.pointIdxB; j = (j + 1) % polygonList[bestResult.polygonIdx].size())
+ poly.add(polygonList[bestResult.polygonIdx][j]);
+ for(unsigned int j = openPolygonList[bestA].size() - 1; int(j) >= 0; j--)
+ poly.add(openPolygonList[bestA][j]);
+ openPolygonList[bestA].clear();
+ }
+ else
+ {
+ unsigned int n = polygonList.size();
+ polygonList.add(openPolygonList[bestA]);
+ for(unsigned int j = bestResult.pointIdxB; j != bestResult.pointIdxA; j = (j + 1) % polygonList[bestResult.polygonIdx].size())
+ polygonList[n].add(polygonList[bestResult.polygonIdx][j]);
+ openPolygonList[bestA].clear();
+ }
+ }
+ else
+ {
+ if (bestResult.pointIdxA == bestResult.pointIdxB)
+ {
+ for(unsigned int n=0; n<openPolygonList[bestA].size(); n++)
+ openPolygonList[bestB].add(openPolygonList[bestA][n]);
+ openPolygonList[bestA].clear();
+ }
+ else if (bestResult.AtoB)
+ {
+ Polygons tmpPolygons;
+ PolygonRef poly = tmpPolygons.newPoly();
+ for(unsigned int n = bestResult.pointIdxA; n != bestResult.pointIdxB; n = (n + 1) % polygonList[bestResult.polygonIdx].size())
+ poly.add(polygonList[bestResult.polygonIdx][n]);
+ for(unsigned int n=poly.size()-1;int(n) >= 0; n--)
+ openPolygonList[bestB].add(poly[n]);
+ for(unsigned int n=0; n<openPolygonList[bestA].size(); n++)
+ openPolygonList[bestB].add(openPolygonList[bestA][n]);
+ openPolygonList[bestA].clear();
+ }
+ else
+ {
+ for(unsigned int n = bestResult.pointIdxB; n != bestResult.pointIdxA; n = (n + 1) % polygonList[bestResult.polygonIdx].size())
+ openPolygonList[bestB].add(polygonList[bestResult.polygonIdx][n]);
+ for(unsigned int n = openPolygonList[bestA].size() - 1; int(n) >= 0; n--)
+ openPolygonList[bestB].add(openPolygonList[bestA][n]);
+ openPolygonList[bestA].clear();
+ }
+ }
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+
+ /*
+ int q=0;
+ for(unsigned int i=0;i<openPolygonList.size();i++)
+ {
+ if (openPolygonList[i].size() < 2) continue;
+ if (!q) printf("***\n");
+ printf("S: %f %f\n", float(openPolygonList[i][0].X), float(openPolygonList[i][0].Y));
+ printf("E: %f %f\n", float(openPolygonList[i][openPolygonList[i].size()-1].X), float(openPolygonList[i][openPolygonList[i].size()-1].Y));
+ q = 1;
+ }
+ */
+ //if (q) exit(1);
+
+ if (keepNoneClosed)
+ {
+ for(unsigned int n=0; n<openPolygonList.size(); n++)
+ {
+ if (openPolygonList[n].size() > 0)
+ polygonList.add(openPolygonList[n]);
+ }
+ }
+ //Clear the openPolygonList to save memory, the only reason to keep it after this is for debugging.
+ //openPolygonList.clear();
+
+ //Remove all the tiny polygons, or polygons that are not closed. As they do not contribute to the actual print.
+ int snapDistance = 1000;
+ for(unsigned int i=0;i<polygonList.size();i++)
+ {
+ int length = 0;
+
+ for(unsigned int n=1; n<polygonList[i].size(); n++)
+ {
+ length += vSize(polygonList[i][n] - polygonList[i][n-1]);
+ if (length > snapDistance)
+ break;
+ }
+ if (length < snapDistance)
+ {
+ polygonList.remove(i);
+ i--;
+ }
+ }
+
+ //Finally optimize all the polygons. Every point removed saves time in the long run.
+ optimizePolygons(polygonList);
+}
+
+
+Slicer::Slicer(OptimizedVolume* ov, int32_t initial, int32_t thickness, bool keepNoneClosed, bool extensiveStitching)
+{
+ modelSize = ov->model->modelSize;
+ modelMin = ov->model->vMin;
+
+ int layerCount = (modelSize.z - initial) / thickness + 1;
+ fprintf(stdout, "Layer count: %i\n", layerCount);
+ layers.resize(layerCount);
+
+ for(unsigned int i=0; i<ov->faces.size(); i++)
+ {
+ Point3 p0 = ov->points[ov->faces[i].index[0]].p;
+ Point3 p1 = ov->points[ov->faces[i].index[1]].p;
+ Point3 p2 = ov->points[ov->faces[i].index[2]].p;
+ int32_t minZ = p0.z;
+ int32_t maxZ = p0.z;
+ if (p1.z < minZ) minZ = p1.z;
+ if (p2.z < minZ) minZ = p2.z;
+ if (p1.z > maxZ) maxZ = p1.z;
+ if (p2.z > maxZ) maxZ = p2.z;
+
+ for(int32_t layerNr = (minZ - initial) / thickness; layerNr <= (maxZ - initial) / thickness; layerNr++)
+ {
+ int32_t z = layerNr * thickness + initial;
+ if (z < minZ) continue;
+ if (layerNr < 0) continue;
+
+ SlicerSegment s;
+ if (p0.z < z && p1.z >= z && p2.z >= z)
+ s = project2D(p0, p2, p1, z);
+ else if (p0.z > z && p1.z < z && p2.z < z)
+ s = project2D(p0, p1, p2, z);
+
+ else if (p1.z < z && p0.z >= z && p2.z >= z)
+ s = project2D(p1, p0, p2, z);
+ else if (p1.z > z && p0.z < z && p2.z < z)
+ s = project2D(p1, p2, p0, z);
+
+ else if (p2.z < z && p1.z >= z && p0.z >= z)
+ s = project2D(p2, p1, p0, z);
+ else if (p2.z > z && p1.z < z && p0.z < z)
+ s = project2D(p2, p0, p1, z);
+ else
+ {
+ //Not all cases create a segment, because a point of a face could create just a dot, and two touching faces
+ // on the slice would create two segments
+ continue;
+ }
+ layers[layerNr].faceToSegmentIndex[i] = layers[layerNr].segmentList.size();
+ s.faceIndex = i;
+ s.addedToPolygon = false;
+ layers[layerNr].segmentList.push_back(s);
+ }
+ }
+
+ for(unsigned int layerNr=0; layerNr<layers.size(); layerNr++)
+ {
+ layers[layerNr].makePolygons(ov, keepNoneClosed, extensiveStitching);
+ }
+}
+
+void Slicer::dumpSegmentsToHTML(const char* filename)
+{
+ float scale = std::max(modelSize.x, modelSize.y) / 1500;
+ FILE* f = fopen(filename, "w");
+ fprintf(f, "<!DOCTYPE html><html><body>\n");
+ for(unsigned int i=0; i<layers.size(); i++)
+ {
+ fprintf(f, "<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\" style='width:%ipx;height:%ipx'>\n", int(modelSize.x / scale), int(modelSize.y / scale));
+ fprintf(f, "<g fill-rule='evenodd' style=\"fill: gray; stroke:black;stroke-width:1\">\n");
+ fprintf(f, "<path d=\"");
+ for(unsigned int j=0; j<layers[i].polygonList.size(); j++)
+ {
+ PolygonRef p = layers[i].polygonList[j];
+ for(unsigned int n=0; n<p.size(); n++)
+ {
+ if (n == 0)
+ fprintf(f, "M");
+ else
+ fprintf(f, "L");
+ fprintf(f, "%f,%f ", float(p[n].X - modelMin.x)/scale, float(p[n].Y - modelMin.y)/scale);
+ }
+ fprintf(f, "Z\n");
+ }
+ fprintf(f, "\"/>");
+ fprintf(f, "</g>\n");
+ for(unsigned int j=0; j<layers[i].openPolygonList.size(); j++)
+ {
+ PolygonRef p = layers[i].openPolygonList[j];
+ if (p.size() < 1) continue;
+ fprintf(f, "<polyline points=\"");
+ for(unsigned int n=0; n<p.size(); n++)
+ {
+ fprintf(f, "%f,%f ", float(p[n].X - modelMin.x)/scale, float(p[n].Y - modelMin.y)/scale);
+ }
+ fprintf(f, "\" style=\"fill: none; stroke:red;stroke-width:1\" />\n");
+ }
+ fprintf(f, "</svg>\n");
+ }
+ fprintf(f, "</body></html>");
+ fclose(f);
+}
diff --git a/slicer.h b/slicer.h
new file mode 100644
index 0000000..376e89a
--- /dev/null
+++ b/slicer.h
@@ -0,0 +1,160 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef SLICER_H
+#define SLICER_H
+
+#include "optimizedModel.h"
+#include "utils/polygon.h"
+/*
+ The Slicer creates layers of polygons from an optimized 3D model.
+ The result of the Slicer is a list of polygons without any order or structure.
+*/
+
+class SlicerSegment
+{
+public:
+ Point start, end;
+ int faceIndex;
+ bool addedToPolygon;
+};
+
+class closePolygonResult
+{ //The result of trying to find a point on a closed polygon line. This gives back the point index, the polygon index, and the point of the connection.
+    //The line on which the point lies is between pointIdx-1 and pointIdx
+public:
+ Point intersectionPoint;
+ int polygonIdx;
+ unsigned int pointIdx;
+};
+class gapCloserResult
+{
+public:
+ int64_t len;
+ int polygonIdx;
+ unsigned int pointIdxA;
+ unsigned int pointIdxB;
+ bool AtoB;
+};
+
+class SlicerLayer
+{
+public:
+ std::vector<SlicerSegment> segmentList;
+ std::map<int, int> faceToSegmentIndex;
+
+ Polygons polygonList;
+ Polygons openPolygonList;
+
+ void makePolygons(OptimizedVolume* ov, bool keepNoneClosed, bool extensiveStitching);
+
+private:
+ gapCloserResult findPolygonGapCloser(Point ip0, Point ip1)
+ {
+ gapCloserResult ret;
+ closePolygonResult c1 = findPolygonPointClosestTo(ip0);
+ closePolygonResult c2 = findPolygonPointClosestTo(ip1);
+ if (c1.polygonIdx < 0 || c1.polygonIdx != c2.polygonIdx)
+ {
+ ret.len = -1;
+ return ret;
+ }
+ ret.polygonIdx = c1.polygonIdx;
+ ret.pointIdxA = c1.pointIdx;
+ ret.pointIdxB = c2.pointIdx;
+ ret.AtoB = true;
+
+ if (ret.pointIdxA == ret.pointIdxB)
+ {
+ //Connection points are on the same line segment.
+ ret.len = vSize(ip0 - ip1);
+ }else{
+            //Find out if we should go from A to B or the other way around.
+ Point p0 = polygonList[ret.polygonIdx][ret.pointIdxA];
+ int64_t lenA = vSize(p0 - ip0);
+ for(unsigned int i = ret.pointIdxA; i != ret.pointIdxB; i = (i + 1) % polygonList[ret.polygonIdx].size())
+ {
+ Point p1 = polygonList[ret.polygonIdx][i];
+ lenA += vSize(p0 - p1);
+ p0 = p1;
+ }
+ lenA += vSize(p0 - ip1);
+
+ p0 = polygonList[ret.polygonIdx][ret.pointIdxB];
+ int64_t lenB = vSize(p0 - ip1);
+ for(unsigned int i = ret.pointIdxB; i != ret.pointIdxA; i = (i + 1) % polygonList[ret.polygonIdx].size())
+ {
+ Point p1 = polygonList[ret.polygonIdx][i];
+ lenB += vSize(p0 - p1);
+ p0 = p1;
+ }
+ lenB += vSize(p0 - ip0);
+
+ if (lenA < lenB)
+ {
+ ret.AtoB = true;
+ ret.len = lenA;
+ }else{
+ ret.AtoB = false;
+ ret.len = lenB;
+ }
+ }
+ return ret;
+ }
+
+ closePolygonResult findPolygonPointClosestTo(Point input)
+ {
+ closePolygonResult ret;
+ for(unsigned int n=0; n<polygonList.size(); n++)
+ {
+ Point p0 = polygonList[n][polygonList[n].size()-1];
+ for(unsigned int i=0; i<polygonList[n].size(); i++)
+ {
+ Point p1 = polygonList[n][i];
+
+ //Q = A + Normal( B - A ) * ((( B - A ) dot ( P - A )) / VSize( A - B ));
+ Point pDiff = p1 - p0;
+ int64_t lineLength = vSize(pDiff);
+ if (lineLength > 1)
+ {
+ int64_t distOnLine = dot(pDiff, input - p0) / lineLength;
+ if (distOnLine >= 0 && distOnLine <= lineLength)
+ {
+ Point q = p0 + pDiff * distOnLine / lineLength;
+ if (shorterThen(q - input, 100))
+ {
+ ret.intersectionPoint = q;
+ ret.polygonIdx = n;
+ ret.pointIdx = i;
+ return ret;
+ }
+ }
+ }
+ p0 = p1;
+ }
+ }
+ ret.polygonIdx = -1;
+ return ret;
+ }
+};
+
+class Slicer
+{
+public:
+ std::vector<SlicerLayer> layers;
+ Point3 modelSize, modelMin;
+
+ Slicer(OptimizedVolume* ov, int32_t initial, int32_t thickness, bool keepNoneClosed, bool extensiveStitching);
+
+ SlicerSegment project2D(Point3& p0, Point3& p1, Point3& p2, int32_t z) const
+ {
+ SlicerSegment seg;
+ seg.start.X = p0.x + int64_t(p1.x - p0.x) * int64_t(z - p0.z) / int64_t(p1.z - p0.z);
+ seg.start.Y = p0.y + int64_t(p1.y - p0.y) * int64_t(z - p0.z) / int64_t(p1.z - p0.z);
+ seg.end.X = p0.x + int64_t(p2.x - p0.x) * int64_t(z - p0.z) / int64_t(p2.z - p0.z);
+ seg.end.Y = p0.y + int64_t(p2.y - p0.y) * int64_t(z - p0.z) / int64_t(p2.z - p0.z);
+ return seg;
+ }
+
+ void dumpSegmentsToHTML(const char* filename);
+};
+
+#endif//SLICER_H
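
The project2D() helper above interpolates, along each of the two triangle edges that cross the slicing plane, the X/Y position at height z; the caller guarantees that p0 lies on the opposite side of z from p1 and p2, so the divisors are never zero. A worked example in microns for the p0-p1 edge, with p0 = (0, 0, 0), p1 = (10000, 0, 2000) and z = 500:

    start.X = p0.x + (p1.x - p0.x) * (z - p0.z) / (p1.z - p0.z)
            = 0    + 10000         * 500        / 2000
            = 2500

i.e. a quarter of the way along the edge, matching z being a quarter of the way from p0.z to p1.z.
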
diff --git a/support.cpp b/support.cpp
new file mode 100644
index 0000000..b0d5feb
--- /dev/null
+++ b/support.cpp
@@ -0,0 +1,188 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "support.h"
+
+template<typename T> inline void swap(T& p0, T& p1)
+{
+ T tmp = p0;
+ p0 = p1;
+ p1 = tmp;
+}
+
+int cmp_SupportPoint(const void* a, const void* b)
+{
+ return ((SupportPoint*)a)->z - ((SupportPoint*)b)->z;
+}
+
+void generateSupportGrid(SupportStorage& storage, OptimizedModel* om, int supportAngle, bool supportEverywhere, int supportXYDistance, int supportZDistance)
+{
+ storage.generated = false;
+ if (supportAngle < 0)
+ return;
+ storage.generated = true;
+
+ storage.gridOffset.X = om->vMin.x;
+ storage.gridOffset.Y = om->vMin.y;
+ storage.gridScale = 200;
+ storage.gridWidth = (om->modelSize.x / storage.gridScale) + 1;
+ storage.gridHeight = (om->modelSize.y / storage.gridScale) + 1;
+ storage.grid = new vector<SupportPoint>[storage.gridWidth * storage.gridHeight];
+ storage.angle = supportAngle;
+ storage.everywhere = supportEverywhere;
+ storage.XYDistance = supportXYDistance;
+ storage.ZDistance = supportZDistance;
+
+ for(unsigned int volumeIdx = 0; volumeIdx < om->volumes.size(); volumeIdx++)
+ {
+ OptimizedVolume* vol = &om->volumes[volumeIdx];
+ for(unsigned int faceIdx = 0; faceIdx < vol->faces.size(); faceIdx++)
+ {
+ OptimizedFace* face = &vol->faces[faceIdx];
+ Point3 v0 = vol->points[face->index[0]].p;
+ Point3 v1 = vol->points[face->index[1]].p;
+ Point3 v2 = vol->points[face->index[2]].p;
+
+ Point3 normal = (v1 - v0).cross(v2 - v0);
+ int32_t normalSize = normal.vSize();
+
+ double cosAngle = fabs(double(normal.z) / double(normalSize));
+
+ v0.x = (v0.x - storage.gridOffset.X) / storage.gridScale;
+ v0.y = (v0.y - storage.gridOffset.Y) / storage.gridScale;
+ v1.x = (v1.x - storage.gridOffset.X) / storage.gridScale;
+ v1.y = (v1.y - storage.gridOffset.Y) / storage.gridScale;
+ v2.x = (v2.x - storage.gridOffset.X) / storage.gridScale;
+ v2.y = (v2.y - storage.gridOffset.Y) / storage.gridScale;
+
+ if (v0.x > v1.x) swap(v0, v1);
+ if (v1.x > v2.x) swap(v1, v2);
+ if (v0.x > v1.x) swap(v0, v1);
+ for(int64_t x=v0.x; x<v1.x; x++)
+ {
+ int64_t y0 = v0.y + (v1.y - v0.y) * (x - v0.x) / (v1.x - v0.x);
+ int64_t y1 = v0.y + (v2.y - v0.y) * (x - v0.x) / (v2.x - v0.x);
+ int64_t z0 = v0.z + (v1.z - v0.z) * (x - v0.x) / (v1.x - v0.x);
+ int64_t z1 = v0.z + (v2.z - v0.z) * (x - v0.x) / (v2.x - v0.x);
+
+ if (y0 > y1) { swap(y0, y1); swap(z0, z1); }
+ for(int64_t y=y0; y<y1; y++)
+ storage.grid[x+y*storage.gridWidth].push_back(SupportPoint(z0 + (z1 - z0) * (y-y0) / (y1-y0), cosAngle));
+ }
+ for(int64_t x=v1.x; x<v2.x; x++)
+ {
+ int64_t y0 = v1.y + (v2.y - v1.y) * (x - v1.x) / (v2.x - v1.x);
+ int64_t y1 = v0.y + (v2.y - v0.y) * (x - v0.x) / (v2.x - v0.x);
+ int64_t z0 = v1.z + (v2.z - v1.z) * (x - v1.x) / (v2.x - v1.x);
+ int64_t z1 = v0.z + (v2.z - v0.z) * (x - v0.x) / (v2.x - v0.x);
+
+ if (y0 > y1) { swap(y0, y1); swap(z0, z1); }
+ for(int64_t y=y0; y<y1; y++)
+ storage.grid[x+y*storage.gridWidth].push_back(SupportPoint(z0 + (z1 - z0) * (y-y0) / (y1-y0), cosAngle));
+ }
+ }
+ }
+
+ for(int32_t x=0; x<storage.gridWidth; x++)
+ {
+ for(int32_t y=0; y<storage.gridHeight; y++)
+ {
+ unsigned int n = x+y*storage.gridWidth;
+ qsort(storage.grid[n].data(), storage.grid[n].size(), sizeof(SupportPoint), cmp_SupportPoint);
+ }
+ }
+ storage.gridOffset.X += storage.gridScale / 2;
+ storage.gridOffset.Y += storage.gridScale / 2;
+}
+
+bool SupportPolyGenerator::needSupportAt(Point p)
+{
+ if (p.X < 1) return false;
+ if (p.Y < 1) return false;
+ if (p.X >= storage.gridWidth - 1) return false;
+ if (p.Y >= storage.gridHeight - 1) return false;
+ if (done[p.X + p.Y * storage.gridWidth]) return false;
+
+ unsigned int n = p.X+p.Y*storage.gridWidth;
+
+ if (everywhere)
+ {
+ bool ok = false;
+ for(unsigned int i=0; i<storage.grid[n].size(); i+=2)
+ {
+ if (storage.grid[n][i].cosAngle >= cosAngle && storage.grid[n][i].z - supportZDistance >= z && (i == 0 || storage.grid[n][i-1].z + supportZDistance < z))
+ {
+ ok = true;
+ break;
+ }
+ }
+ if (!ok) return false;
+ }else{
+ if (storage.grid[n].size() < 1) return false;
+ if (storage.grid[n][0].cosAngle < cosAngle) return false;
+ if (storage.grid[n][0].z - supportZDistance < z) return false;
+ }
+ return true;
+}
+
+void SupportPolyGenerator::lazyFill(Point startPoint)
+{
+ static int nr = 0;
+ nr++;
+ PolygonRef poly = polygons.newPoly();
+ Polygons tmpPolygons;
+ PolygonRef tmpPoly = tmpPolygons.newPoly();
+
+ while(1)
+ {
+ Point p = startPoint;
+ done[p.X + p.Y * storage.gridWidth] = nr;
+ while(needSupportAt(p + Point(1, 0)))
+ {
+ p.X ++;
+ done[p.X + p.Y * storage.gridWidth] = nr;
+ }
+ tmpPoly.add(startPoint * storage.gridScale + storage.gridOffset - Point(storage.gridScale/2, 0));
+ poly.add(p * storage.gridScale + storage.gridOffset);
+ startPoint.Y++;
+ while(!needSupportAt(startPoint) && startPoint.X <= p.X)
+ startPoint.X ++;
+ if (startPoint.X > p.X)
+ {
+ for(unsigned int n=0;n<tmpPoly.size();n++)
+ {
+ poly.add(tmpPoly[tmpPoly.size()-n-1]);
+ }
+ polygons.add(poly);
+ return;
+ }
+ while(needSupportAt(startPoint - Point(1, 0)) && startPoint.X > 1)
+ startPoint.X --;
+ }
+}
+
+SupportPolyGenerator::SupportPolyGenerator(SupportStorage& storage, int32_t z)
+: storage(storage), z(z), everywhere(storage.everywhere)
+{
+ if (!storage.generated)
+ return;
+
+ cosAngle = cos(double(90 - storage.angle) / 180.0 * M_PI) - 0.01;
+ this->supportZDistance = storage.ZDistance;
+
+ done = new int[storage.gridWidth*storage.gridHeight];
+ memset(done, 0, sizeof(int) * storage.gridWidth*storage.gridHeight);
+
+ for(int32_t y=1; y<storage.gridHeight; y++)
+ {
+ for(int32_t x=1; x<storage.gridWidth; x++)
+ {
+ if (!needSupportAt(Point(x, y)) || done[x + y * storage.gridWidth]) continue;
+
+ lazyFill(Point(x, y));
+ }
+ }
+
+    delete[] done;
+
+ polygons = polygons.offset(storage.XYDistance);
+}
+
diff --git a/support.h b/support.h
new file mode 100644
index 0000000..b5b9e12
--- /dev/null
+++ b/support.h
@@ -0,0 +1,30 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef SUPPORT_H
+#define SUPPORT_H
+
+#include "sliceDataStorage.h"
+#include "optimizedModel.h"
+
+void generateSupportGrid(SupportStorage& storage, OptimizedModel* om, int supportAngle, bool supportEverywhere, int supportXYDistance, int supportZDistance);
+
+class SupportPolyGenerator
+{
+public:
+ Polygons polygons;
+
+private:
+ SupportStorage& storage;
+ double cosAngle;
+ int32_t z;
+ int supportZDistance;
+ bool everywhere;
+ int* done;
+
+ bool needSupportAt(Point p);
+ void lazyFill(Point startPoint);
+
+public:
+ SupportPolyGenerator(SupportStorage& storage, int32_t z);
+};
+
+#endif//SUPPORT_H
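
A minimal usage sketch: the grid is built once from the optimized model and then queried per layer height, which is the same pattern raft.cpp and skirt.cpp use above. 'om', 'storage' and the 'config' values are assumed to come from the surrounding code.

    generateSupportGrid(storage.support, om,
                        config.supportAngle, config.supportEverywhere > 0,
                        config.supportXYDistance, config.supportZDistance);

    // Later, for a layer at height z microns:
    SupportPolyGenerator supportGenerator(storage.support, z);
    Polygons supportAtZ = supportGenerator.polygons;   // already offset by supportXYDistance
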
diff --git a/timeEstimate.cpp b/timeEstimate.cpp
new file mode 100644
index 0000000..f766b66
--- /dev/null
+++ b/timeEstimate.cpp
@@ -0,0 +1,305 @@
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <algorithm>
+#include "timeEstimate.h"
+
+#define MINIMUM_PLANNER_SPEED 0.05// (mm/sec)
+
+const double max_feedrate[TimeEstimateCalculator::NUM_AXIS] = {600, 600, 40, 25};
+const double minimumfeedrate = 0.01;
+const double acceleration = 5000;
+const double max_acceleration[TimeEstimateCalculator::NUM_AXIS] = {9000,9000,100,10000};
+const double max_xy_jerk = 20.0;
+const double max_z_jerk = 0.4;
+const double max_e_jerk = 5.0;
+
+template<typename T> const T square(const T& a) { return a * a; }
+
+void TimeEstimateCalculator::setPosition(Position newPos)
+{
+ currentPosition = newPos;
+}
+
+void TimeEstimateCalculator::reset()
+{
+ blocks.clear();
+}
+
+// Calculates the maximum allowable speed at this point when you must be able to reach target_velocity using the
+// acceleration within the allotted distance.
+static inline double max_allowable_speed(double acceleration, double target_velocity, double distance)
+{
+ return sqrt(target_velocity*target_velocity-2*acceleration*distance);
+}
+
+// Calculates the distance (not time) it takes to accelerate from initial_rate to target_rate using the given acceleration:
+static inline float estimate_acceleration_distance(float initial_rate, float target_rate, float acceleration)
+{
+ if (acceleration == 0)
+ return 0.0;
+ return (square(target_rate)-square(initial_rate)) / (2.0*acceleration);
+}
+
+// This function gives you the point at which you must start braking (at the rate of -acceleration) if
+// you started at speed initial_rate and accelerated until this point and want to end at the final_rate after
+// a total travel of distance. This can be used to compute the intersection point between acceleration and
+// deceleration in the cases where the trapezoid has no plateau (i.e. never reaches maximum speed)
+static inline double intersection_distance(double initial_rate, double final_rate, double acceleration, double distance)
+{
+ if (acceleration == 0.0)
+ return 0.0;
+ return (2.0*acceleration*distance-square(initial_rate)+square(final_rate)) / (4.0*acceleration);
+}
+
+// This function gives the time it takes to travel a given distance, starting at initial_feedrate and accelerating at the given rate.
+static inline double acceleration_time_from_distance(double initial_feedrate, double distance, double acceleration)
+{
+ double discriminant = sqrt(square(initial_feedrate) - 2 * acceleration * -distance);
+ return (-initial_feedrate + discriminant) / acceleration;
+}
+
+// Calculates trapezoid parameters so that the entry- and exit-speed is compensated by the provided factors.
+void TimeEstimateCalculator::calculate_trapezoid_for_block(Block *block, double entry_factor, double exit_factor)
+{
+ double initial_feedrate = block->nominal_feedrate*entry_factor;
+ double final_feedrate = block->nominal_feedrate*exit_factor;
+
+ double acceleration = block->acceleration;
+ double accelerate_distance = estimate_acceleration_distance(initial_feedrate, block->nominal_feedrate, acceleration);
+ double decelerate_distance = estimate_acceleration_distance(block->nominal_feedrate, final_feedrate, -acceleration);
+
+ // Calculate the size of Plateau of Nominal Rate.
+ double plateau_distance = block->distance-accelerate_distance - decelerate_distance;
+
+ // Is the Plateau of Nominal Rate smaller than nothing? That means no cruising, and we will
+ // have to use intersection_distance() to calculate when to abort acceleration and start braking
+ // in order to reach the final_rate exactly at the end of this block.
+ if (plateau_distance < 0)
+ {
+ accelerate_distance = intersection_distance(initial_feedrate, final_feedrate, acceleration, block->distance);
+ accelerate_distance = std::max(accelerate_distance, 0.0); // Check limits due to numerical round-off
+ accelerate_distance = std::min(accelerate_distance, block->distance);//(We can cast here to unsigned, because the above line ensures that we are above zero)
+ plateau_distance = 0;
+ }
+
+ block->accelerate_until = accelerate_distance;
+ block->decelerate_after = accelerate_distance+plateau_distance;
+ block->initial_feedrate = initial_feedrate;
+ block->final_feedrate = final_feedrate;
+}
+
+void TimeEstimateCalculator::plan(Position newPos, double feedrate)
+{
+ Block block;
+ memset(&block, 0, sizeof(block));
+
+ block.maxTravel = 0;
+ for(unsigned int n=0; n<NUM_AXIS; n++)
+ {
+ block.delta[n] = newPos[n] - currentPosition[n];
+ block.absDelta[n] = fabs(block.delta[n]);
+ block.maxTravel = std::max(block.maxTravel, block.absDelta[n]);
+ }
+ if (block.maxTravel <= 0)
+ return;
+ if (feedrate < minimumfeedrate)
+ feedrate = minimumfeedrate;
+ block.distance = sqrtf(square(block.absDelta[0]) + square(block.absDelta[1]) + square(block.absDelta[2]));
+ if (block.distance == 0.0)
+ block.distance = block.absDelta[3];
+ block.nominal_feedrate = feedrate;
+
+ Position current_feedrate;
+ Position current_abs_feedrate;
+ double feedrate_factor = 1.0;
+ for(unsigned int n=0; n<NUM_AXIS; n++)
+ {
+ current_feedrate[n] = block.delta[n] * feedrate / block.distance;
+        current_abs_feedrate[n] = fabs(current_feedrate[n]);
+ if (current_abs_feedrate[n] > max_feedrate[n])
+ feedrate_factor = std::min(feedrate_factor, max_feedrate[n] / current_abs_feedrate[n]);
+ }
+ //TODO: XY_FREQUENCY_LIMIT
+
+ if(feedrate_factor < 1.0)
+ {
+ for(unsigned int n=0; n<NUM_AXIS; n++)
+ {
+ current_feedrate[n] *= feedrate_factor;
+ current_abs_feedrate[n] *= feedrate_factor;
+ }
+ block.nominal_feedrate *= feedrate_factor;
+ }
+
+ block.acceleration = acceleration;
+ for(unsigned int n=0; n<NUM_AXIS; n++)
+ {
+ if (block.acceleration * (block.absDelta[n] / block.distance) > max_acceleration[n])
+ block.acceleration = max_acceleration[n];
+ }
+
+ double vmax_junction = max_xy_jerk/2;
+ double vmax_junction_factor = 1.0;
+ if(current_abs_feedrate[Z_AXIS] > max_z_jerk/2)
+ vmax_junction = std::min(vmax_junction, max_z_jerk/2);
+ if(current_abs_feedrate[E_AXIS] > max_e_jerk/2)
+ vmax_junction = std::min(vmax_junction, max_e_jerk/2);
+ vmax_junction = std::min(vmax_junction, block.nominal_feedrate);
+ double safe_speed = vmax_junction;
+
+ if ((blocks.size() > 0) && (previous_nominal_feedrate > 0.0001))
+ {
+ double xy_jerk = sqrt(square(current_feedrate[X_AXIS]-previous_feedrate[X_AXIS])+square(current_feedrate[Y_AXIS]-previous_feedrate[Y_AXIS]));
+ vmax_junction = block.nominal_feedrate;
+ if (xy_jerk > max_xy_jerk) {
+ vmax_junction_factor = (max_xy_jerk/xy_jerk);
+ }
+ if(fabs(current_feedrate[Z_AXIS] - previous_feedrate[Z_AXIS]) > max_z_jerk) {
+ vmax_junction_factor = std::min(vmax_junction_factor, (max_z_jerk/fabs(current_feedrate[Z_AXIS] - previous_feedrate[Z_AXIS])));
+ }
+ if(fabs(current_feedrate[E_AXIS] - previous_feedrate[E_AXIS]) > max_e_jerk) {
+ vmax_junction_factor = std::min(vmax_junction_factor, (max_e_jerk/fabs(current_feedrate[E_AXIS] - previous_feedrate[E_AXIS])));
+ }
+ vmax_junction = std::min(previous_nominal_feedrate, vmax_junction * vmax_junction_factor); // Limit speed to max previous speed
+ }
+
+ block.max_entry_speed = vmax_junction;
+
+ double v_allowable = max_allowable_speed(-block.acceleration, MINIMUM_PLANNER_SPEED, block.distance);
+ block.entry_speed = std::min(vmax_junction, v_allowable);
+ block.nominal_length_flag = block.nominal_feedrate <= v_allowable;
+ block.recalculate_flag = true; // Always calculate trapezoid for new block
+
+ previous_feedrate = current_feedrate;
+ previous_nominal_feedrate = block.nominal_feedrate;
+
+ currentPosition = newPos;
+
+ calculate_trapezoid_for_block(&block, block.entry_speed/block.nominal_feedrate, safe_speed/block.nominal_feedrate);
+
+ blocks.push_back(block);
+}
+
+double TimeEstimateCalculator::calculate()
+{
+ reverse_pass();
+ forward_pass();
+ recalculate_trapezoids();
+
+ double totalTime = 0;
+ for(unsigned int n=0; n<blocks.size(); n++)
+ {
+ double plateau_distance = blocks[n].decelerate_after - blocks[n].accelerate_until;
+
+ totalTime += acceleration_time_from_distance(blocks[n].initial_feedrate, blocks[n].accelerate_until, blocks[n].acceleration);
+ totalTime += plateau_distance / blocks[n].nominal_feedrate;
+ totalTime += acceleration_time_from_distance(blocks[n].final_feedrate, (blocks[n].distance - blocks[n].decelerate_after), blocks[n].acceleration);
+ }
+ return totalTime;
+}
+
+// The kernel called by accelerationPlanner::calculate() when scanning the plan from last to first entry.
+void TimeEstimateCalculator::planner_reverse_pass_kernel(Block *previous, Block *current, Block *next)
+{
+ if(!current || !next)
+ return;
+
+ // If entry speed is already at the maximum entry speed, no need to recheck. Block is cruising.
+ // If not, block in state of acceleration or deceleration. Reset entry speed to maximum and
+ // check for maximum allowable speed reductions to ensure maximum possible planned speed.
+ if (current->entry_speed != current->max_entry_speed)
+ {
+ // If nominal length true, max junction speed is guaranteed to be reached. Only compute
+ // for max allowable speed if block is decelerating and nominal length is false.
+ if ((!current->nominal_length_flag) && (current->max_entry_speed > next->entry_speed))
+ {
+ current->entry_speed = std::min(current->max_entry_speed, max_allowable_speed(-current->acceleration, next->entry_speed, current->distance));
+ } else {
+ current->entry_speed = current->max_entry_speed;
+ }
+ current->recalculate_flag = true;
+ }
+}
+
+void TimeEstimateCalculator::reverse_pass()
+{
+ Block* block[3] = {NULL, NULL, NULL};
+ for(unsigned int n=blocks.size()-1; int(n)>=0; n--)
+ {
+ block[2]= block[1];
+ block[1]= block[0];
+ block[0] = &blocks[n];
+ planner_reverse_pass_kernel(block[0], block[1], block[2]);
+ }
+}
+
+// The kernel called by accelerationPlanner::calculate() when scanning the plan from first to last entry.
+void TimeEstimateCalculator::planner_forward_pass_kernel(Block *previous, Block *current, Block *next)
+{
+ if(!previous)
+ return;
+
+ // If the previous block is an acceleration block, but it is not long enough to complete the
+ // full speed change within the block, we need to adjust the entry speed accordingly. Entry
+ // speeds have already been reset, maximized, and reverse planned by reverse planner.
+ // If nominal length is true, max junction speed is guaranteed to be reached. No need to recheck.
+ if (!previous->nominal_length_flag)
+ {
+ if (previous->entry_speed < current->entry_speed)
+ {
+ double entry_speed = std::min(current->entry_speed, max_allowable_speed(-previous->acceleration,previous->entry_speed,previous->distance) );
+
+ // Check for junction speed change
+ if (current->entry_speed != entry_speed)
+ {
+ current->entry_speed = entry_speed;
+ current->recalculate_flag = true;
+ }
+ }
+ }
+}
+
+void TimeEstimateCalculator::forward_pass()
+{
+ Block* block[3] = {NULL, NULL, NULL};
+ for(unsigned int n=0; n<blocks.size(); n++)
+ {
+ block[0]= block[1];
+ block[1]= block[2];
+ block[2] = &blocks[n];
+ planner_forward_pass_kernel(block[0], block[1], block[2]);
+ }
+ planner_forward_pass_kernel(block[1], block[2], NULL);
+}
+
+// Recalculates the trapezoid speed profiles for all blocks in the plan according to the
+// entry_factor for each junction. Must be called by planner_recalculate() after
+// updating the blocks.
+void TimeEstimateCalculator::recalculate_trapezoids()
+{
+ Block *current;
+ Block *next = NULL;
+
+    for(unsigned int n=0; n<blocks.size(); n++)
+ {
+ current = next;
+ next = &blocks[n];
+ if (current)
+ {
+ // Recalculate if current block entry or exit junction speed has changed.
+ if (current->recalculate_flag || next->recalculate_flag)
+ {
+ // NOTE: Entry and exit factors always > 0 by all previous logic operations.
+ calculate_trapezoid_for_block(current, current->entry_speed/current->nominal_feedrate, next->entry_speed/current->nominal_feedrate);
+ current->recalculate_flag = false; // Reset current only to ensure next trapezoid is computed
+ }
+ }
+ }
+ // Last/newest block in buffer. Exit speed is set with MINIMUM_PLANNER_SPEED. Always recalculated.
+ if(next != NULL)
+ {
+ calculate_trapezoid_for_block(next, next->entry_speed/next->nominal_feedrate, MINIMUM_PLANNER_SPEED/next->nominal_feedrate);
+ next->recalculate_flag = false;
+ }
+}
diff --git a/timeEstimate.h b/timeEstimate.h
new file mode 100644
index 0000000..45513e6
--- /dev/null
+++ b/timeEstimate.h
@@ -0,0 +1,76 @@
+#ifndef TIME_ESTIMATE_H
+#define TIME_ESTIMATE_H
+
+#include <stdint.h>
+#include <vector>
+
+/**
+ The TimeEstimateCalculator class generates an estimate of the printing time, taking acceleration into account.
+ Some of this code has been adapted from the Marlin sources.
+*/
+
+class TimeEstimateCalculator
+{
+public:
+ const static unsigned int NUM_AXIS = 4;
+ const static unsigned int X_AXIS = 0;
+ const static unsigned int Y_AXIS = 1;
+ const static unsigned int Z_AXIS = 2;
+ const static unsigned int E_AXIS = 3;
+
+ class Position
+ {
+ public:
+ Position() {for(unsigned int n=0;n<NUM_AXIS;n++) axis[n] = 0;}
+ Position(double x, double y, double z, double e) {axis[0] = x;axis[1] = y;axis[2] = z;axis[3] = e;}
+ double axis[NUM_AXIS];
+
+ double& operator[](const int n) { return axis[n]; }
+ };
+
+ class Block
+ {
+ public:
+ bool recalculate_flag;
+
+ double accelerate_until;
+ double decelerate_after;
+ double initial_feedrate;
+ double final_feedrate;
+
+ double entry_speed;
+ double max_entry_speed;
+ bool nominal_length_flag;
+
+ double nominal_feedrate;
+ double maxTravel;
+ double distance;
+ double acceleration;
+ Position delta;
+ Position absDelta;
+ };
+
+private:
+ Position previous_feedrate;
+ double previous_nominal_feedrate;
+
+ Position currentPosition;
+
+ std::vector<Block> blocks;
+public:
+ void setPosition(Position newPos);
+ void plan(Position newPos, double feedRate);
+ void reset();
+
+ double calculate();
+private:
+ void reverse_pass();
+ void forward_pass();
+ void recalculate_trapezoids();
+
+ void calculate_trapezoid_for_block(Block *block, double entry_factor, double exit_factor);
+ void planner_reverse_pass_kernel(Block *previous, Block *current, Block *next);
+ void planner_forward_pass_kernel(Block *previous, Block *current, Block *next);
+};
+
+#endif//TIME_ESTIMATE_H
diff --git a/utils/floatpoint.h b/utils/floatpoint.h
new file mode 100644
index 0000000..df713db
--- /dev/null
+++ b/utils/floatpoint.h
@@ -0,0 +1,83 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef FLOAT_POINT_H
+#define FLOAT_POINT_H
+
+/*
+Floating point 3D points are used during model loading as 3D vectors.
+They represent millimeters in 3D space.
+*/
+
+#include "utils/intpoint.h"
+
+#include <stdint.h>
+#include <math.h>
+
+class FPoint3
+{
+public:
+ double x,y,z;
+ FPoint3() {}
+ FPoint3(double _x, double _y, double _z): x(_x), y(_y), z(_z) {}
+
+ FPoint3 operator+(const FPoint3& p) const { return FPoint3(x+p.x, y+p.y, z+p.z); }
+ FPoint3 operator-(const FPoint3& p) const { return FPoint3(x-p.x, y-p.y, z-p.z); }
+ FPoint3 operator*(const double f) const { return FPoint3(x*f, y*f, z*f); }
+ FPoint3 operator/(const double f) const { return FPoint3(x/f, y/f, z/f); }
+
+ FPoint3& operator += (const FPoint3& p) { x += p.x; y += p.y; z += p.z; return *this; }
+ FPoint3& operator -= (const FPoint3& p) { x -= p.x; y -= p.y; z -= p.z; return *this; }
+
+    bool operator==(const FPoint3& p) const { return x==p.x&&y==p.y&&z==p.z; }
+    bool operator!=(const FPoint3& p) const { return x!=p.x||y!=p.y||z!=p.z; }
+
+ double max()
+ {
+ if (x > y && x > z) return x;
+ if (y > z) return y;
+ return z;
+ }
+
+ bool testLength(double len)
+ {
+ return vSize2() <= len*len;
+ }
+
+ double vSize2()
+ {
+ return x*x+y*y+z*z;
+ }
+
+ double vSize()
+ {
+ return sqrt(vSize2());
+ }
+};
+
+class FMatrix3x3
+{
+public:
+ double m[3][3];
+
+ FMatrix3x3()
+ {
+ m[0][0] = 1.0;
+ m[1][0] = 0.0;
+ m[2][0] = 0.0;
+ m[0][1] = 0.0;
+ m[1][1] = 1.0;
+ m[2][1] = 0.0;
+ m[0][2] = 0.0;
+ m[1][2] = 0.0;
+ m[2][2] = 1.0;
+ }
+
+ Point3 apply(FPoint3 p)
+ {
+ return Point3(
+ (p.x * m[0][0] + p.y * m[1][0] + p.z * m[2][0]) * 1000.0,
+ (p.x * m[0][1] + p.y * m[1][1] + p.z * m[2][1]) * 1000.0,
+ (p.x * m[0][2] + p.y * m[1][2] + p.z * m[2][2]) * 1000.0);
+ }
+};
+
+#endif//FLOAT_POINT_H
diff --git a/utils/gettime.cpp b/utils/gettime.cpp
new file mode 100644
index 0000000..dc8bc1a
--- /dev/null
+++ b/utils/gettime.cpp
@@ -0,0 +1,14 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include "gettime.h"
+
+TimeKeeper::TimeKeeper()
+{
+ restart();
+}
+
+double TimeKeeper::restart()
+{
+ double ret = getTime() - startTime;
+ startTime = getTime();
+ return ret;
+}
diff --git a/utils/gettime.h b/utils/gettime.h
new file mode 100644
index 0000000..1c1d5c4
--- /dev/null
+++ b/utils/gettime.h
@@ -0,0 +1,33 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef GETTIME_H
+#define GETTIME_H
+
+#ifdef WIN32
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <stddef.h>
+#endif
+
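+// Returns a wall-clock timestamp in seconds: seconds since the Unix epoch on POSIX
+// systems, milliseconds-since-boot converted to seconds on Windows (GetTickCount()
+// wraps after roughly 49.7 days).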
+static inline double getTime()
+{
+#ifdef WIN32
+ return double(GetTickCount()) / 1000.0;
+#else
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return double(tv.tv_sec) + double(tv.tv_usec) / 1000000.0;
+#endif
+}
+
+class TimeKeeper
+{
+private:
+ double startTime;
+public:
+ TimeKeeper();
+
+ double restart();
+};
+
+#endif//GETTIME_H
diff --git a/utils/intpoint.h b/utils/intpoint.h
new file mode 100644
index 0000000..d78b38c
--- /dev/null
+++ b/utils/intpoint.h
@@ -0,0 +1,183 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef INT_POINT_H
+#define INT_POINT_H
+
+/*
+The integer point classes are used as early as possible in the pipeline and represent microns in 2D or 3D space.
+Integer points are used to avoid floating point rounding errors, and because ClipperLib uses them.
+*/
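+// Unit convention: 1 millimeter == 1000 integer units (microns); see vSizeMM()
+// below and FMatrix3x3::apply() in floatpoint.h for the conversions.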
+#define INLINE static inline
+
+//Include Clipper to get the ClipperLib::IntPoint definition, which we reuse as Point definition.
+#include "clipper/clipper.hpp"
+
+#include <limits.h>
+#include <stdint.h>
+#include <math.h>
+
+class Point3
+{
+public:
+ int32_t x,y,z;
+ Point3() {}
+ Point3(const int32_t _x, const int32_t _y, const int32_t _z): x(_x), y(_y), z(_z) {}
+
+ Point3 operator+(const Point3& p) const { return Point3(x+p.x, y+p.y, z+p.z); }
+ Point3 operator-(const Point3& p) const { return Point3(x-p.x, y-p.y, z-p.z); }
+ Point3 operator/(const int32_t i) const { return Point3(x/i, y/i, z/i); }
+
+ Point3& operator += (const Point3& p) { x += p.x; y += p.y; z += p.z; return *this; }
+ Point3& operator -= (const Point3& p) { x -= p.x; y -= p.y; z -= p.z; return *this; }
+
+ bool operator==(const Point3& p) const { return x==p.x&&y==p.y&&z==p.z; }
+ bool operator!=(const Point3& p) const { return x!=p.x||y!=p.y||z!=p.z; }
+
+ int32_t max()
+ {
+ if (x > y && x > z) return x;
+ if (y > z) return y;
+ return z;
+ }
+
+ bool testLength(int32_t len)
+ {
+ if (x > len || x < -len)
+ return false;
+ if (y > len || y < -len)
+ return false;
+ if (z > len || z < -len)
+ return false;
+ return vSize2() <= len*len;
+ }
+
+ int64_t vSize2()
+ {
+ return int64_t(x)*int64_t(x)+int64_t(y)*int64_t(y)+int64_t(z)*int64_t(z);
+ }
+
+ int32_t vSize()
+ {
+ return sqrt(vSize2());
+ }
+
+ Point3 cross(const Point3& p)
+ {
+ return Point3(
+ y*p.z-z*p.y,
+ z*p.x-x*p.z,
+ x*p.y-y*p.x);
+ }
+};
+
+/* 64bit Points are used mostly throughout the code; these are the 2D points from ClipperLib */
+typedef ClipperLib::IntPoint Point;
+class IntPoint {
+public:
+ int X, Y;
+ Point p() { return Point(X, Y); }
+};
+
+/* Extra operators to make it easier to do math with the 64bit Point objects */
+INLINE Point operator+(const Point& p0, const Point& p1) { return Point(p0.X+p1.X, p0.Y+p1.Y); }
+INLINE Point operator-(const Point& p0, const Point& p1) { return Point(p0.X-p1.X, p0.Y-p1.Y); }
+INLINE Point operator*(const Point& p0, const int32_t i) { return Point(p0.X*i, p0.Y*i); }
+INLINE Point operator/(const Point& p0, const int32_t i) { return Point(p0.X/i, p0.Y/i); }
+
+//Point& operator += (const Point& p) { x += p.x; y += p.y; return *this; }
+//Point& operator -= (const Point& p) { x -= p.x; y -= p.y; return *this; }
+
+INLINE bool operator==(const Point& p0, const Point& p1) { return p0.X==p1.X&&p0.Y==p1.Y; }
+INLINE bool operator!=(const Point& p0, const Point& p1) { return p0.X!=p1.X||p0.Y!=p1.Y; }
+
+INLINE int64_t vSize2(const Point& p0)
+{
+ return p0.X*p0.X+p0.Y*p0.Y;
+}
+INLINE float vSize2f(const Point& p0)
+{
+ return float(p0.X)*float(p0.X)+float(p0.Y)*float(p0.Y);
+}
+
+INLINE bool shorterThen(const Point& p0, int32_t len)
+{
+ if (p0.X > len || p0.X < -len)
+ return false;
+ if (p0.Y > len || p0.Y < -len)
+ return false;
+ return vSize2(p0) <= len*len;
+}
+
+INLINE int32_t vSize(const Point& p0)
+{
+ return sqrt(vSize2(p0));
+}
+
+INLINE double vSizeMM(const Point& p0)
+{
+ double fx = double(p0.X) / 1000.0;
+ double fy = double(p0.Y) / 1000.0;
+ return sqrt(fx*fx+fy*fy);
+}
+
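+// Scales p0 to length 'len'; near-zero input vectors fall back to (len, 0) to avoid dividing by zero.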
+INLINE Point normal(const Point& p0, int32_t len)
+{
+ int32_t _len = vSize(p0);
+ if (_len < 1)
+ return Point(len, 0);
+ return p0 * len / _len;
+}
+
+INLINE Point crossZ(const Point& p0)
+{
+ return Point(-p0.Y, p0.X);
+}
+INLINE int64_t dot(const Point& p0, const Point& p1)
+{
+ return p0.X * p1.X + p0.Y * p1.Y;
+}
+
+class PointMatrix
+{
+public:
+ double matrix[4];
+
+ PointMatrix()
+ {
+ matrix[0] = 1;
+ matrix[1] = 0;
+ matrix[2] = 0;
+ matrix[3] = 1;
+ }
+
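+    // Rotation by 'rotation' degrees: apply() maps (x,y) to (x*cos - y*sin, x*sin + y*cos).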
+ PointMatrix(double rotation)
+ {
+ rotation = rotation / 180 * M_PI;
+ matrix[0] = cos(rotation);
+ matrix[1] = -sin(rotation);
+ matrix[2] = -matrix[1];
+ matrix[3] = matrix[0];
+ }
+
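+    // Rotation that maps the direction of p onto the positive X axis (apply(p) becomes (|p|, 0)).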
+ PointMatrix(const Point p)
+ {
+ matrix[0] = p.X;
+ matrix[1] = p.Y;
+ double f = sqrt((matrix[0] * matrix[0]) + (matrix[1] * matrix[1]));
+ matrix[0] /= f;
+ matrix[1] /= f;
+ matrix[2] = -matrix[1];
+ matrix[3] = matrix[0];
+ }
+
+ Point apply(const Point p) const
+ {
+ return Point(p.X * matrix[0] + p.Y * matrix[1], p.X * matrix[2] + p.Y * matrix[3]);
+ }
+
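+    // Multiplies by the transpose; for these pure-rotation matrices that is the exact inverse of apply().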
+ Point unapply(const Point p) const
+ {
+ return Point(p.X * matrix[0] + p.Y * matrix[2], p.X * matrix[1] + p.Y * matrix[3]);
+ }
+};
+
+#endif//INT_POINT_H
diff --git a/utils/logoutput.cpp b/utils/logoutput.cpp
new file mode 100644
index 0000000..7fd5274
--- /dev/null
+++ b/utils/logoutput.cpp
@@ -0,0 +1,36 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "utils/logoutput.h"
+
+int verbose_level;
+
+void logError(const char* fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ vfprintf(stdout, fmt, args);
+ va_end(args);
+ fflush(stdout);
+}
+
+void _log(const char* fmt, ...)
+{
+ if (verbose_level < 1)
+ return;
+
+ va_list args;
+ va_start(args, fmt);
+ vfprintf(stdout, fmt, args);
+ va_end(args);
+ fflush(stdout);
+}
+
+void logProgress(const char* type, int value, int maxValue)
+{
+ if (verbose_level < 2)
+ return;
+
+ fprintf(stdout, "Progress:%s:%i:%i\n", type, value, maxValue);
+ fflush(stdout);
+}
diff --git a/utils/logoutput.h b/utils/logoutput.h
new file mode 100644
index 0000000..e56e26a
--- /dev/null
+++ b/utils/logoutput.h
@@ -0,0 +1,12 @@
+/** Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License */
+#ifndef LOGOUTPUT_H
+#define LOGOUTPUT_H
+
+extern int verbose_level;
+
+void logError(const char* fmt, ...);
+void _log(const char* fmt, ...);
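+// Presumably named _log to avoid clashing with log() from <math.h>; the macro keeps call sites as log(...).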
+#define log _log
+void logProgress(const char* type, int value, int maxValue);
+
+#endif//LOGOUTPUT_H
diff --git a/utils/polygon.h b/utils/polygon.h
new file mode 100644
index 0000000..66794cb
--- /dev/null
+++ b/utils/polygon.h
@@ -0,0 +1,301 @@
+#ifndef UTILS_POLYGON_H
+#define UTILS_POLYGON_H
+
+#include <vector>
+#include <assert.h>
+using std::vector;
+#include "clipper/clipper.hpp"
+
+#include "utils/intpoint.h"
+
+//#define CHECK_POLY_ACCESS
+#ifdef CHECK_POLY_ACCESS
+#define POLY_ASSERT(e) assert(e)
+#else
+#define POLY_ASSERT(e) do {} while(0)
+#endif
+
+class PolygonRef
+{
+ ClipperLib::Path* polygon;
+ PolygonRef()
+ : polygon(NULL)
+ {}
+public:
+ PolygonRef(ClipperLib::Path& polygon)
+ : polygon(&polygon)
+ {}
+
+ unsigned int size() const
+ {
+ return polygon->size();
+ }
+
+ Point operator[] (unsigned int index) const
+ {
+ POLY_ASSERT(index < size());
+ return (*polygon)[index];
+ }
+
+ void add(const Point p)
+ {
+ polygon->push_back(p);
+ }
+
+ void remove(unsigned int index)
+ {
+ POLY_ASSERT(index < size());
+ polygon->erase(polygon->begin() + index);
+ }
+
+ void clear()
+ {
+ polygon->clear();
+ }
+
+ bool orientation() const
+ {
+ return ClipperLib::Orientation(*polygon);
+ }
+
+ void reverse()
+ {
+ ClipperLib::ReversePath(*polygon);
+ }
+
+ int64_t polygonLength() const
+ {
+ int64_t length = 0;
+ Point p0 = (*polygon)[polygon->size()-1];
+ for(unsigned int n=0; n<polygon->size(); n++)
+ {
+ Point p1 = (*polygon)[n];
+ length += vSize(p0 - p1);
+ p0 = p1;
+ }
+ return length;
+ }
+
+ double area() const
+ {
+ return ClipperLib::Area(*polygon);
+ }
+
+ Point centerOfMass() const
+ {
+ double x = 0, y = 0;
+ Point p0 = (*polygon)[polygon->size()-1];
+ for(unsigned int n=0; n<polygon->size(); n++)
+ {
+ Point p1 = (*polygon)[n];
+ double second_factor = (p0.X * p1.Y) - (p1.X * p0.Y);
+
+ x += double(p0.X + p1.X) * second_factor;
+ y += double(p0.Y + p1.Y) * second_factor;
+ p0 = p1;
+ }
+
+ double area = Area(*polygon);
+ x = x / 6 / area;
+ y = y / 6 / area;
+
+ if (x < 0)
+ {
+ x = -x;
+ y = -y;
+ }
+ return Point(x, y);
+ }
+
+ friend class Polygons;
+};
+
+class Polygons
+{
+private:
+ ClipperLib::Paths polygons;
+public:
+ unsigned int size()
+ {
+ return polygons.size();
+ }
+
+ PolygonRef operator[] (unsigned int index)
+ {
+ POLY_ASSERT(index < size());
+ return PolygonRef(polygons[index]);
+ }
+ void remove(unsigned int index)
+ {
+ POLY_ASSERT(index < size());
+ polygons.erase(polygons.begin() + index);
+ }
+ void clear()
+ {
+ polygons.clear();
+ }
+ void add(const PolygonRef& poly)
+ {
+ polygons.push_back(*poly.polygon);
+ }
+ void add(const Polygons& other)
+ {
+ for(unsigned int n=0; n<other.polygons.size(); n++)
+ polygons.push_back(other.polygons[n]);
+ }
+ PolygonRef newPoly()
+ {
+ polygons.push_back(ClipperLib::Path());
+ return PolygonRef(polygons[polygons.size()-1]);
+ }
+
+ Polygons() {}
+ Polygons(const Polygons& other) { polygons = other.polygons; }
+ Polygons& operator=(const Polygons& other) { polygons = other.polygons; return *this; }
+ Polygons difference(const Polygons& other) const
+ {
+ Polygons ret;
+ ClipperLib::Clipper clipper;
+ clipper.AddPaths(polygons, ClipperLib::ptSubject, true);
+ clipper.AddPaths(other.polygons, ClipperLib::ptClip, true);
+ clipper.Execute(ClipperLib::ctDifference, ret.polygons);
+ return ret;
+ }
+ Polygons unionPolygons(const Polygons& other) const
+ {
+ Polygons ret;
+ ClipperLib::Clipper clipper;
+ clipper.AddPaths(polygons, ClipperLib::ptSubject, true);
+ clipper.AddPaths(other.polygons, ClipperLib::ptSubject, true);
+ clipper.Execute(ClipperLib::ctUnion, ret.polygons, ClipperLib::pftNonZero, ClipperLib::pftNonZero);
+ return ret;
+ }
+ Polygons intersection(const Polygons& other) const
+ {
+ Polygons ret;
+ ClipperLib::Clipper clipper;
+ clipper.AddPaths(polygons, ClipperLib::ptSubject, true);
+ clipper.AddPaths(other.polygons, ClipperLib::ptClip, true);
+ clipper.Execute(ClipperLib::ctIntersection, ret.polygons);
+ return ret;
+ }
+ Polygons offset(int distance) const
+ {
+ Polygons ret;
+ ClipperLib::ClipperOffset clipper;
+ clipper.AddPaths(polygons, ClipperLib::jtMiter, ClipperLib::etClosedPolygon);
+ clipper.MiterLimit = 2.0;
+ clipper.Execute(ret.polygons, distance);
+ return ret;
+ }
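+    // Splits the (optionally unioned) polygons into connected parts: each returned
+    // Polygons holds one outer contour followed by the holes directly inside it.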
+ vector<Polygons> splitIntoParts(bool unionAll = false) const
+ {
+ vector<Polygons> ret;
+ ClipperLib::Clipper clipper;
+ ClipperLib::PolyTree resultPolyTree;
+ clipper.AddPaths(polygons, ClipperLib::ptSubject, true);
+ if (unionAll)
+ clipper.Execute(ClipperLib::ctUnion, resultPolyTree, ClipperLib::pftNonZero, ClipperLib::pftNonZero);
+ else
+ clipper.Execute(ClipperLib::ctUnion, resultPolyTree);
+
+ _processPolyTreeNode(&resultPolyTree, ret);
+ return ret;
+ }
+private:
+ void _processPolyTreeNode(ClipperLib::PolyNode* node, vector<Polygons>& ret) const
+ {
+ for(int n=0; n<node->ChildCount(); n++)
+ {
+ ClipperLib::PolyNode* child = node->Childs[n];
+ Polygons polygons;
+ polygons.add(child->Contour);
+ for(int i=0; i<child->ChildCount(); i++)
+ {
+ polygons.add(child->Childs[i]->Contour);
+ _processPolyTreeNode(child->Childs[i], ret);
+ }
+ ret.push_back(polygons);
+ }
+ }
+public:
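+    // Re-unions the polygons using Clipper's default even-odd fill rule.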
+ Polygons processEvenOdd() const
+ {
+ Polygons ret;
+ ClipperLib::Clipper clipper;
+ clipper.AddPaths(polygons, ClipperLib::ptSubject, true);
+ clipper.Execute(ClipperLib::ctUnion, ret.polygons);
+ return ret;
+ }
+
+ int64_t polygonLength() const
+ {
+ int64_t length = 0;
+ for(unsigned int i=0; i<polygons.size(); i++)
+ {
+ Point p0 = polygons[i][polygons[i].size()-1];
+ for(unsigned int n=0; n<polygons[i].size(); n++)
+ {
+ Point p1 = polygons[i][n];
+ length += vSize(p0 - p1);
+ p0 = p1;
+ }
+ }
+ return length;
+ }
+
+ void applyMatrix(const PointMatrix& matrix)
+ {
+ for(unsigned int i=0; i<polygons.size(); i++)
+ {
+ for(unsigned int j=0; j<polygons[i].size(); j++)
+ {
+ polygons[i][j] = matrix.apply(polygons[i][j]);
+ }
+ }
+ }
+};
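+
+/* Illustrative usage sketch (hypothetical shapes, not taken from the engine):
+
+    Polygons outline = ...;                    // e.g. a layer outline
+    Polygons holes = ...;
+    Polygons walls = outline.difference(holes).offset(-400); // inset by 400 micron
+    int64_t total = walls.polygonLength();
+    vector<Polygons> islands = walls.splitIntoParts();
+*/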
+
+/* Axis-aligned bounding box */
+class AABB
+{
+public:
+ Point min, max;
+
+ AABB()
+ : min(LLONG_MIN, LLONG_MIN), max(LLONG_MIN, LLONG_MIN)
+ {
+ }
+ AABB(Polygons polys)
+ : min(LLONG_MIN, LLONG_MIN), max(LLONG_MIN, LLONG_MIN)
+ {
+ calculate(polys);
+ }
+
+ void calculate(Polygons polys)
+ {
+ min = Point(LLONG_MAX, LLONG_MAX);
+ max = Point(LLONG_MIN, LLONG_MIN);
+ for(unsigned int i=0; i<polys.size(); i++)
+ {
+ for(unsigned int j=0; j<polys[i].size(); j++)
+ {
+ if (min.X > polys[i][j].X) min.X = polys[i][j].X;
+ if (min.Y > polys[i][j].Y) min.Y = polys[i][j].Y;
+ if (max.X < polys[i][j].X) max.X = polys[i][j].X;
+ if (max.Y < polys[i][j].Y) max.Y = polys[i][j].Y;
+ }
+ }
+ }
+
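+    // True when the two boxes overlap; boxes that merely touch on an edge also count as a hit.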
+ bool hit(const AABB& other) const
+ {
+ if (max.X < other.min.X) return false;
+ if (min.X > other.max.X) return false;
+ if (max.Y < other.min.Y) return false;
+ if (min.Y > other.max.Y) return false;
+ return true;
+ }
+};
+
+#endif//UTILS_POLYGON_H
diff --git a/utils/polygondebug.h b/utils/polygondebug.h
new file mode 100644
index 0000000..3e19c00
--- /dev/null
+++ b/utils/polygondebug.h
@@ -0,0 +1,75 @@
+#ifndef POLYGON_DEBUG_H
+#define POLYGON_DEBUG_H
+
+#include <stdio.h>
+#include <algorithm> // for std::min / std::max used below
+#include "polygon.h"
+
+class PolygonDebug
+{
+private:
+ Polygons polys;
+ const char* filename;
+public:
+ PolygonDebug(const char* filename)
+ : filename(filename)
+ {
+ }
+
+ PolygonDebug(const char* filename, Polygons polys)
+ : filename(filename)
+ {
+ add(polys);
+ }
+
+ PolygonDebug& add(Polygons polys)
+ {
+ this->polys.add(polys);
+ return *this;
+ }
+
+ ~PolygonDebug()
+ {
+ Point polyMin(INT_MAX, INT_MAX), polyMax(INT_MIN, INT_MIN);
+ for(unsigned int j=0; j<polys.size(); j++)
+ {
+ for(unsigned int n=0; n<polys[j].size(); n++)
+ {
+ polyMin.X = std::min(polyMin.X, polys[j][n].X);
+ polyMin.Y = std::min(polyMin.Y, polys[j][n].Y);
+ polyMax.X = std::max(polyMax.X, polys[j][n].X);
+ polyMax.Y = std::max(polyMax.Y, polys[j][n].Y);
+ }
+ }
+ Point polySize = polyMax - polyMin;
+
+ FILE* f = fopen(filename, "a");
+ fprintf(f, "<!DOCTYPE html><html><body>\n");
+ //for(unsigned int i=0; i<layers.size(); i++)
+ //{
+ float scale = std::max(polySize.X, polySize.Y) / 1500;
+ fprintf(f, "<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\" style='width:%ipx;height:%ipx'>\n", int(polySize.X / scale), int(polySize.Y / scale));
+ fprintf(f, "<g fill-rule='evenodd' style=\"fill: gray; stroke:black;stroke-width:1\">\n");
+ fprintf(f, "<path d=\"");
+ for(unsigned int j=0; j<polys.size(); j++)
+ {
+ PolygonRef p = polys[j];
+ for(unsigned int n=0; n<p.size(); n++)
+ {
+ if (n == 0)
+ fprintf(f, "M");
+ else
+ fprintf(f, "L");
+ fprintf(f, "%f,%f ", float(p[n].X - polyMin.X)/scale, float(p[n].Y - polyMin.Y)/scale);
+ }
+ fprintf(f, "Z\n");
+ }
+ fprintf(f, "\"/>");
+ fprintf(f, "</g>\n");
+ fprintf(f, "</svg>\n");
+ //}
+ fprintf(f, "</body></html>");
+ fclose(f);
+ }
+};
+
+#endif//POLYGON_DEBUG_H