diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..dfe0770424b2a19faf507a501ebfc23be8f54e7b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# Auto detect text files and perform LF normalization
+* text=auto
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..e62ec04cdeece724caeeeeaeb6ae1f6af1bb6b9a
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/algorithm_config.yaml b/algorithm_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc4a6e5d353726e1fd89b80c4c74d5eba92244ce
--- /dev/null
+++ b/algorithm_config.yaml
@@ -0,0 +1,34 @@
+description: Estimates AGBD from SAR backscatter and LiDAR footprint AGBD data
+algo_name: sarbio
+version: 0.1.0
+environment: ubuntu
+repository_url: https://repo.maap-project.org/bnarayanarao/insar_forest_height.git
+docker_url: mas.maap-project.org/root/maap-workspaces/base_images/python:v4.1.0
+queue: maap-dps-gedi_boreal_worker-16vcpu-32gb
+build_command: sarbio/build.sh
+run_command: sarbio/sarbio.sh
+disk_space: 20GB
+inputs:
+  - name: backscatter
+    download: True
+  - name: inc
+    download: True
+  - name: lidar
+    download: True
+  - name: sm
+    download: True
+  - name: rhs
+    download: True
+  - name: lower_limit
+    download: False
+  - name: upper_limit
+    download: False
+  - name: window
+    download: False
+  - name: validation
+    download: False
+  - name: algorithm
+    download: False  
+  - name: overlap
+    download: False 
+
diff --git a/build.sh b/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..480787845f3b325ea153d1c19f7b28b4843506dd
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Build script: creates the sarbio conda environment, installs maap-py into
# it, and verifies that all required Python modules import cleanly.

set -xeuo pipefail

# Get the base directory of the script (resolving symlinks)
basedir=$(dirname "$(readlink -f "$0")")

echo "installing sarbio environment..."
# NOTE: the environment name must match the one activated below and the one
# used by `conda run` (the original created "sabio-env" -- a typo).
conda env create --name sarbio-env -f "${basedir}/environments/sarbio-env.yml"

# Install the maap.py package into the new environment
echo "installing maap-py..."
source activate sarbio-env
git clone --single-branch --branch v4.1.0 https://github.com/MAAP-Project/maap-py.git
cd maap-py
pip install -e .
echo "installed maap-py package!"


# Verify the GDAL installation and other package availability.
# `import sys` sits outside the try block so sys.exit() is guaranteed to be
# available in the except handler even when an earlier import fails.
conda run --no-capture-output -n sarbio-env python -c '
import sys
try:
    from osgeo import gdal
    import warnings
    import argparse
    import os
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from scipy.interpolate import griddata
    from tqdm import tqdm
    from concurrent.futures import ProcessPoolExecutor, as_completed
    print("All modules imported successfully.")
except ImportError as e:
    print(f"Import error: {e}")
    sys.exit(1)
'
+
diff --git a/environments/.gitkeep b/environments/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/environments/sarbio-env-win.yml b/environments/sarbio-env-win.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f4f2c753eab4de1af1321ba8c91b24600bd471ea
--- /dev/null
+++ b/environments/sarbio-env-win.yml
@@ -0,0 +1,73 @@
+name: sarbio-env
+channels:
+  - conda-forge
+  - defaults
+dependencies:
+  - blosc=1.21.6
+  - bzip2=1.0.8
+  - ca-certificates=2024.9.24
+  - freexl=2.0.0
+  - gdal=3.9.2
+  - geos=3.13.0
+  - geotiff=1.7.3
+  - intel-openmp=2024.2.1
+  - krb5=1.21.3
+  - lerc=4.0.0
+  - libarchive=3.7.4
+  - libblas=3.9.0
+  - libcblas=3.9.0
+  - libcurl=8.10.1
+  - libdeflate=1.22
+  - libexpat=2.6.3
+  - libffi=3.4.4
+  - libgdal-core=3.9.2
+  - libhwloc=2.11.1
+  - libiconv=1.17
+  - libjpeg-turbo=3.0.0
+  - libkml=1.3.0
+  - liblapack=3.9.0
+  - libpng=1.6.44
+  - librttopo=1.1.0
+  - libspatialite=5.1.0
+  - libsqlite=3.46.1
+  - libssh2=1.11.0
+  - libtiff=4.7.0
+  - libwebp-base=1.4.0
+  - libxml2=2.12.7
+  - libzlib=1.3.1
+  - lz4-c=1.9.4
+  - lzo=2.10
+  - minizip=4.0.6
+  - mkl=2024.1.0
+  - numpy=2.1.2
+  - openssl=3.3.2
+  - pcre2=10.44
+  - pip=24.2
+  - proj=9.5.0
+  - pthreads-win32=2.9.1
+  - python=3.10.15
+  - python_abi=3.10
+  - setuptools=75.1.0
+  - snappy=1.2.1
+  - sqlite=3.45.3
+  - tbb=2021.13.0
+  - tk=8.6.13
+  - tzdata=2024b
+  - ucrt=10.0.22621.0
+  - uriparser=0.9.8
+  - vc=14.40
+  - vc14_runtime=14.40.33810
+  - vs2015_runtime=14.40.33810
+  - wheel=0.44.0
+  - xerces-c=3.2.5
+  - xz=5.4.6
+  - zlib=1.3.1
+  - zstd=1.5.6
+  - pip:
+      - colorama==0.4.6
+      - empatches==0.2.3
+      - packaging==24.1
+      - python-dateutil==2.9.0.post0
+      - pytz==2024.2
+      - six==1.16.0
+
diff --git a/environments/sarbio-env.yml b/environments/sarbio-env.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5868b8e9af7b6e95fa3f2834d45b74958ba95dc6
--- /dev/null
+++ b/environments/sarbio-env.yml
@@ -0,0 +1,105 @@
+name: sarbio-env
+channels:
+  - conda-forge
+dependencies:
+  - _libgcc_mutex=0.1
+  - _openmp_mutex=4.5
+  - blosc=1.21.6
+  - bzip2=1.0.8
+  - c-ares=1.34.1
+  - ca-certificates=2024.8.30
+  - freexl=2.0.0
+  - gdal=3.9.2
+  - geos=3.13.0
+  - geotiff=1.7.3
+  - giflib=5.2.2
+  - icu=75.1
+  - json-c=0.18
+  - keyutils=1.6.1
+  - krb5=1.21.3
+  - ld_impl_linux-64=2.43
+  - lerc=4.0.0
+  - libarchive=3.7.4
+  - libblas=3.9.0
+  - libcblas=3.9.0
+  - libcurl=8.10.1
+  - libdeflate=1.22
+  - libedit=3.1.20191231
+  - libev=4.33
+  - libexpat=2.6.3
+  - libffi=3.4.2
+  - libgcc=14.1.0
+  - libgcc-ng=14.1.0
+  - libgdal-core=3.9.2
+  - libgfortran=14.1.0
+  - libgfortran-ng=14.1.0
+  - libgfortran5=14.1.0
+  - libgomp=14.1.0
+  - libiconv=1.17
+  - libjpeg-turbo=3.0.0
+  - libkml=1.3.0
+  - liblapack=3.9.0
+  - libnghttp2=1.58.0
+  - libnsl=2.0.1
+  - libopenblas=0.3.27
+  - libpng=1.6.44
+  - librttopo=1.1.0
+  - libspatialite=5.1.0
+  - libsqlite=3.46.1
+  - libssh2=1.11.0
+  - libstdcxx=14.1.0
+  - libstdcxx-ng=14.1.0
+  - libtiff=4.7.0
+  - libuuid=2.38.1
+  - libwebp-base=1.4.0
+  - libxcrypt=4.4.36
+  - libxml2=2.12.7
+  - libzlib=1.3.1
+  - lz4-c=1.9.4
+  - lzo=2.10
+  - minizip=4.0.7
+  - ncurses=6.5
+  - numpy=2.1.2
+  - openssl=3.3.2
+  - pcre2=10.44
+  - pip=24.2
+  - proj=9.5.0
+  - python=3.10.15
+  - python_abi=3.10
+  - readline=8.2
+  - setuptools=75.1.0
+  - snappy=1.2.1
+  - sqlite=3.46.1
+  - tk=8.6.13
+  - uriparser=0.9.8
+  - wheel=0.44.0
+  - xerces-c=3.2.5
+  - xz=5.2.6
+  - zlib=1.3.1
+  - zstd=1.5.6
+  - pip:
+      - contourpy==1.3.0
+      - cycler==0.12.1
+      - empatches==0.2.3
+      - fonttools==4.54.1
+      - imageio==2.35.1
+      - joblib==1.4.2
+      - kiwisolver==1.4.7
+      - lazy-loader==0.4
+      - matplotlib==3.9.2
+      - networkx==3.4.1
+      - packaging==24.1
+      - pandas==2.2.3
+      - pillow==10.4.0
+      - pyparsing==3.1.4
+      - python-dateutil==2.9.0.post0
+      - pytz==2024.2
+      - scikit-image==0.24.0
+      - scikit-learn==1.5.2
+      - scipy==1.14.1
+      - six==1.16.0
+      - threadpoolctl==3.5.0
+      - tifffile==2024.9.20
+      - tqdm==4.66.5
+      - tzdata==2024.2
+
diff --git a/sarbio.sh b/sarbio.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e5682d53911006b6ca9fc8841b3689742a24722e
--- /dev/null
+++ b/sarbio.sh
@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# DPS entry point: locates the input rasters and forwards the optional
# positional arguments to sarbio.py as named options.

set -xeuo pipefail
shopt -s nullglob

basedir=$(dirname "$(readlink -f "$0")")

source activate sarbio-env

sarbio_py="python ${basedir}/src/sarbio/sarbio.py"

# Echo the first file in input/ matching the given glob (empty if none).
pick_input() {
    local matches=(input/$1)
    echo "${matches[0]:-}"
}

# Abort with a message if the given path is empty (no match found).
# The check runs in the main shell: in the original, `exit 1` inside a
# $(...) substitution only terminated the subshell, and the error text
# silently became the "filename".
require() {
    if [[ -z "$2" ]]; then
        echo "$1" >&2
        exit 1
    fi
}

# Ensure there is at least one file of each type
backscatterFile=$(pick_input '*_hv_dB.tif')
require "No backscatter File found" "${backscatterFile}"
incFile=$(pick_input '*_inc_DN.tif')
require "No  incFile found" "${incFile}"
lidarFile=$(pick_input '*_AGBD.tif')
require "No lidarFile found" "${lidarFile}"
smFile=$(pick_input '*_SM.tif')
require "No soil moisture files found" "${smFile}"
rhsFile=$(pick_input '*_roughness.tif')
require "No roughness files found" "${rhsFile}"

# Initialize options array
options=()

# Append "--<name> <value>" unless the value is missing or the placeholder
# "-" (meaning "use the Python-side default"). Guarding the empty/unset case
# keeps `set -u` from aborting when fewer than six arguments are supplied.
add_option() {
    local name=$1 value=${2:-}
    if [[ -n "${value}" && "${value}" != "-" ]]; then
        options+=("--${name}" "${value}")
    fi
}

add_option lower_limit "${1:-}"
add_option upper_limit "${2:-}"
add_option window "${3:-}"
add_option validation "${4:-}"
add_option algorithm "${5:-}"
add_option overlap "${6:-}"

# Run the Python script with the determined options. Word-splitting of
# ${sarbio_py} into "python <script>" is intentional; the guarded array
# expansion is safe under `set -u` even when no options were added.
${sarbio_py} --backscatter "${backscatterFile}" --inc "${incFile}" --lidar "${lidarFile}" --sm "${smFile}" --rhs "${rhsFile}" ${options[@]+"${options[@]}"}
\ No newline at end of file
diff --git a/src/sarbio/.gitignore b/src/sarbio/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..5a9391e2d4046adfeefba3f541e7dabcb0a983e2
--- /dev/null
+++ b/src/sarbio/.gitignore
@@ -0,0 +1,5 @@
+# Ignore Python bytecode files
+*.pyc
+
+# Ignore __pycache__ directories
+__pycache__/
diff --git a/src/sarbio/algo.py b/src/sarbio/algo.py
new file mode 100644
index 0000000000000000000000000000000000000000..76e6a40f8534bc7b8219b1d066c5346ba450e7f7
--- /dev/null
+++ b/src/sarbio/algo.py
@@ -0,0 +1,550 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 27 14:40:29 2023
+
+@author: Narayanarao
+"""
+
+
+import argparse,os,sys,warnings,time
+
+import numpy as np, pandas as pd  
+from osgeo import gdal
+from scipy.stats import pearsonr
+from tqdm import tqdm
+from concurrent.futures import ProcessPoolExecutor, as_completed
+import concurrent.futures
+import os
+import scipy
+from concurrent.futures import ThreadPoolExecutor
+import gc
+from utils import blockshaped,unblockshaped
+from scipy.optimize import differential_evolution
+
def rmse(predictions, targets):
    """Root-mean-square error between two equally-shaped arrays."""
    diff = predictions - targets
    return np.sqrt(np.mean(diff ** 2))
+
+#################################################
+#################################################
+
def surf_ref(er, inc, k, s):
    """Surface reflectivity term |RH * conj(RV)| damped by a roughness factor.

    er  : relative dielectric constant
    inc : incidence angle (radians)
    k   : radar wavenumber
    s   : surface RMS height
    """
    sin2 = np.sin(inc) * np.sin(inc)
    root = np.sqrt(er - sin2)
    cos_i = np.cos(inc)
    r_h = (cos_i - root) / (cos_i + root)
    r_v = (er * cos_i - root) / (er * cos_i + root)
    roughness = np.exp(-1 * k * k * s * s * cos_i * cos_i)
    return np.abs(r_h * np.conjugate(r_v)) * roughness
+    
def surf_scattering(er, inc, ks):
    """Semi-empirical bare-surface backscatter (sHH, sVV, sHV).

    er  : relative dielectric constant
    inc : incidence angle (radians)
    ks  : wavenumber times RMS height (dimensionless roughness)
    """
    sin2 = np.sin(inc) * np.sin(inc)
    root = np.sqrt(er - sin2)
    cos_i = np.cos(inc)
    RH = (cos_i - root) / (cos_i + root)
    RV = (er * cos_i - root) / (er * cos_i + root)

    # nadir Fresnel reflectivity and the Oh-style empirical factors
    gama0 = np.abs((1 - np.sqrt(er)) / (1 + np.sqrt(er))) ** 2
    v = 1 - ((2 * inc) / np.pi) ** (1 / (3 * gama0))
    u = 0.23 * np.sqrt(gama0) * (1 - np.exp(-1 * ks))
    # NOTE(review): as written the exponent applies to the exp() result,
    # i.e. exp(-0.65*ks)**1.8 == exp(-1.17*ks). The Oh (1992) model uses
    # exp(-0.65 * ks**1.8) -- confirm which form is intended. Behavior is
    # preserved here.
    g = 0.7 * (1 - np.exp(-0.65 * ks) ** (1.8))

    reflectivity = np.abs(RH) ** 2 + np.abs(RV) ** 2
    cos3 = cos_i ** 3
    sHH = g * np.sqrt(v) * cos3 * reflectivity
    sVV = (g / np.sqrt(v)) * cos3 * reflectivity
    sHV = u * sVV
    return sHH, sVV, sHV
+
def NISAR_bio_HV(x_data, A, B, C, alpha, beta, gama):
    """Forward model: HV backscatter from biomass plus surface terms.

    x_data is the tuple (biomass, er, inc, k, s); A..gama are the six
    empirical model parameters. Returns the total backscatter as the sum
    of a volume term, a volume-surface interaction term, and an
    attenuated direct-surface term.
    """
    biomass, er, inc, k, s = x_data
    _, _, sHV = surf_scattering(er, inc, k * s)

    cos_i = np.cos(inc)
    # two-way canopy transmissivity
    tau2 = np.exp(-(B * biomass ** beta) / cos_i)
    volume = A * (biomass ** alpha) * cos_i * (1 - tau2)
    volume_surface = C * surf_ref(er, inc, k, s) * tau2 * biomass ** gama
    surface = sHV * tau2
    return volume + volume_surface + surface
+
def NISAR_bio_HH(x_data, A, B, C, alpha, beta, gama):
    """Forward model: HH backscatter from biomass plus surface terms.

    Identical in structure to NISAR_bio_HV but uses the direct-surface HH
    component. x_data is (biomass, er, inc, k, s); A..gama are the six
    empirical model parameters.
    """
    biomass, er, inc, k, s = x_data
    sHH, _, _ = surf_scattering(er, inc, k * s)

    cos_i = np.cos(inc)
    # two-way canopy transmissivity
    tau2 = np.exp(-(B * biomass ** beta) / cos_i)
    volume = A * (biomass ** alpha) * cos_i * (1 - tau2)
    volume_surface = C * surf_ref(er, inc, k, s) * tau2 * biomass ** gama
    surface = sHH * tau2
    return volume + volume_surface + surface
+
+
+#################################################
+#################################################
+
def process_inv_window(win, temp_sig, temp_inc, temp_er, temp_s, temp_lidar,
                       A, B, C, alpha, beta, gama, args):
    """Run the biomass inversion for one window.

    Thin wrapper over inversion(); `win` identifies the window but is not
    used by the computation. Returns (biom_est, error, est_sig).
    """
    biom_est, err, est_sig = inversion(temp_sig, temp_inc, temp_er, temp_s,
                                       temp_lidar, A, B, C, alpha, beta, gama,
                                       args.agbdl, args.agbdg)
    return biom_est, err, est_sig
+
+
def process_cal_window(win, temp_sig, temp_inc, temp_er, temp_s, temp_lidar,
                       lidar_Nsamples, args):
    """Derive the six model parameters (A, B, C, alpha, beta, gama) for one
    calibration window.

    Parameters
    ----------
    win : window identifier (not used by the computation)
    temp_sig, temp_inc, temp_er, temp_s, temp_lidar : 2-D window arrays of
        backscatter, incidence angle, dielectric, roughness and lidar AGBD
    lidar_Nsamples : total number of valid lidar samples in the scene
    args : namespace providing window_size, agbdl and agbdg

    Returns
    -------
    Seven (window_size x window_size) arrays: each fitted parameter
    broadcast over the window, plus the count of valid lidar pixels.
    """
    NAN_PARM = [np.nan] * 6
    DEFAULT_PARM = [0.013, 0.00195, 0.0047, 0.11, 0.9, 0.23]

    if np.isnan(temp_sig).all():
        # no backscatter data at all in this window: nothing to calibrate
        parm = NAN_PARM
    elif np.isnan(temp_lidar).all():
        # Backscatter present but no lidar reference in this window. Use the
        # global default parameters when the scene has few lidar samples
        # overall; otherwise leave the window unparameterized.
        # (Fixes the original boundary gap at lidar_Nsamples == 500, which
        # matched neither `< 500` nor `> 500` and fell through to
        # calibrate() with an all-NaN lidar block.)
        parm = DEFAULT_PARM if lidar_Nsamples < 500 else NAN_PARM
    else:
        parm = calibrate(temp_sig, temp_inc, temp_er, temp_s, temp_lidar,
                         args.agbdl, args.agbdg)

    # Broadcast each scalar parameter over the full window.
    shape = (args.window_size, args.window_size)
    A, B, C, alpha, beta, gama = (np.full(shape, p) for p in parm[:6])
    count = np.full(shape, np.count_nonzero(~np.isnan(temp_lidar)))

    return A, B, C, alpha, beta, gama, count
+
+#################################################
+##################################################
+
def inversion(temp_sig, temp_inc, temp_er, temp_s, temp_agbd, A, B, C,
              alpha, beta, gama, agbdl, agbdg):
    """Invert the HV forward model pixel-wise by brute-force lookup.

    For every pixel, NISAR_bio_HV is evaluated over a dense grid of
    candidate biomass values (agbdl .. agbdg in 0.1 steps) and the
    candidate whose simulated backscatter is closest to the observation
    is selected.

    temp_agbd is accepted for interface compatibility with the callers
    but is not used by the lookup itself.

    Returns (biom_est, error, est_sig), each reshaped to temp_inc's shape.
    """
    biom_step = np.arange(agbdl, agbdg + .1, .1).astype(np.float32)
    dim = np.shape(temp_inc)
    n_pix = np.size(temp_inc)

    # radar wavenumber for a 23 cm wavelength (L-band)
    k = np.repeat(2 * np.pi / .23, n_pix)

    # The image is processed in `slices` chunks to bound the memory used by
    # the (pixels x candidates) lookup tables built below.
    slices = 10
    rPad = n_pix % slices
    pad = (slices - rPad) if rPad != 0 else 0

    def _prep(arr):
        # Flatten, NaN-pad to a multiple of `slices`, and split into chunks.
        flat = arr.flatten()
        if pad:
            flat = np.pad(flat, (0, pad), mode='constant',
                          constant_values=np.nan)
        else:
            flat = flat.copy()
        return np.array_split(flat, slices)

    inc_ = _prep(temp_inc)
    sigma_vh_ = _prep(temp_sig)
    er_ = _prep(temp_er)
    s_ = _prep(temp_s)
    # Bug fix: the original assigned temp_s to k_ when no padding was
    # needed, silently replacing the wavenumber with surface roughness.
    k_ = _prep(k)
    A_ = _prep(A)
    B_ = _prep(B)
    C_ = _prep(C)
    alpha_ = _prep(alpha)
    beta_ = _prep(beta)
    gama_ = _prep(gama)

    n_cand = np.size(biom_step)

    def _cols(a):
        # Replicate a 1-D pixel vector into an (n_pixels, n_cand) table.
        return np.tile(a, (n_cand, 1)).T

    biom_est, error_list, est_sig = [], [], []
    for i in range(slices):
        inc_in = _cols(inc_[i])
        sigvh_in = _cols(sigma_vh_[i])
        er_in = _cols(er_[i])
        k_in = _cols(k_[i])
        s_in = _cols(s_[i])
        A_in = _cols(A_[i])
        B_in = _cols(B_[i])
        C_in = _cols(C_[i])
        alpha_in = _cols(alpha_[i])
        beta_in = _cols(beta_[i])
        gama_in = _cols(gama_[i])

        # one row of candidate biomass values per pixel
        bio_sim = np.tile(biom_step, (np.size(inc_[i]), 1))
        sigvh_sim = NISAR_bio_HV((bio_sim, er_in, inc_in, k_in, s_in),
                                 A_in, B_in, C_in, alpha_in, beta_in, gama_in)

        # pick, per pixel, the candidate minimizing |observed - simulated|
        err = np.abs(sigvh_in - sigvh_sim)
        row_idx = np.arange(len(bio_sim))
        best = np.argmin(err, axis=1)
        biom_est.append(bio_sim[row_idx, best])
        error_list.append(err[row_idx, best])
        est_sig.append(sigvh_sim[row_idx, best])

    def _unpad(parts):
        # Re-join the chunks and strip the NaN padding added in _prep.
        merged = np.concatenate(parts)
        return merged[:-pad] if pad else merged

    biom_est = _unpad(biom_est).reshape(dim)
    error_list = _unpad(error_list).reshape(dim)
    est_sig = _unpad(est_sig).reshape(dim)

    return biom_est, error_list, est_sig
+
+    
+
def calibrate(temp_sig, temp_inc, temp_er, temp_s, temp_agbd, agbdl, agbdg):
    """Fit the six NISAR_bio_HV parameters to one window of observations.

    Builds per-pixel samples of (backscatter, lidar AGBD, incidence,
    dielectric, roughness), drops rows containing NaNs, and least-squares
    fits NISAR_bio_HV with scipy's curve_fit (dogbox, all parameters
    bounded to [0, 1]).

    NOTE: temp_agbd is masked *in place* (values outside (agbdl, agbdg)
    are set to NaN). Callers such as process_cal_window rely on this side
    effect when counting valid lidar pixels afterwards, so it is kept.

    Returns a length-6 array [A, B, C, alpha, beta, gama]. Falls back to
    the global default parameters if the fit fails, and to all-NaN if the
    fit degenerates to all zeros.
    """
    DEFAULT_PARM = np.array([0.013, 0.00195, 0.0047, 0.11, 0.9, 0.23])

    sigma_vh = temp_sig.copy()
    sigma_vh[sigma_vh == 0] = np.nan          # zero backscatter == no data
    temp_agbd[temp_agbd <= agbdl] = np.nan    # in-place mask, see docstring
    temp_agbd[temp_agbd >= agbdg] = np.nan

    # Stack the per-pixel observations and keep only fully-valid rows.
    xy = np.vstack([sigma_vh.flatten(), temp_agbd.flatten(),
                    temp_inc.flatten(), temp_er.flatten(),
                    temp_s.flatten()]).T
    xy_data = xy[~np.isnan(xy).any(axis=1)]
    del xy
    sigma_vh, biomass, inc, er, s = (xy_data[:, col] for col in range(5))

    # radar wavenumber for a 23 cm wavelength (L-band)
    k = np.repeat(2 * np.pi / .23, np.size(sigma_vh))

    try:
        nlfit, _ = scipy.optimize.curve_fit(
            NISAR_bio_HV, (biomass, er, inc, k, s), sigma_vh,
            DEFAULT_PARM,
            bounds=((0, 0, 0, 0, 0, 0), (1, 1, 1, 1, 1, 1)),
            method='dogbox', maxfev=100000)
    except Exception:
        # fit failed (e.g. too few valid samples): fall back to defaults
        nlfit = DEFAULT_PARM.copy()

    if np.all(nlfit == 0):
        # a degenerate all-zero fit carries no information
        nlfit[:] = np.nan

    return nlfit
+
def process_block(i, j, cohArray, lidarArray, initial_ws, agbdl, agbdg, parm_):
    """Calibrate and invert one block centred on (i, j), growing the window
    until it contains enough valid lidar samples.

    Parameters
    ----------
    i, j : centre pixel of the block
    cohArray, lidarArray : full coherence and lidar rasters
    initial_ws : starting window size; grown by 2 while the block lacks data
    agbdl, agbdg : lower/upper AGBD limits forwarded to the fit
    parm_ : fallback parameter list (currently unused in the active code)

    Returns
    -------
    (start_i, end_i, start_j, end_j, s_parm, c_parm, rmse_parm, ht, count)
    describing the window actually used and its per-window outputs.

    NOTE(review): relies on cal_() and arc_sinc(), which are not defined in
    this part of the file -- presumably defined further down or imported at
    runtime; confirm their contracts there.
    """
    rows, cols = cohArray.shape
    S = initial_ws
    # clamp the initial window to the raster bounds
    start_i = max(i - S // 2, 0)
    end_i = min(i + S // 2, rows)
    start_j = max(j - S // 2, 0)
    end_j = min(j + S // 2, cols)

    while True:
        lidarBlock = lidarArray[start_i:end_i, start_j:end_j]
        cohBlock = cohArray[start_i:end_i, start_j:end_j]

        # Fewer than 5 valid coherence pixels: nothing to fit, return zeros.
        if np.count_nonzero(~np.isnan(cohBlock)) <5:
            # print('no coh')
            count = np.count_nonzero(~np.isnan(lidarBlock))
            s_parm = 0
            c_parm = 0
            rmse_parm = 0
            ht = np.zeros(cohBlock.shape)
            # print(lidarBlock.shape)
            del lidarBlock, cohBlock,lidarArray, cohArray
            gc.collect()
            return start_i, end_i, start_j, end_j, s_parm, c_parm, rmse_parm, ht, count
        
        # Enough lidar samples (> 3) to fit, or the window has grown past
        # 512 pixels on a side -- fit with whatever is available.
        # elif (np.isfinite(lidarBlock).any() and np.count_nonzero(~np.isnan(lidarBlock)) > 1) or max(lidarBlock.shape)>512:
        elif (np.isfinite(lidarBlock).any() and np.count_nonzero(~np.isnan(lidarBlock)) > 3) or max(lidarBlock.shape)>512:

            parm = cal_(cohBlock, lidarBlock, agbdl, agbdg)
            # mask restricts the height output to pixels with lidar data
            mask = np.where(~np.isnan(lidarBlock), 1, 0)

            # if np.all(np.array(parm) == 0):
            #     parm = parm_.copy()
            #     del parm_
            # if lidarBlock.shape[0]*lidarBlock.shape[1]>initial_ws*initial_ws:
            #     mask[np.shape(mask)[0]//2,np.shape(mask)[1]//2]=1
            #     # print('blank_blocks',lidarBlock.shape,parm)
            #     # np.fill_diagonal(mask, 1)
            #     # mask = np.flipud(mask)
            #     # np.fill_diagonal(mask, 1)

            # broadcast the fitted scalars over the window
            s_parm = np.full(lidarBlock.shape, parm[1]) 
            c_parm = np.full(lidarBlock.shape, parm[2]) 
            rmse_parm = np.full(lidarBlock.shape, parm[4])
            count = np.count_nonzero(~np.isnan(lidarBlock))

            # normalize coherence by parm[1], then invert to height via the
            # sinc-inverse model
            gama = cohBlock / parm[1]
            ht = arc_sinc(gama, parm[2]) * mask
            # ht = arc_sinc_fast(gama, parm[2]) * mask
            # print(lidarBlock.shape)
            del lidarBlock, cohBlock,lidarArray, cohArray,parm,mask,gama
            gc.collect()
            return start_i, end_i, start_j, end_j, s_parm, c_parm, rmse_parm, ht, count
        else:
            # Not enough data yet: grow the window by 2 and retry.
            S += 2
            start_i = max(i - S // 2, 0)
            end_i = min(i + S // 2, rows)
            start_j = max(j - S // 2, 0)
            end_j = min(j + S // 2, cols)

            # Window now covers the entire raster and still no data: give up.
            if start_i == 0 and end_i == rows and start_j == 0 and end_j == cols:
                print(f"Unable to find finite block at position ({i}, {j}).")
                return start_i, end_i, start_j, end_j, np.zeros_like(lidarBlock), np.zeros_like(lidarBlock), 0, np.zeros_like(lidarBlock), 0
+
## blocks (windows) are processed in parallel batches
def dynamicWindow(cohArray, lidarArray, initial_ws, agbdl, agbdg, batch_size=10):
    """Mosaic per-window calibration results over the whole scene in parallel.

    Tiles the scene into initial_ws windows, groups them into
    batch_size x batch_size batches, runs process_batch for each batch in a
    process pool, and writes each window's result back into full-size arrays.

    Returns (s_parm, c_parm, rmse_parm, ht_, count) as (rows, cols) arrays.
    """
    rows, cols = cohArray.shape
    s_parm = np.zeros((rows, cols), dtype=np.float32)
    c_parm = np.zeros((rows, cols), dtype=np.float32)
    rmse_parm = np.zeros((rows, cols), dtype=np.float32)
    count = np.zeros((rows, cols), dtype=np.int16)
    ht_ = np.zeros((rows, cols), dtype=np.float32)

    parm_ = [0, 0, 0, 0, 0]

    # BUG FIX: os.cpu_count() - 1 is 0 on a single-core host (and cpu_count()
    # may return None), which ProcessPoolExecutor rejects; clamp to >= 1.
    num_workers = max(1, (os.cpu_count() or 2) - 1)
    futures = []

    def _merge(future, pbar):
        # Write one completed batch of window results back into the mosaics,
        # then advance the bar (progress is per batch, not per block).
        for start_i, end_i, start_j, end_j, s_p, c_p, r_p, ht, cnt in future.result():
            s_parm[start_i:end_i, start_j:end_j] = s_p
            c_parm[start_i:end_i, start_j:end_j] = c_p
            rmse_parm[start_i:end_i, start_j:end_j] = r_p
            ht_[start_i:end_i, start_j:end_j] = ht
            count[start_i:end_i, start_j:end_j] = cnt
        pbar.update(1)
        gc.collect()

    # Total number of batches (for the progress bar).
    total_batches = ((rows // initial_ws) // batch_size) * ((cols // initial_ws) // batch_size)
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        with tqdm(total=total_batches) as pbar:
            for i in range(0, rows, initial_ws * batch_size):
                for j in range(0, cols, initial_ws * batch_size):
                    # Collect the window origins belonging to this batch.
                    batch = [(i + bi * initial_ws, j + bj * initial_ws)
                             for bi in range(batch_size)
                             for bj in range(batch_size)
                             if i + bi * initial_ws < rows and j + bj * initial_ws < cols]

                    futures.append(executor.submit(
                        process_batch, batch, cohArray, lidarArray,
                        initial_ws, agbdl, agbdg, parm_))

                    # Throttle: once enough batches are in flight, drain them
                    # all before submitting more, bounding peak memory.
                    if len(futures) >= num_workers:
                        for completed_future in as_completed(futures):
                            _merge(completed_future, pbar)
                        futures = []

            # Drain any remaining in-flight batches.
            for completed_future in as_completed(futures):
                _merge(completed_future, pbar)
            futures = []

    return s_parm, c_parm, rmse_parm, ht_, count
+
def process_batch(batch_futures, cohArray, lidarArray, initial_ws, agbdl, agbdg, parm_):
    """Run process_block for every (row, col) window origin in the batch.

    Returns a list of the per-window result tuples, in input order.
    """
    results = [
        process_block(block_i, block_j, cohArray, lidarArray,
                      initial_ws, agbdl, agbdg, parm_)
        for block_i, block_j in batch_futures
    ]
    # Workers are long-lived; nudge memory back between batches.
    gc.collect()
    return results
diff --git a/src/sarbio/args_in.py b/src/sarbio/args_in.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a7c0e69ad71de904f67b5b372a7ee8f3576f437
--- /dev/null
+++ b/src/sarbio/args_in.py
@@ -0,0 +1,629 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 27 14:40:29 2023
+
+@author: Narayanarao
+"""
+
+import argparse,os,sys,warnings,time
+
+import numpy as np, pandas as pd
+from osgeo import gdal
+from scipy import interpolate
+import matplotlib.pyplot as plt
+from matplotlib import cm
+from matplotlib.colors import Normalize 
+
+from scipy.stats import linregress
+from skimage.util.shape import view_as_blocks
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from scipy.interpolate import griddata
+from tqdm import tqdm
+from scipy.interpolate import interpn
+from empatches import EMPatches
+from sklearn.model_selection import train_test_split
+import concurrent.futures
+
+from scipy.ndimage import median_filter
+
+
+emp = EMPatches()
+
+gdal.UseExceptions()
+warnings.filterwarnings('ignore')
+warnings.filterwarnings('error')
+np.seterr(divide='ignore', invalid='ignore')
+
+
+from plotting import density_scatter,plot_data,rmse
+# from algo import arc_sinc,arc_sinc_,calibrate
+from algo import calibrate, dynamicWindow
+from algo import process_cal_window, process_inv_window
+from rst_io import read_bin, write_tif, write_bin
+from utils import blockshaped,unblockshaped,unpad,spatial_intp_lin,split_sample
+##########################################################################
+
+
+##########################################################################
def agb_inverse(args):
        """Run the end-to-end AGBD (above-ground biomass density) inversion.

        Reads the backscatter, incidence-angle, LiDAR AGBD, soil-moisture and
        surface-roughness rasters named in ``args``, calibrates the model
        window-by-window (calibration scheme selected by ``args.algo``),
        interpolates the calibration coefficients, inverts the model to an
        AGBD map, and writes coefficient/estimate GeoTIFFs plus a density
        scatter plot to ``../output`` (relative to the backscatter file).

        Side effects: changes the process working directory, mutates fields
        of ``args`` (agbdl/agbdg, and window_size in the algo==4 branch),
        creates the output directory, writes files, prints progress.
        """

    # try:
        t0 = time.time()
        print( "-backscatterFile {} -incFile {} -lidarFile {}  -smFile {} -rhsFile {} -agbdl {} -agbdg {} -window_size {}  -validation {} -algo {} -window_overlap {}".format(
        args.backscatterFile,
        args.incFile,
        args.lidarFile,
        args.smFile,
        args.rhsFile,
        args.agbdl,
        args.agbdg,
        args.window_size,
        args.validation,
        args.algo,
        args.window_overlap

        ))

        # --- Load input rasters -------------------------------------------
        lidar_agbd = read_bin(args.lidarFile)
        backscatterFile = args.backscatterFile
        sigma_hv = 10**(read_bin(backscatterFile)/10)  # dB -> linear power
        inc = read_bin(args.incFile)/100

        lidar_agbd = np.round(lidar_agbd,1)

        # inc[inc<33*180/np.pi] = np.nan
        # inc[inc>47*180/np.pi] = np.nan
        # incmask = inc.copy()
        # incmask[~np.isnan(inc)] = 1
        # sigma_hv = sigma_hv*incmask



        sm = read_bin(args.smFile)
        # Polynomial conversion: volumetric soil moisture -> dielectric constant.
        er = 3.03 + 9.3*sm + 146*sm**2- 76.7*sm**3
        del sm
        # NOTE(review): `s` (surface roughness raster) is rebound to a plain
        # list in the algo==2 branch below -- confirm that path never needs
        # the roughness data.
        s = read_bin(args.rhsFile)/100

        # print(lidar_agbd.shape,sigma_hv.shape,inc.shape)

        # Pad every raster so its shape is a multiple of window_size.
        # NOTE(review): when a dimension is already divisible by window_size
        # the modulo is 0, so rPad/cPad come out as window_size (not 0) and a
        # full extra window of zero padding is appended -- confirm intended.
        rPad,cPad = args.window_size-np.shape(lidar_agbd)[0]%args.window_size,args.window_size-np.shape(lidar_agbd)[1]%args.window_size
        pad_width = [(0, rPad), (0, cPad)]

        if rPad!=0 or cPad!=0:
            lidar_agbd = np.pad(lidar_agbd, pad_width, mode='constant',constant_values=0)
            sigma_hv = np.pad(sigma_hv, pad_width, mode='constant',constant_values=0)
            inc = np.pad(inc, pad_width, mode='constant',constant_values=0)
            er = np.pad(er, pad_width, mode='constant',constant_values=0)
            s = np.pad(s, pad_width, mode='constant',constant_values=0)


        lidar_agbd[lidar_agbd==0] = np.nan
        if args.agbdg==0 or (args.agbdl==None and args.agbdg==None):
            """ Calculating the upper limit of the heights to be estimated from the LiDAR data 
            Ceiling the upper limit to the closest multiple of 5 for the height at 99.9 percentile """
            args.agbdl=0
            pp99 = np.nanpercentile(lidar_agbd,99.9)
            args.agbdg = int( 5 * np.ceil( pp99 / 5. ))


        # Mask LiDAR samples outside the requested AGBD range.
        lidar_agbd[lidar_agbd<=args.agbdl] = np.nan
        lidar_agbd[lidar_agbd>args.agbdg] = np.nan

        lidar_Nsamples = np.sum(~np.isnan(lidar_agbd))
        # sigma_hv[sigma_hv>0]=np.nan
        rows,cols = np.shape(sigma_hv)[0],np.shape(sigma_hv)[1]
        ######################################################

        # --- Calibration/validation split ---------------------------------
        if args.validation>0 and args.validation<1:
            temp_lidar = blockshaped(lidar_agbd, args.window_size, args.window_size)
            lidar_agbd_cal = np.zeros(np.shape(temp_lidar))
            lidar_agbd_val = np.zeros(np.shape(temp_lidar))

            for win in range(np.shape(temp_lidar)[0]):

                # Split only windows with enough samples; otherwise keep all for cal.
                if np.count_nonzero(~np.isnan(temp_lidar[win,:,:])) > 10:
                    lidar_agbd_cal[win,:,:],lidar_agbd_val[win,:,:] = split_sample(temp_lidar[win,:,:],args.validation)
                else:
                    lidar_agbd_cal[win,:,:] = temp_lidar[win,:,:]

            lidar_agbd_cal = unblockshaped(lidar_agbd_cal, rows,cols)
            lidar_agbd_val = unblockshaped(lidar_agbd_val, rows,cols)

            lidar_agbd_cal[lidar_agbd_cal==0]=np.nan
            lidar_agbd_val[lidar_agbd_val==0]=np.nan

        elif args.validation==0:
            lidar_agbd_cal = lidar_agbd.copy()
            lidar_agbd_val = lidar_agbd.copy()
        else:
            print("\n   Invalid validation fraction!!\n    Please enter validation fraction between 0 and 1; \n    -val=[0,1)")
            sys.exit(1)

        # del lidar_agbd
        #########################################################
        # --- Output file paths --------------------------------------------
        os.chdir(os.path.dirname(backscatterFile))
        outDir = r'../output'
        if not os.path.exists(outDir):
            os.mkdir(outDir)

        # _postfix_str = '_w'+str(args.window_size)+'o'+"%02d" %(args.window_overlap*10)+"_"+str(args.agbdl)+str(args.agbdg)
        _postfix_str = f"_w{args.window_size}_{args.agbdl}_{args.agbdg}"

        outAFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_A.tif')
        outBFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_B.tif')
        outCFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_C.tif')

        outalphaFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_al.tif')
        outbetaFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_be.tif')
        outgamaFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_ga.tif')

        outAiFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_Ai.tif')
        outBiFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_Bi.tif')
        outCiFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_Ci.tif')

        outalphaiFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_ali.tif')
        outbetaiFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_bei.tif')
        outgamaiFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_gei.tif')

        outrmsehvFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_rmsehv.tif')
        outrhvFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_rhv.tif')



        outagbdFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_agbd.tif')
        outsigFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_hvest.tif')
        outhverrorFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_hverror.tif')

        outcntFile = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_count.tif')
        pltName = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'.png')
        # pltvalName = os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_val.png')

        if args.validation > 0 and args.validation < 1:
            pltName = os.path.join(outDir, os.path.basename(backscatterFile).split('.tif')[0] + _postfix_str + f"_cal_{int(100-args.validation*100)}.png")

        pltvalName = os.path.join(outDir, os.path.basename(backscatterFile).split('.tif')[0] + _postfix_str + f"_val_{int(args.validation*100)}.png")


        ##############################################
        # Generate calibration parameters
        print("[1/4] Generating calibration parameters... ")
        t1 = time.time()


        # --- algo 1: fixed windows, physical model (cal + interp + invert) --
        if args.algo==1:   
            temp_sig = blockshaped(sigma_hv, args.window_size, args.window_size)
            temp_inc = blockshaped(inc, args.window_size, args.window_size)
            temp_lidar = blockshaped(lidar_agbd_cal, args.window_size, args.window_size)
            temp_er = blockshaped(er, args.window_size, args.window_size)
            temp_s = blockshaped(s, args.window_size, args.window_size)

            # batch_size = 300  # Define your batch size
            num_windows = np.shape(temp_sig)[0]

            batch_size = max(num_windows//16,100)
            results = [None] * num_windows  # Preallocate a list to hold results

            with concurrent.futures.ProcessPoolExecutor() as executor:
                futures = {}
                for start in range(0, num_windows, batch_size):
                    end = min(start + batch_size, num_windows)
                    for win in range(start, end):
                        futures[executor.submit(process_cal_window, win, temp_sig[win,:,:], temp_inc[win,:,:], temp_er[win,:,:], 
                                                temp_s[win,:,:], temp_lidar[win,:,:], lidar_Nsamples, args)] = win

                for future in tqdm(concurrent.futures.as_completed(futures),total=len(futures)):
                    win_index = futures[future]  # Get the corresponding index
                    result = future.result()
                    results[win_index] = result  # Store result directly at the corresponding index

            # Unpack results

            A, B,C,alpha,beta,gama, count   = zip(*results)          

            # Build a validity mask: 1 where a LiDAR sample exists; windows
            # with no samples get an X-diagonal pattern instead.
            temp_mask = np.zeros(temp_lidar.shape)
            for win in range(np.shape(temp_lidar)[0]):
                mask = temp_lidar[win,:,:].copy()
                mask[~np.isnan(mask)] = 1 
                temp_mask[win,:,:] = mask

                if np.all(temp_lidar[win,:,:]==0) or np.all(np.isnan(temp_lidar[win,:,:])):
                    mask = np.zeros(temp_mask[win,:,:].shape)
                    # mask[np.shape(mask)[0]//2,np.shape(mask)[1]//2]=1
                    np.fill_diagonal(mask, 1)
                    mask = np.flipud(mask)
                    np.fill_diagonal(mask, 1)
                    temp_mask[win,:,:] = mask


            temp_mask = unblockshaped(temp_mask, rows,cols)



            A = unblockshaped(np.array(A), rows,cols)
            B = unblockshaped(np.array(B), rows,cols)
            C = unblockshaped(np.array(C), rows,cols)
            alpha = unblockshaped(np.array(alpha), rows,cols)
            beta = unblockshaped(np.array(beta), rows,cols)
            gama = unblockshaped(np.array(gama), rows,cols)

            # rmse_hv = unblockshaped(np.array(rmse_hv), rows,cols)
            # r_hv =  unblockshaped(np.array(r_hv), rows,cols)
            # agbd_est = unblockshaped(np.array(agbd_est), rows,cols)
            count = unblockshaped(np.array(count), rows,cols)
            # temp_sig = unblockshaped(temp_sig, rows,cols)
            # temp_er = unblockshaped(temp_er, rows,cols)
            # temp_inc = unblockshaped(temp_inc, rows,cols)
            # temp_s = unblockshaped(temp_s, rows,cols)
            # temp_lidar = unblockshaped(temp_lidar, rows,cols)


            # Zero means "no fit"; convert to NaN and keep only masked pixels.
            A[A==0]=np.nan
            B[B==0]=np.nan
            C[C==0]=np.nan
            alpha[alpha==0]=np.nan
            beta[beta==0]=np.nan
            gama[gama==0]=np.nan

            temp_mask[temp_mask==0]=np.nan
            A = A*temp_mask
            B = B*temp_mask
            C = C*temp_mask
            alpha = alpha*temp_mask
            beta = beta*temp_mask
            gama = gama*temp_mask


            t2 = time.time()
            print("[2/4] interpolating calibration parameters ... ",end=" ")
            # sys.exit(1)
            # Fill coefficient gaps between calibrated windows by spatial
            # linear interpolation.
            Ai = spatial_intp_lin(A)
            Bi = spatial_intp_lin(B)
            Ci = spatial_intp_lin(C)
            alphai = spatial_intp_lin(alpha)
            betai = spatial_intp_lin(beta)
            gamai = spatial_intp_lin(gama)
            print("%0.2f sec "%(time.time()-t2))



            Ai = blockshaped(Ai, args.window_size, args.window_size)
            Bi = blockshaped(Bi, args.window_size, args.window_size)
            Ci = blockshaped(Ci, args.window_size, args.window_size)
            alphai = blockshaped(alphai, args.window_size, args.window_size)
            betai = blockshaped(betai, args.window_size, args.window_size)
            gamai = blockshaped(gamai, args.window_size, args.window_size)

            t3 = time.time()
            print("[3/4] Generating AGBD ... ")


            num_windows = np.shape(temp_sig)[0]
            batch_size = max(num_windows//16,100)
            results = [None] * num_windows  # Preallocate a list to hold results

            with concurrent.futures.ProcessPoolExecutor() as executor:
                futures = {}
                for start in range(0, num_windows, batch_size):
                    end = min(start + batch_size, num_windows)
                    for win in range(start, end):
                        futures[executor.submit(process_inv_window, win, temp_sig[win,:,:], temp_inc[win,:,:], temp_er[win,:,:], temp_s[win,:,:], temp_lidar[win,:,:], 
                                                Ai[win,:,:], Bi[win,:,:], Ci[win,:,:], alphai[win,:,:], betai[win,:,:], gamai[win,:,:], args)] = win

                        # (win, temp_sig, temp_inc,  temp_er, temp_s, temp_lidar, A,B,C,alpha,beta,gama,args)
                for future in tqdm(concurrent.futures.as_completed(futures),total=len(futures)):
                    win_index = futures[future]  # Get the corresponding index
                    result = future.result()
                    results[win_index] = result  # Store result directly at the corresponding index

            # Unpack results

            biom_est, error_list, est_sig = zip(*results)  


            Ai = unblockshaped(Ai, rows,cols)
            Bi = unblockshaped(Bi, rows,cols)
            Ci = unblockshaped(Ci, rows,cols)
            alphai = unblockshaped(alphai, rows,cols)
            betai = unblockshaped(betai, rows,cols)
            gamai = unblockshaped(gamai, rows,cols)

            biom_est = unblockshaped(np.array(biom_est), rows,cols)
            error_list = unblockshaped(np.array(error_list), rows,cols)
            est_sig = unblockshaped(np.array(est_sig), rows,cols)




        # --- algo 2: overlapping patches (EMPatches) -----------------------
        # NOTE(review): `process_st_window` is not among this module's visible
        # imports -- confirm it is importable, else this path raises NameError.
        # Also `rmse__` is merged below while still an empty list (the fitted
        # values land in `rmse_est`) -- looks like a wrong variable.
        elif args.algo==2 and args.window_overlap>0:
            temp_sig, indices__ = emp.extract_patches(sigma_hv, patchsize=args.window_size, overlap=args.window_overlap)
            temp_lidar, indices__ = emp.extract_patches(lidar_agbd_cal, patchsize=args.window_size, overlap=args.window_overlap)

            # print("[1/3] Generating calibration parameters...")
            s=[]
            c=[]
            ht_=[]
            rmse__=[]
            count = []
            parm_ = [0,0,0,0,0]


            batch_size = 10  # Define your batch size
            num_windows = np.shape(temp_sig)[0]
            results = [None] * num_windows  # Preallocate a list to hold results

            with concurrent.futures.ProcessPoolExecutor() as executor:
                futures = {}
                for start in range(0, num_windows, batch_size):
                    end = min(start + batch_size, num_windows)
                    for win in range(start, end):
                        futures[executor.submit(process_st_window, win, temp_sig[win], temp_lidar[win], args)] = win

                for future in tqdm(concurrent.futures.as_completed(futures),total=len(futures)):
                    win_index = futures[future]  # Get the corresponding index
                    result = future.result()
                    results[win_index] = result  # Store result directly at the corresponding index

            # Unpack results
            s, c, rmse_est, count, ht_ = zip(*results)


            s = emp.merge_patches(s, indices__,mode='max')
            c = emp.merge_patches(c, indices__,mode='max')
            rmse__ = emp.merge_patches(rmse__, indices__,mode='max')
            ht_ = emp.merge_patches(ht_, indices__,mode='max')
            temp_sig = emp.merge_patches(temp_sig, indices__,mode='max')
            count = emp.merge_patches(count, indices__,mode='avg')

        # --- algo 3: dynamically grown windows -----------------------------
        elif args.algo==3:
            s, c, rmse__, ht_, count = dynamicWindow(sigma_hv, lidar_agbd_cal, args.window_size, args.agbdl, args.agbdg)

            temp_lidar = blockshaped(lidar_agbd_cal, args.window_size, args.window_size)
            temp_mask = np.zeros(temp_lidar.shape)

            """ uncomment below to get cal coefficents uniformly across all windows """    
            # for win in tqdm(range(np.shape(temp_lidar)[0])):
            #     mask = np.zeros(temp_mask[win,:,:].shape)
            #     np.fill_diagonal(mask, 1)
            #     mask = np.flipud(mask)
            #     np.fill_diagonal(mask, 1)
            #     temp_mask[win,:,:] = mask


            for win in tqdm(range(np.shape(temp_lidar)[0])):
                mask = temp_lidar[win,:,:].copy()
                mask[~np.isnan(mask)] = 1 
                temp_mask[win,:,:] = mask

                if np.all(temp_lidar[win,:,:]==0) or np.all(np.isnan(temp_lidar[win,:,:])):
                    mask = np.zeros(temp_mask[win,:,:].shape)
                    # mask[np.shape(mask)[0]//2,np.shape(mask)[1]//2]=1
                    np.fill_diagonal(mask, 1)
                    mask = np.flipud(mask)
                    np.fill_diagonal(mask, 1)
                    temp_mask[win,:,:] = mask

            temp_mask = unblockshaped(temp_mask, rows,cols)
            s = s*temp_mask
            c = c*temp_mask

            temp_sig = sigma_hv.copy()
        # --- algo 4: multi-scale windows (native + 20 + 50), blended -------
        # NOTE(review): this branch mutates args.window_size (20, then 50);
        # anything reading it afterwards sees 50 -- confirm intended.
        elif args.algo==4:   
            temp_sig = blockshaped(sigma_hv, args.window_size, args.window_size)
            temp_lidar = blockshaped(lidar_agbd_cal, args.window_size, args.window_size)

            batch_size = 100  # Define your batch size
            num_windows = np.shape(temp_sig)[0]
            results = [None] * num_windows  # Preallocate a list to hold results

            with concurrent.futures.ProcessPoolExecutor() as executor:
                futures = {}
                for start in range(0, num_windows, batch_size):
                    end = min(start + batch_size, num_windows)
                    for win in range(start, end):
                        futures[executor.submit(process_st_window, win, temp_sig[win,:,:], temp_lidar[win,:,:], args)] = win

                for future in tqdm(concurrent.futures.as_completed(futures),total=len(futures)):
                    win_index = futures[future]  # Get the corresponding index
                    result = future.result()
                    results[win_index] = result  # Store result directly at the corresponding index

            # Unpack results
            sw, cw, rmse__, count, ht_ = zip(*results)
            del results
            print('20')
            args.window_size = 20
            cor20 = blockshaped(sigma_hv, args.window_size, args.window_size)
            lidar20 = blockshaped(lidar_agbd_cal, args.window_size,args.window_size)
            print('20 bolcked')
            batch_size = 100  # Define your batch size
            num_windows = np.shape(cor20)[0]
            results = [None] * num_windows  # Preallocate a list to hold results

            with concurrent.futures.ProcessPoolExecutor() as executor:
                futures = {}
                for start in range(0, num_windows, batch_size):
                    end = min(start + batch_size, num_windows)
                    for win in range(start, end):
                        futures[executor.submit(process_st_window, win, cor20[win,:,:], lidar20[win,:,:], args)] = win

                for future in tqdm(concurrent.futures.as_completed(futures),total=len(futures)):
                    win_index = futures[future]  # Get the corresponding index
                    result = future.result()
                    results[win_index] = result  # Store result directly at the corresponding index

            # Unpack results
            s20, c20, _, _, _ = zip(*results)
            del results
            print('50')
            args.window_size = 50
            cor50 = blockshaped(sigma_hv, 50, 50)
            lidar50 = blockshaped(lidar_agbd_cal, 50,50)

            batch_size = 100  # Define your batch size
            num_windows = np.shape(cor50)[0]
            results = [None] * num_windows  # Preallocate a list to hold results

            with concurrent.futures.ProcessPoolExecutor() as executor:
                futures = {}
                for start in range(0, num_windows, batch_size):
                    end = min(start + batch_size, num_windows)
                    for win in range(start, end):
                        futures[executor.submit(process_st_window, win, cor50[win,:,:], lidar50[win,:,:], args)] = win

                for future in tqdm(concurrent.futures.as_completed(futures),total=len(futures)):
                    win_index = futures[future]  # Get the corresponding index
                    result = future.result()
                    results[win_index] = result  # Store result directly at the corresponding index

            # Unpack results
            s50, c50, _, _, _ = zip(*results)


            s20 = unblockshaped(np.array(s20), rows,cols)
            c20 = unblockshaped(np.array(c20), rows,cols)
            s50 = unblockshaped(np.array(s50), rows,cols)
            c50 = unblockshaped(np.array(c50), rows,cols)


            sw = unblockshaped(np.array(sw), rows,cols)
            cw = unblockshaped(np.array(cw), rows,cols)
            rmse__ = unblockshaped(np.array(rmse__), rows,cols)
            ht_ = unblockshaped(np.array(ht_), rows,cols)
            count = unblockshaped(np.array(count), rows,cols)
            temp_sig = unblockshaped(temp_sig, rows,cols)

            # # s = np.nanmean(np.dstack([sw,s20,s50])

            # s = np.nanmean(np.stack((sw,s20,s50), axis=0), axis=0)
            # c = np.nanmean(np.stack((cw,c20,c50), axis=0), axis=0)


            # Re-block everything at 10x10 and blend scales only where the
            # native-scale window had no LiDAR samples.
            temp_lidar = blockshaped(lidar_agbd_cal, 10, 10)
            sw = blockshaped(sw, 10, 10)
            cw = blockshaped(cw, 10, 10)

            s20 = blockshaped(s20, 10, 10)
            c20 = blockshaped(c20, 10, 10)

            s50 = blockshaped(s50, 10, 10)
            c50 = blockshaped(c50, 10, 10)

            s = np.zeros(temp_lidar.shape)
            c = np.zeros(temp_lidar.shape)
            temp_mask = np.zeros(temp_lidar.shape)



            for win in tqdm(range(np.shape(temp_lidar)[0])):
                mask = temp_lidar[win,:,:].copy()
                mask[~np.isnan(mask)] = 1 
                temp_mask[win,:,:] = mask
                s[win,:,:] = sw[win,:,:]
                c[win,:,:] = cw[win,:,:]



                if np.all(temp_lidar[win,:,:]==0) or np.all(np.isnan(temp_lidar[win,:,:])):
                    mask = np.zeros(temp_mask[win,:,:].shape)
                    # mask[np.shape(mask)[0]//2,np.shape(mask)[1]//2]=1
                    np.fill_diagonal(mask, 1)
                    mask = np.flipud(mask)
                    np.fill_diagonal(mask, 1)
                    temp_mask[win,:,:] = mask
                    s[win,:,:] = np.nanmean(np.stack((sw[win,:,:],s20[win,:,:],s50[win,:,:]), axis=0), axis=0)
                    c[win,:,:] = np.nanmean(np.stack((cw[win,:,:],c20[win,:,:],c50[win,:,:]), axis=0), axis=0)

            # del temp_lidar,sw,s20,s50,cw,c20,c50

            temp_mask = unblockshaped(temp_mask, rows,cols)
            s = unblockshaped(np.array(s), rows,cols)
            c = unblockshaped(np.array(c), rows,cols)


            s = s*temp_mask
            c = c*temp_mask

            # c = np.nanmean([sw,c20,c50])
        else:
            raise ValueError('Invalid algorithm type!')


        # --- Strip the padding added above ---------------------------------
        # NOTE(review): from here on, names such as A/B/C, Ai..gamai, temp_inc,
        # temp_er, temp_s and biom_est exist only on the algo==1 path; the
        # algo 2/3/4 branches will raise NameError below -- confirm only
        # algo 1 is supported end-to-end.
        if rPad!=0 or cPad!=0:
            lidar_agbd_cal = unpad(lidar_agbd_cal, pad_width)
            temp_sig = unpad(temp_sig, pad_width)
            A = unpad(A, pad_width)
            B = unpad(B, pad_width)
            C = unpad(C, pad_width)
            alpha = unpad(alpha, pad_width)
            beta = unpad(beta, pad_width)
            gama = unpad(gama, pad_width)
            # temp_sig = unpad(temp_sig, pad_width)
            # temp_er = unpad(temp_er, pad_width)
            # temp_inc = unpad(temp_inc, pad_width)
            # temp_s = unpad(temp_s, pad_width)
            temp_lidar = unpad(temp_lidar, pad_width)
            Ai = unpad(Ai, pad_width)
            Bi = unpad(Bi, pad_width)
            Ci = unpad(Ci, pad_width)
            alphai = unpad(alphai, pad_width)
            betai = unpad(betai, pad_width)
            gamai = unpad(gamai, pad_width)

            biom_est = unpad(biom_est, pad_width)
            est_sig = unpad(est_sig, pad_width)
            error_list = unpad(error_list, pad_width)


        # --- Scatter plot of LiDAR vs estimated AGBD -----------------------
        # print("%0.2f sec"%(time.time()-t1))
        del temp_mask,temp_sig,temp_inc, temp_er, temp_s
        plt_agbd_est = biom_est.copy()
        plt_agbd_est[plt_agbd_est<=1] = np.nan
        plt_agbd_est[plt_agbd_est>args.agbdg-5] = np.nan

        plot_data(lidar_agbd_cal,plt_agbd_est,args.agbdl,args.agbdg-5,'LiDAR AGBD(Mg/Ha)','SAR estimatedAGBD (Mg/Ha)',pltName)

        # if args.validation>0 and args.validation<1:
        #     lidar_agbd_val = unpad(lidar_agbd_val, pad_width)
        #     write_tif(os.path.join(outDir,os.path.basename(backscatterFile).split('.tif')[0]+_postfix_str+'_val.tif'),
        #         lidar_agbd_val,os.path.basename(args.backscatterFile))
        #     plot_data(lidar_agbd_val,height,args.agbdl,args.agbdg,'Lidar height(m)','InSAR inverted (m)',pltvalName)

        t3 = time.time()


        # Write all output files
        print("[4/4] Writing output files... ",end=" ")


        write_tif(outAFile,A,os.path.basename(args.backscatterFile))
        write_tif(outBFile,B,os.path.basename(args.backscatterFile))
        write_tif(outCFile,C,os.path.basename(args.backscatterFile))

        write_tif(outalphaFile,alpha,os.path.basename(args.backscatterFile))
        write_tif(outbetaFile,beta,os.path.basename(args.backscatterFile))
        write_tif(outgamaFile,gama,os.path.basename(args.backscatterFile))

        write_tif(outAiFile,Ai,os.path.basename(args.backscatterFile))
        write_tif(outBiFile,Bi,os.path.basename(args.backscatterFile))
        write_tif(outCiFile,Ci,os.path.basename(args.backscatterFile))

        write_tif(outalphaiFile,alphai,os.path.basename(args.backscatterFile))
        write_tif(outbetaiFile,betai,os.path.basename(args.backscatterFile))
        write_tif(outgamaiFile,gamai,os.path.basename(args.backscatterFile))

        # write_tif(outcntFile,count,os.path.basename(args.backscatterFile))
        # write_tif(outrmsehvFile,rmse_hv,os.path.basename(args.backscatterFile))
        # write_tif(outrhvFile,r_hv,os.path.basename(args.backscatterFile))
        write_tif(outagbdFile,biom_est,os.path.basename(args.backscatterFile))
        write_tif(outsigFile,est_sig,os.path.basename(args.backscatterFile))
        write_tif(outhverrorFile,error_list,os.path.basename(args.backscatterFile))



        print("%0.2f sec"%(time.time()-t3))
        print("\n      Total computing time : %0.2f sec" % (time.time() - t0))


    # except Exception as e:
    #     print ("Task failed due to ", e)
    #     sys.exit(1)
diff --git a/src/sarbio/plotting.py b/src/sarbio/plotting.py
new file mode 100644
index 0000000000000000000000000000000000000000..8deeb956ccdbf3ca36532c8f7cfee87caf8f928e
--- /dev/null
+++ b/src/sarbio/plotting.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 27 14:40:29 2023
+
+@author: Narayanarao
+"""
+
+
+import argparse,os,sys,warnings,time
+
+import numpy as np, pandas as pd  
+from osgeo import gdal
+from scipy import interpolate
+import matplotlib.pyplot as plt
+from matplotlib import cm
+from matplotlib.colors import Normalize 
+
+from scipy.stats import linregress
+from skimage.util.shape import view_as_blocks
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from scipy.interpolate import griddata
+from tqdm import tqdm
+from scipy.interpolate import interpn
+
def rmse(predictions, targets):
    """Return the root-mean-square error between *predictions* and *targets*."""
    diff = predictions - targets
    return np.sqrt(np.mean(diff ** 2))
+    
def density_scatter( x , y, ax = None, sort = True, bins = 20, **kwargs )   :
    """
    Scatter plot of (x, y) colored by the local point density.

    The density is estimated with a 2-D histogram and re-sampled at every
    point via a 2-D spline; points falling outside the histogram range get
    zero density. Returns the axes the points were drawn on.
    """
    if ax is None:
        fig, ax = plt.subplots(dpi=300)

    # Histogram-based density estimate evaluated back at each sample.
    counts, x_edges, y_edges = np.histogram2d(x, y, bins=bins, density=True)
    x_centers = 0.5 * (x_edges[1:] + x_edges[:-1])
    y_centers = 0.5 * (y_edges[1:] + y_edges[:-1])
    z = interpn((x_centers, y_centers), counts, np.vstack([x, y]).T,
                method="splinef2d", bounds_error=False)

    # Out-of-range points interpolate to NaN; treat them as zero density.
    z[np.where(np.isnan(z))] = 0.0

    # Plot densest points last so they end up on top.
    if sort:
        order = z.argsort()
        x, y, z = x[order], y[order], z[order]

    norm = Normalize(vmin=np.min(z), vmax=np.max(z))
    ax.scatter(x, y, s=.1, c=z, cmap='jet', norm=norm, **kwargs)

    return ax
+
def plot_data(x,y,htl,htg,xlab,ylab,pltName):
    """
    Density scatterplot of paired rasters with a 1:1 line and r/RMSE/N stats.

    Parameters
    ----------
    x, y : numpy.ndarray
        Arrays of equal size; flattened copies are compared pixel-wise.
    htl, htg : float
        Lower/upper plotting and validity limits for ``y`` (values outside
        the range, and zeros, are discarded).
    xlab, ylab : str
        Axis labels.
    pltName : str or None
        Output file for the figure; when falsy, nothing is saved or logged.
        (Previously a None value crashed on the ``'_val' in pltName`` test.)
    """
    stats_fs = 12
    # flatten() returns copies, so the caller's arrays are not mutated.
    x, y = x.flatten(), y.flatten()
    y[y == 0] = np.nan
    y[y < htl] = np.nan
    y[y > htg] = np.nan
    # Keep only pairs where both values are finite.
    xy = np.vstack([x, y]).T
    xy = xy[~np.isnan(xy).any(axis=1)]
    x, y = xy[:, 0], xy[:, 1]
    del xy

    slope, intercept, r_value, p_value, std_err = linregress(x, y)
    RMSE = rmse(x, y)  # computed once; reused in annotation and log line

    fig, ax = plt.subplots(figsize=(4, 4), dpi=300)
    ax = density_scatter(x, y, ax=ax, bins=[50, 50])

    plt.xlabel(xlab)
    plt.ylabel(ylab)

    # 1:1 reference line across the plotted range.
    xx = np.linspace(0, htg + 1, 100)
    plt.plot(xx, xx, color='#000000', linewidth=.7, linestyle='--', zorder=0)
    ax.grid(color='k', linewidth=.4, alpha=0.3, linestyle='--', zorder=0)
    plt.xlim([htl, htg])
    plt.ylim([htl, htg])
    ax.text(0., 0.93,
            "         $ r $  = %0.2f" % (r_value),
            transform=ax.transAxes,
            fontsize=stats_fs,
            color='#000000',
            )
    ax.text(0., 0.85,
            " RMSE  = %0.2f" % (RMSE),
            transform=ax.transAxes,
            fontsize=stats_fs,
            color='#000000',
            )
    ax.text(0., 0.77,
            " N  = %d" % (np.size(x)),
            transform=ax.transAxes,
            fontsize=stats_fs,
            color='#000000',
            )
    plt.tight_layout()
    # Only save -- and only report a saved file -- when a path was given.
    if pltName:
        plt.savefig(pltName)
        print('%s %s; \t r = %0.2f, RMSE = %0.2f N = %d' % ("      Scatterplot saved to: ", pltName, r_value, RMSE, np.size(x)))
\ No newline at end of file
diff --git a/src/sarbio/rst_io.py b/src/sarbio/rst_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..d14ca6153c3b562eeca2218bcf3ad7d22da3278f
--- /dev/null
+++ b/src/sarbio/rst_io.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 27 14:40:29 2023
+
+@author: Narayanarao
+"""
+
+import argparse,os,sys,warnings,time
+
+import numpy as np, pandas as pd  
+from osgeo import gdal
+from scipy import interpolate
+import matplotlib.pyplot as plt
+from matplotlib import cm
+from matplotlib.colors import Normalize 
+
+from scipy.stats import linregress
+from skimage.util.shape import view_as_blocks
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from scipy.interpolate import griddata
+from tqdm import tqdm
+from scipy.interpolate import interpn
+
def read_bin(file):
    """
    Read band 1 of a GDAL-readable raster into a 2-D numpy array.

    Parameters
    ----------
    file : str
        Path to the raster file.

    Returns
    -------
    numpy.ndarray
        Pixel values of the first band.
    """
    ds = gdal.Open(file)
    band = ds.GetRasterBand(1)
    arr = band.ReadAsArray()
    # GDAL has no context-manager close; dereference explicitly so the
    # dataset handle (and any file lock) is released promptly.
    band = None
    ds = None
    return arr
+    
def write_tif(file,wdata,refData):
    """
    Write a 2-D array to a DEFLATE-compressed Float32 GeoTIFF, copying the
    geotransform and projection from a reference raster.

    Parameters
    ----------
    file : str
        Output GeoTIFF path.
    wdata : numpy.ndarray
        2-D array of pixel values; its shape sets the raster size.
    refData : str
        Path to a GDAL-readable raster whose georeferencing is copied.
    """
    ds = gdal.Open(refData)
    [cols, rows] = wdata.shape

    driver = gdal.GetDriverByName("GTiff")
    outdata = driver.Create(file, rows, cols, 1, gdal.GDT_Float32,
                            options=['COMPRESS=DEFLATE', 'PREDICTOR=2', 'ZLEVEL=9', ])
    outdata.SetGeoTransform(ds.GetGeoTransform())  # same geotransform as reference
    outdata.SetProjection(ds.GetProjection())      # same projection as reference

    outdata.SetDescription(file)
    outdata.GetRasterBand(1).WriteArray(wdata)
    # outdata.GetRasterBand(1).SetNoDataValue(np.NaN)##if you want these values transparent
    outdata.FlushCache()  # saves to disk
    # Dereference both datasets: GDAL only guarantees the output file is
    # fully finalized once the handle is released, and the leaked reference
    # dataset previously kept refData open for the process lifetime.
    outdata = None
    ds = None
+    
def write_bin(file,wdata,refData):
    """
    Write a 2-D array to an ENVI flat-binary raster (Float32), copying the
    geotransform and projection from a reference raster.

    Parameters
    ----------
    file : str
        Output ENVI raster path.
    wdata : numpy.ndarray
        2-D array of pixel values; its shape sets the raster size.
    refData : str
        Path to a GDAL-readable raster whose georeferencing is copied.
    """
    ds = gdal.Open(refData)
    [cols, rows] = wdata.shape

    driver = gdal.GetDriverByName("ENVI")
    outdata = driver.Create(file, rows, cols, 1, gdal.GDT_Float32)
    outdata.SetGeoTransform(ds.GetGeoTransform())  # same geotransform as reference
    outdata.SetProjection(ds.GetProjection())      # same projection as reference

    outdata.SetDescription(file)
    outdata.GetRasterBand(1).WriteArray(wdata)
    # outdata.GetRasterBand(1).SetNoDataValue(np.NaN)##if you want these values transparent
    outdata.FlushCache()  # saves to disk
    # Dereference both datasets so GDAL finalizes the file and releases
    # the reference raster's handle (previously leaked).
    outdata = None
    ds = None
+
diff --git a/src/sarbio/sarbio.py b/src/sarbio/sarbio.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6cca4a1b5562a7b9894d60de4ec559cefaf1fe4
--- /dev/null
+++ b/src/sarbio/sarbio.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 27 14:40:29 2023
+
+@author: Narayanarao
+"""
+
+
+import argparse,os,sys,warnings,time
+
+import numpy as np, pandas as pd
+from osgeo import gdal
+from scipy import interpolate
+import matplotlib.pyplot as plt
+from matplotlib import cm
+from matplotlib.colors import Normalize 
+
+from scipy.stats import linregress
+from skimage.util.shape import view_as_blocks
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from scipy.interpolate import griddata
+from tqdm import tqdm
+from scipy.interpolate import interpn
gdal.UseExceptions()
warnings.filterwarnings('ignore')
# NOTE(review): this later 'error' filter is matched before the 'ignore'
# filter above, so in effect ALL warnings are promoted to exceptions and
# the 'ignore' line is dead -- confirm that is intentional.
warnings.filterwarnings('error')
np.seterr(divide='ignore', invalid='ignore')


from args_in import agb_inverse
##########################################################################
# Command-line interface. Help strings below were copy-pasted from other
# options in the original; they now describe the destination they set.
parser = argparse.ArgumentParser()

parser.add_argument("-s", "--backscatter", dest = "backscatterFile", help="SAR backscatter raster file")
parser.add_argument("-i", "--inc", dest = "incFile",  help="incidence angle raster file")
parser.add_argument("-l", "--lidar", dest = "lidarFile",  help="Calibration height file e.g. LiDAR heights in (m)")
parser.add_argument("-sm", "--sm", dest = "smFile",  help="Soil moisture file [0,1]")
parser.add_argument("-rh", "--rhs", dest = "rhsFile",  help="Roughness rms file(m)")
parser.add_argument("-ll", "--lower_limit",dest ="agbdl", default = None ,help="lower limit of canopy height (m)", type=int)
parser.add_argument("-ul", "--upper_limit",dest = "agbdg", default = None,help="upper limit of canopy height (m)", type=int)
parser.add_argument("-w", "--window",dest = "window_size", default = 100, help="Size", type=int)
parser.add_argument("-val", "--validation",dest = "validation", default = 0, help="fraction to split cross validation", type=float)
parser.add_argument("-al", "--algorithm",dest = "algo", default = 1, help="Algorithm Type", type=int)
parser.add_argument("-ol", "--overlap",dest = "window_overlap", default = 0, help="window overlap fraction", type=float)

# Parsed at import time (as in the original); agb_inverse reads the
# attributes named by dest= above.
args = parser.parse_args()


if __name__ == "__main__":

    agb_inverse(args)
diff --git a/src/sarbio/utils.py b/src/sarbio/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..473c240d90882325113cdbaeaf67af06e6fa12d8
--- /dev/null
+++ b/src/sarbio/utils.py
@@ -0,0 +1,142 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 27 14:40:29 2023
+
+@author: Narayanarao
+"""
+
+import argparse,os,sys,warnings
+
+import numpy as np, pandas as pd
+from osgeo import gdal
+from scipy import interpolate
+import matplotlib.pyplot as plt
+from matplotlib import cm
+from matplotlib.colors import Normalize 
+
+from scipy.stats import linregress
+from skimage.util.shape import view_as_blocks
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from scipy.interpolate import griddata
+from tqdm import tqdm
+from scipy.interpolate import interpn
+from sklearn.model_selection import train_test_split
+# from pykrige.ok import OrdinaryKriging
+
def blockshaped(arr, nrows, ncols):
    """
    Split a 2-D array into tiles of shape (nrows, ncols).

    Returns an array of shape (n, nrows, ncols) with
    n * nrows * ncols == arr.size; tiles are ordered row-major and each
    preserves the original "physical" layout. The array height must be a
    multiple of nrows and its width a multiple of ncols.
    """
    height, width = arr.shape
    tiled = arr.reshape(height // nrows, nrows, width // ncols, ncols)
    return tiled.swapaxes(1, 2).reshape(-1, nrows, ncols)
+
+
def unblockshaped(arr, h, w):
    """
    Reassemble (n, nrows, ncols) tiles into a single (h, w) array.

    Inverse of ``blockshaped``: the tiles are laid back out in row-major
    block order, so h * w == arr.size and h must be a multiple of nrows,
    w a multiple of ncols.
    """
    n, nrows, ncols = arr.shape
    grid = arr.reshape(h // nrows, w // ncols, nrows, ncols)
    return grid.swapaxes(1, 2).reshape(h, w)
+
def unpad(x, pad_width):
    """
    Undo an ``np.pad`` call: strip pad_width entries from each axis of x.

    ``pad_width`` is a sequence of (before, after) pairs, one per axis,
    in the same format accepted by ``np.pad``.
    """
    trims = tuple(
        # an 'after' of 0 must map to slice(before, None), not slice(before, 0)
        slice(before, -after if after else None)
        for before, after in pad_width
    )
    return x[trims]
+
+
def spatial_intp_lin(array):
    """
    Fill invalid (NaN/inf) pixels of a 2-D array by linear interpolation.

    The valid pixels act as scattered samples on the pixel grid and are
    re-sampled at every grid location with ``griddata``. Cells outside the
    convex hull of the valid samples remain NaN.
    """
    cols = np.arange(0, array.shape[1])
    rows = np.arange(0, array.shape[0])

    # Mask invalid values and keep only the valid samples.
    masked = np.ma.masked_invalid(array)
    grid_x, grid_y = np.meshgrid(cols, rows)
    valid = ~masked.mask
    sample_x = grid_x[valid]
    sample_y = grid_y[valid]
    sample_z = masked[valid]

    return interpolate.griddata(
        (sample_x, sample_y), sample_z.ravel(), (grid_x, grid_y), method='linear'
    )
+
+
+
+# def spatial_intp_kriging(array):
+#     x = np.arange(0, array.shape[1])
+#     y = np.arange(0, array.shape[0])
+#     # Mask invalid values
+#     array = np.ma.masked_invalid(array)
+#     xx, yy = np.meshgrid(x, y)
+    
+#     # Get only the valid values
+#     x1 = xx[~array.mask]
+#     y1 = yy[~array.mask]
+#     newarr = array[~array.mask]
+    
+#     # Perform ordinary kriging
+#     OK = OrdinaryKriging(x1, y1, newarr, variogram_model='linear', verbose=False, enable_plotting=False)
+#     z, ss = OK.execute('grid', x, y)
+    
+#     return z
+
def spatial_intp_idw(array, power=0.5):
    """
    Fill a 2-D array by inverse-distance weighting (IDW) from its valid pixels.

    Every grid cell receives the weighted mean of all valid (finite) pixels
    with weights 1 / distance**power; a cell that coincides with a valid
    pixel keeps that pixel's value exactly (same rule as before). The former
    per-cell Python loop (O(H*W*N) interpreter-level work) is replaced by a
    single vectorized numpy pass with identical results.

    Parameters
    ----------
    array : numpy.ndarray
        2-D array whose NaN/inf entries are treated as missing.
    power : float, optional
        IDW exponent; larger values localize the interpolation (default 0.5).

    Returns
    -------
    numpy.ndarray
        Array of the same shape with every cell filled by IDW.
    """
    x = np.arange(0, array.shape[1])
    y = np.arange(0, array.shape[0])
    # Mask invalid values
    masked = np.ma.masked_invalid(array)
    xx, yy = np.meshgrid(x, y)

    # Coordinates and values of the valid samples.
    x1 = xx[~masked.mask]
    y1 = yy[~masked.mask]
    values = np.asarray(masked[~masked.mask], dtype=float)

    # Pairwise distances: one row per grid cell (raveled row-major, which
    # matches the original "for j in y / for i in x" fill order), one
    # column per valid sample. Memory is O(H*W*N).
    gx = xx.ravel()[:, None]
    gy = yy.ravel()[:, None]
    dist = np.sqrt((x1[None, :] - gx) ** 2 + (y1[None, :] - gy) ** 2)

    # Zero-distance hits are excluded from the weighted mean and handled
    # exactly like the original: the coincident sample's value is used.
    zero_hit = dist == 0
    safe_dist = np.where(zero_hit, 1.0, dist)
    weights = np.where(zero_hit, 0.0, 1.0 / safe_dist ** power)

    est = (weights * values[None, :]).sum(axis=1) / weights.sum(axis=1)
    has_hit = zero_hit.any(axis=1)
    first_hit = zero_hit.argmax(axis=1)
    est[has_hit] = values[first_hit[has_hit]]

    return est.reshape(array.shape)
+
+
def split_sample(a,frac=0.3):
    """
    Split the valid (non-NaN) pixels of a 2-D array into calibration and
    validation layers.

    Parameters
    ----------
    a : numpy.ndarray
        2-D input array; NaN marks pixels excluded from both outputs.
    frac : float, optional
        Fraction of valid pixels assigned to the validation layer
        (default 0.3). The split is deterministic (fixed random_state).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        (calibration, validation) arrays of the same shape as ``a``; each
        valid pixel appears in exactly one of the two, every other cell NaN.
    """
    ind = np.argwhere(~np.isnan(a))
    idx_cal, idx_val = train_test_split(ind, test_size=frac, random_state=55)

    # Initialise with NaN instead of 0: the previous zero-init plus
    # "a_val[a_val==0]=np.nan" wiped legitimate zero-valued pixels from
    # BOTH outputs. Vectorized fancy indexing also replaces the per-pixel
    # Python loops.
    a_cal = np.full(np.shape(a), np.nan)
    a_val = np.full(np.shape(a), np.nan)

    a_val[idx_val[:, 0], idx_val[:, 1]] = a[idx_val[:, 0], idx_val[:, 1]]
    a_cal[idx_cal[:, 0], idx_cal[:, 1]] = a[idx_cal[:, 0], idx_cal[:, 1]]

    return a_cal,a_val
\ No newline at end of file