commit e0dd59f90ea3737c2684db7985a29151c034a3be Author: w12 Date: Wed Jan 1 20:47:22 2025 +0100 nolambocoin new miner diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..ee9a246 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,35 @@ +name: Build CPU miner + +on: [push] + +jobs: + build: + + runs-on: ubuntu-18.04 + + steps: + - uses: actions/checkout@v1 + - name: Get static libcurl-dev package + run: cd deps-linux64/ && ls && chmod +x ./deps-linux64.sh && ./deps-linux64.sh + - name: Set-up autoconf + run: chmod +x ./autogen.sh && ./autogen.sh + - name: configure + run: chmod +x ./configure && ./configure CFLAGS="-Wall -O2 -fomit-frame-pointer" LDFLAGS="-static" CXXFLAGS="$CFLAGS -std=gnu++11" --with-curl=/usr/local/ --with-crypto=/usr/local/ssl + - name: make + run: make + - name: make check + run: make check + - name: CPU test + run: ./sugarmaker --help + - name: Zips + run: zip --junk-paths cpuminer sugarmaker + - name: Upload Release Asset + id: upload-release-asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.get_release.outputs.upload_url }} + asset_path: ./cpuminer.zip + asset_name: cpuminer.zip + asset_content_type: application/zip diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..91610a3 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,42 @@ +name: Release CPU miner + +on: + release: + types: [created] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - name: Get release + id: get_release + uses: bruceadams/get-release@v1.2.3 + env: + GITHUB_TOKEN: ${{ github.token }} + - uses: actions/checkout@v1 + - name: Get libcurl-dev package + run: sudo apt-get install libcurl4-openssl-dev + - name: Set-up autoconf + run: chmod +x ./autogen.sh && ./autogen.sh + - name: configure + run: chmod +x ./configure && ./configure --with-crypto --with-curl + - name: make + run: make + - name: make check + run: make check + - name: CPU test + run: ./sugarmaker --help + - name: Zips + run: zip --junk-paths cpuminer sugarmaker + - name: Upload Release Asset + id: upload-release-asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.get_release.outputs.upload_url }} + asset_path: ./cpuminer.zip + asset_name: cpuminer.zip + asset_content_type: application/zip diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fdceb7b --- /dev/null +++ b/.gitignore @@ -0,0 +1,47 @@ + +sugarmaker +sugarmaker.exe +*.o + +# old binary +minerd +minerd.exe + +autom4te.cache +.deps + +Makefile +Makefile.in +INSTALL +aclocal.m4 +configure +configure.lineno +depcomp +missing +install-sh +stamp-h1 +cpuminer-config.h* +compile +config.log +config.status +config.status.lineno +config.guess +config.sub + +mingw32-config.cache + +# yespower +yespower-1.0.1*/.dirstamp + +# release +sugarmaker-v*/ + +# deps-win64 +#deps-win64/curl-* +#deps-win64/pthread-* +#deps-win64/x86_64-w64-mingw32/ + +# deps-win32 +#deps-win32/curl-* +#deps-win32/pthread-* +#deps-win32/i686-w64-mingw32/ diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..9621f6c --- /dev/null +++ b/AUTHORS @@ -0,0 +1,9 @@ +Jeff Garzik + +ArtForz + +pooler + +Alexander Peslyak + +Kanon <60179867+decryp2kanon@users.noreply.github.com> diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..d60c31a --- /dev/null +++ b/COPYING @@ -0,0 +1,340 @@ + 
GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) 
Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/ChangeLog b/ChangeLog new file mode 100644 index 0000000..326703b --- /dev/null +++ b/ChangeLog @@ -0,0 +1 @@ +See git repository ('git log') for full changelog. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..e230e1d --- /dev/null +++ b/Dockerfile @@ -0,0 +1,22 @@ +# +# Dockerfile for sugarmaker +# usage: docker run creack/cpuminer --url xxxx --user xxxx --pass xxxx +# ex: docker run creack/cpuminer --url stratum+tcp://ltc.pool.com:80 --user creack.worker1 --pass abcdef +# +# + +FROM ubuntu:16.04 +MAINTAINER kanon <60179867+decryp2kanon@users.noreply.github.com> + +RUN apt-get update -qq && \ + apt-get install -qqy automake libcurl4-openssl-dev git make gcc + +RUN git clone https://github.com/decryp2kanon/sugarmaker + +RUN cd sugarmaker && \ + ./autogen.sh && \ + ./configure CFLAGS='-O2 -fomit-frame-pointer' && \ + make + +WORKDIR /sugarmaker +ENTRYPOINT ["./sugarmaker"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..8dada6c --- /dev/null +++ b/LICENSE @@ -0,0 +1,3 @@ +sugarmaker is available under the terms of the GNU Public License version 2. + +See COPYING for details. diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..ef42eb5 --- /dev/null +++ b/Makefile.am @@ -0,0 +1,35 @@ +AUTOMAKE_OPTIONS = subdir-objects + +if WANT_JANSSON +JANSSON_INCLUDES= -I$(top_srcdir)/compat/jansson +else +JANSSON_INCLUDES= +endif + +EXTRA_DIST = example-cfg.json + +SUBDIRS = compat + +bin_PROGRAMS = sugarmaker + +dist_man_MANS = sugarmaker.1 + +sugarmaker_SOURCES = elist.h miner.h compat.h \ + cpu-miner.c util.c \ + sha2.c \ + yespower-1.0.1/sha256.c yespower-1.0.1/yespower-opt.c \ + YespowerSugar.c \ + YespowerIso.c \ + YespowerNull.c \ + YespowerUrx.c \ + YespowerLitb.c \ + YespowerIots.c \ + YespowerItc.c \ + YespowerYtn.c \ + yespower-1.0.1-power2b/sha256-p2b.c yespower-1.0.1-power2b/yespower-opt-p2b.c yespower-1.0.1-power2b/blake2b.c \ + YespowerMbc.c \ + YespowerARM.c + +sugarmaker_LDFLAGS = $(PTHREAD_FLAGS) +sugarmaker_LDADD = @LIBCURL@ @JANSSON_LIBS@ @PTHREAD_LIBS@ @WS2_LIBS@ +sugarmaker_CPPFLAGS = $(PTHREAD_FLAGS) @CPPFLAGS@ $(JANSSON_INCLUDES) diff --git a/NEWS b/NEWS new file mode 100644 index 0000000..30ac443 --- /dev/null +++ b/NEWS @@ -0,0 +1,13 @@ +Version 1.0 - 01.01.2025 + +- Add YespowerARM for NoLamboCoin + * YespowerSugar: Sugarchain (default) + * YespowerIso: IsotopeC + * YespowerNull: CranePay, Bellcoin, Veco, SwampCoin + * YespowerUrx: UraniumX + * YespowerLitb: LightBit + * YespowerIots: IOTS + * YespowerItc: Intercoin + * YespowerMbc: power2b for MicroBitcoin + * YespowerARM: NolamboCoin + diff --git a/README.md b/README.md new file mode 100644 index 0000000..58d03fe --- /dev/null +++ b/README.md @@ -0,0 +1,90 @@ +# Yenten ARM miner (yespowerr16 algo) + +cmd for test Yenten mining: +``` +sugarmaker.exe -a yespowerr16 -o stratum+tcp://cpu-pool.com:63368 -u wallet_address +``` + +![GitHub All 
Releases](https://img.shields.io/github/downloads/yentencoin/yenten-arm-miner-yespowerr16/total)
+
+This is a multi-threaded CPU miner for ***Yenten Coin***, a fork of sugarmaker, which is in turn a fork of solardiz's (Resistance) fork of pooler's (Litecoin) fork of Jeff Garzik's (Bitcoin) reference cpuminer. This fork supports only Yespower-variant algorithms.
+
+License: [GPLv2](https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html). See COPYING for details.
+
+Git tree: https://github.com/yentencoin/yenten-arm-miner-yespowerr16
+
+### Build dependencies:
+```
+autoconf
+automake
+GNU make
+gcc
+libcurl https://curl.haxx.se/libcurl/
+```
+
+- On recent Debian and Ubuntu, these can be installed with:
+```
+sudo apt-get install build-essential libcurl4-openssl-dev autotools-dev automake libtool
+```
+
+### Basic Unix build instructions:
+```
+./autogen.sh
+./configure CFLAGS="-Wall -O2 -fomit-frame-pointer" CXXFLAGS="$CFLAGS -std=gnu++11"
+make
+```
+
+Notes for AIX users:
+- To build a 64-bit binary, export `OBJECT_MODE=64`
+- GNU-style long options are not supported, but are accessible via a configuration file
+
+### Basic Windows build instructions, using MinGW:
+- Install MinGW and the MSYS Developer Tool Kit (http://www.mingw.org/)
+  * Make sure you have `mstcpip.h` in `MinGW\include`
+- If using MinGW-w64, install `pthreads-w64`
+- Install `libcurl devel` (https://curl.haxx.se/download.html)
+  * Make sure you have `libcurl.m4` in `MinGW\share\aclocal`
+  * Make sure you have `curl-config` in `MinGW\bin`
+- In the MSYS shell, run:
+  ```
+  ./autogen.sh
+  LIBCURL='-lcurldll' ./configure
+  make
+  ```
+
+### Usage instructions:
+Run `sugarmaker --help` to see all options. You can mine with the following commands:
+
+- Mainnet (Solo)
+```
+./sugarmaker -a yespowerr16 -o http://127.0.0.1:9982 -u user -p pass --coinbase-addr=wallet_address -t1
+```
+- Mainnet (Stratum Pool)
+```
+./sugarmaker -a yespowerr16 -o stratum+tcp://cpu-pool.com:63368 -u wallet_address -t1
+```
+
+(Omit the leading `./` if you're on Windows.) For solo mining, you need a *fully-synced node*
+running locally with an RPC username and password configured,
+
+- e.g. by putting the following in your `.yenten/yenten.conf`:
+```
+rpcbind=127.0.0.1
+rpcallowip=127.0.0.0/8
+rpcuser=user
+rpcpassword=pass
+```
+
+- Connecting through a proxy:
+  * Use the `--proxy` option.
+  * To use a SOCKS proxy, add a `socks4://` or `socks5://` prefix to the proxy host.
+  * Protocols `socks4a` and `socks5h`, allowing remote name resolution, are also available since libcurl 7.18.0.
+  * If no protocol is specified, the proxy is assumed to be an HTTP proxy.
+  * When the `--proxy` option is not used, the program honors the `http_proxy` and `all_proxy` environment variables.
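+
+- Options can also be loaded from a JSON configuration file. A minimal sketch, assuming the `-c`/`--config` support inherited from pooler's cpuminer (see `example-cfg.json` referenced in `Makefile.am`), with keys mirroring the long option names; the pool URL and wallet below are the placeholders from the examples above, and the `pass` value is whatever your pool expects:
+```
+{
+    "url": "stratum+tcp://cpu-pool.com:63368",
+    "user": "wallet_address",
+    "pass": "x",
+    "algo": "yespowerr16",
+    "threads": 1
+}
+```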
+
+### Author
+- Jeff Garzik
+- Pooler
+- Alexander Peslyak
+- Kanon <60179867+decryp2kanon@users.noreply.github.com>
+- Yentencoin
diff --git a/YespowerARM.c b/YespowerARM.c
new file mode 100644
index 0000000..8372abe
--- /dev/null
+++ b/YespowerARM.c
@@ -0,0 +1,110 @@
+#include "cpuminer-config.h"
+#include "miner.h"
+
+#include "yespower-1.0.1/yespower.h"
+#include "yespower.h"
+#include "sysendian.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+
+const yespower_params_t *select_yespower_params(const char *cpu_info) {
+#ifdef __arm__
+    if (strstr(cpu_info, "BCM2837") || strstr(cpu_info, "BCM2711")) {
+        static const yespower_params_t params_rpi = {
+            .version = YESPOWER_1_0,
+            .N = 2048,
+            .r = 8,
+            .pers = (const uint8_t *)"Raspberry",
+            .perslen = 9
+        };
+        return &params_rpi;
+    } else if (strstr(cpu_info, "BCM2712")) {
+        static const yespower_params_t params_rpi5 = {
+            .version = YESPOWER_1_0,
+            .N = 3072,
+            .r = 12,
+            .pers = (const uint8_t *)"Raspberry5",
+            .perslen = 10
+        };
+        return &params_rpi5;
+    } else {
+        // generic ARM server
+        static const yespower_params_t params_arm_server = {
+            .version = YESPOWER_1_0,
+            .N = 4096,
+            .r = 16,
+            .pers = (const uint8_t *)"ARMServer",
+            .perslen = 9
+        };
+        return &params_arm_server;
+    }
+#else
+    static const yespower_params_t params_default = {
+        .version = YESPOWER_1_0,
+        .N = 4096,
+        .r = 16,
+        .pers = (const uint8_t *)"Default",
+        .perslen = 7
+    };
+    return &params_default;
+#endif
+}
+static void get_cpu_info(char *cpu_info, size_t max_size) {
+#ifdef __arm__
+    FILE *cpuinfo_file = fopen("/proc/cpuinfo", "r");
+    if (cpuinfo_file) {
+        fread(cpu_info, 1, max_size - 1, cpuinfo_file);
+        fclose(cpuinfo_file);
+        cpu_info[max_size - 1] = '\0';
+    } else {
+        strncpy(cpu_info, "Unknown ARM", max_size);
+    }
+#else
+    strncpy(cpu_info, "x86/x64", max_size);
+#endif
+}
+
+int yespower_hash(const char *input, char *output) {
+    char cpu_info[256] = {0};
+    get_cpu_info(cpu_info, sizeof(cpu_info));
+
+    const yespower_params_t *params = select_yespower_params(cpu_info);
+    return yespower_tls((const uint8_t *)input, 80, params, (yespower_binary_t *)output);
+}
+
+int scanhash_arm_yespower(int thr_id, uint32_t *data, uint32_t *target, uint32_t max_nonce, unsigned long *hashes_done) {
+    uint32_t nonce = data[19];  // the nonce is the 20th element of the data
+    unsigned char hash[32];     // buffer for the computed hash
+    int result = 0;             // return value
+    *hashes_done = 0;           // initialize the hash counter
+
+    // Initialize the CPU-information buffer
+    char cpu_info[256] = {0};
+    get_cpu_info(cpu_info, sizeof(cpu_info));
+
+    // Select the yespower parameters based on the CPU information
+    const yespower_params_t *params = select_yespower_params(cpu_info);
+
+    for (; nonce < max_nonce; nonce++) {
+        data[19] = nonce;  // update the nonce
+
+        // Compute the hash with yespower
+        if (yespower_tls((const uint8_t *)data, 80, params, (yespower_binary_t *)hash) != 0) {
+            fprintf(stderr, "Thread %d: yespower computation failed.\n", thr_id);
+            break;
+        }
+
+        // Check whether the computed hash is below the target
+        if (memcmp(hash, target, 32) <= 0) {
+            printf("Thread %d: Valid hash found!
Nonce: %u\n", thr_id, nonce); + result = 1; + break; + } + + (*hashes_done)++; + } + + data[19] = nonce; // Stelle die Nonce wieder her + return result; +} diff --git a/YespowerIots.c b/YespowerIots.c new file mode 100644 index 0000000..ca30f5b --- /dev/null +++ b/YespowerIots.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. + */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_iots_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 2048, + .r = 32, + .pers = (const uint8_t *)"Iots is committed to the development of IOT", + .perslen = 43 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerIso.c b/YespowerIso.c new file mode 100644 index 0000000..b683d29 --- /dev/null +++ b/YespowerIso.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. + */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_iso_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 2048, + .r = 32, + .pers = (const uint8_t *)"IsotopeC", + .perslen = 8 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerItc.c b/YespowerItc.c new file mode 100644 index 0000000..b79dab5 --- /dev/null +++ b/YespowerItc.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. + */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_itc_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 2048, + .r = 32, + .pers = (const uint8_t *)"InterITC", + .perslen = 8 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerLitb.c b/YespowerLitb.c new file mode 100644 index 0000000..111acc3 --- /dev/null +++ b/YespowerLitb.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. 
+ */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_litb_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 2048, + .r = 32, + .pers = (const uint8_t *)"LITBpower: The number of LITB working or available for proof-of-work mining", + .perslen = 73 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerMbc.c b/YespowerMbc.c new file mode 100644 index 0000000..b5e24e2 --- /dev/null +++ b/YespowerMbc.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. 
+ */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1-power2b/yespower-p2b.h" + +#include +#include +#include + +int scanhash_mbc_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0_BLAKE2B, + .N = 2048, + .r = 32, + .pers = (const uint8_t *)"Now I am become Death, the destroyer of worlds", + .perslen = 46 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t_p2b yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls_p2b(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerNull.c b/YespowerNull.c new file mode 100644 index 0000000..bb0ec5e --- /dev/null +++ b/YespowerNull.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. 
+ */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_null_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 2048, + .r = 32, + .pers = NULL, + .perslen = 0 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerSugar.c b/YespowerSugar.c new file mode 100644 index 0000000..6ff9318 --- /dev/null +++ b/YespowerSugar.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. 
+ */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_sugar_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 2048, + .r = 32, + .pers = (const uint8_t *)"Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote", + .perslen = 74 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerUrx.c b/YespowerUrx.c new file mode 100644 index 0000000..14420dd --- /dev/null +++ b/YespowerUrx.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. 
+ */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_urx_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 2048, + .r = 32, + .pers = (const uint8_t *)"UraniumX", + .perslen = 8 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/YespowerYtn.c b/YespowerYtn.c new file mode 100644 index 0000000..6ca49e1 --- /dev/null +++ b/YespowerYtn.c @@ -0,0 +1,84 @@ +/* + * Copyright 2011 ArtForz, 2011-2014 pooler, 2018 The Resistance developers, 2020 The Sugarchain Yumekawa developers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is loosly based on a tiny portion of pooler's cpuminer scrypt.c. 
+ */ + +#include "cpuminer-config.h" +#include "miner.h" + +#include "yespower-1.0.1/yespower.h" + +#include +#include +#include + +int scanhash_ytn_yespower(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, + uint32_t max_nonce, unsigned long *hashes_done) +{ + static const yespower_params_t params = { + .version = YESPOWER_1_0, + .N = 4096, + .r = 16, + .pers = NULL, + .perslen = 0 + }; + union { + uint8_t u8[8]; + uint32_t u32[20]; + } data; + union { + yespower_binary_t yb; + uint32_t u32[7]; + } hash; + uint32_t n = pdata[19] - 1; + const uint32_t Htarg = ptarget[7]; + int i; + + for (i = 0; i < 19; i++) + be32enc(&data.u32[i], pdata[i]); + + do { + be32enc(&data.u32[19], ++n); + + if (yespower_tls(data.u8, 80, ¶ms, &hash.yb)) + abort(); + + if (le32dec(&hash.u32[7]) <= Htarg) { + for (i = 0; i < 7; i++) + hash.u32[i] = le32dec(&hash.u32[i]); + if (fulltest(hash.u32, ptarget)) { + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 1; + } + } + } while (n < max_nonce && !work_restart[thr_id].restart); + + *hashes_done = n - pdata[19] + 1; + pdata[19] = n; + return 0; +} diff --git a/autogen.sh b/autogen.sh new file mode 100644 index 0000000..989604a --- /dev/null +++ b/autogen.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +# You need autoconf 2.5x, preferably 2.57 or later +# You need automake 1.7 or later. 1.6 might work. + +set -e + +aclocal +autoheader +automake --gnu --add-missing --copy +autoconf + diff --git a/build-aarch64.sh b/build-aarch64.sh new file mode 100644 index 0000000..114b721 --- /dev/null +++ b/build-aarch64.sh @@ -0,0 +1,51 @@ +# try on virtualbox ubuntu 16.04 +# https://lxadm.com/Static_compilation_of_cpuminer + +# CLEAN +make distclean || echo clean +rm -f config.status + +# DEPENDS + +## OPENSSL +# wget https://www.openssl.org/source/openssl-1.1.0g.tar.gz +# tar -xvzf openssl-1.1.0g.tar.gz +# cd openssl-1.1.0g/ +# ./config no-shared +# make -j$(nproc) +# sudo make install +# cd .. + +## CURL +# wget https://github.com/curl/curl/releases/download/curl-7_57_0/curl-7.57.0.tar.gz +# tar -xvzf curl-7.57.0.tar.gz +# cd curl-7.57.0/ +# .buildconf | grep "buildconf: OK" +# ./configure --disable-shared | grep "Static=yes" +# make -j$(nproc) +# sudo make install +# cd .. 
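+
+## CROSS-COMPILE (optional sketch, not part of the original flow)
+# The steps below assume a native aarch64 build (e.g. on an aarch64 box or VM).
+# If cross-compiling from an x86_64 host instead, the usual approach is autoconf's
+# --host switch with a cross toolchain (e.g. Debian/Ubuntu's gcc-aarch64-linux-gnu
+# package); the static OpenSSL/curl above must then be built with that same toolchain:
+# ./autogen.sh
+# ./configure --host=aarch64-linux-gnu CC=aarch64-linux-gnu-gcc \
+#     CFLAGS="-Wall -O2 -fomit-frame-pointer" LDFLAGS="-static"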
+ +# BUILD +./autogen.sh +# CFLAGS="-Wall -O2 -fomit-frame-pointer" ./configure +# ./configure --with-curl="/usr/local/" --with-crypto="/usr/local/" CFLAGS="-Wall -O2 -fomit-frame-pointer" LDFLAGS="-static -I/usr/local/lib/ -L/usr/local/lib/libcrypto.a" LIBS="-lssl -lcrypto -lz -lpthread -ldl" CFLAGS="-DCURL_STATICLIB" --with-crypto +./configure --with-curl="/usr/local/" --with-crypto="/usr/local/" CFLAGS="-Wall -O2 -fomit-frame-pointer" LDFLAGS="-static" LIBS="-ldl -lz" +make +strip -s sugarmaker + +# CHECK STATIC +file sugarmaker | grep "statically linked" + +# PACKAGE +RELEASE=sugarmaker-v2.5.0-sugar4-aarch64 +rm -rf $RELEASE +mkdir $RELEASE +cp ./mining-script/sh/*.sh $RELEASE/ +cp sugarmaker $RELEASE/ + +# SIGN +zip -X $RELEASE/$RELEASE.zip $RELEASE/* +sha256sum $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +gpg --digest-algo sha256 --clearsign $RELEASE/$RELEASE +rm $RELEASE/$RELEASE && cat $RELEASE/$RELEASE.asc diff --git a/build-armv7l.sh b/build-armv7l.sh new file mode 100644 index 0000000..e55c4c5 --- /dev/null +++ b/build-armv7l.sh @@ -0,0 +1,51 @@ +# try on virtualbox ubuntu 16.04 +# https://lxadm.com/Static_compilation_of_cpuminer + +# CLEAN +make distclean || echo clean +rm -f config.status + +# DEPENDS + +## OPENSSL +# wget https://www.openssl.org/source/openssl-1.1.0g.tar.gz +# tar -xvzf openssl-1.1.0g.tar.gz +# cd openssl-1.1.0g/ +# ./config no-shared +# make -j$(nproc) +# sudo make install +# cd .. + +## CURL +# wget https://github.com/curl/curl/releases/download/curl-7_57_0/curl-7.57.0.tar.gz +# tar -xvzf curl-7.57.0.tar.gz +# cd curl-7.57.0/ +# .buildconf | grep "buildconf: OK" +# ./configure --disable-shared | grep "Static=yes" +# make -j$(nproc) +# sudo make install +# cd .. + +# BUILD +./autogen.sh +# CFLAGS="-Wall -O2 -fomit-frame-pointer" ./configure +# ./configure --with-curl="/usr/local/" --with-crypto="/usr/local/" CFLAGS="-Wall -O2 -fomit-frame-pointer" LDFLAGS="-static -I/usr/local/lib/ -L/usr/local/lib/libcrypto.a" LIBS="-lssl -lcrypto -lz -lpthread -ldl" CFLAGS="-DCURL_STATICLIB" --with-crypto +./configure --with-curl="/usr/local/" --with-crypto="/usr/local/" CFLAGS="-Wall -O2 -fomit-frame-pointer" LDFLAGS="-static" LIBS="-ldl -lz" +make +strip -s sugarmaker + +# CHECK STATIC +file sugarmaker | grep "statically linked" + +# PACKAGE +RELEASE=sugarmaker-v2.5.0-sugar4-armv7l +rm -rf $RELEASE +mkdir $RELEASE +cp ./mining-script/sh/*.sh $RELEASE/ +cp sugarmaker $RELEASE/ + +# SIGN +zip -X $RELEASE/$RELEASE.zip $RELEASE/* +sha256sum $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +gpg --digest-algo sha256 --clearsign $RELEASE/$RELEASE +rm $RELEASE/$RELEASE && cat $RELEASE/$RELEASE.asc diff --git a/build-linux32.sh b/build-linux32.sh new file mode 100644 index 0000000..bf9cf06 --- /dev/null +++ b/build-linux32.sh @@ -0,0 +1,49 @@ +# try on virtualbox ubuntu 16.04 +# https://lxadm.com/Static_compilation_of_cpuminer + +# CLEAN +make distclean || echo clean +rm -f config.status + +# DEPENDS + +## OPENSSL +# wget https://www.openssl.org/source/openssl-1.1.0g.tar.gz +# tar -xvzf openssl-1.1.0g.tar.gz +# cd openssl-1.1.0g/ +# ./config no-shared +# make -j$(nproc) +# sudo make install +# cd .. + +## CURL +# wget https://github.com/curl/curl/releases/download/curl-7_57_0/curl-7.57.0.tar.gz +# tar -xvzf curl-7.57.0.tar.gz +# cd curl-7.57.0/ +# .buildconf | grep "buildconf: OK" +# ./configure --disable-shared | grep "Static=yes" +# make -j$(nproc) +# sudo make install +# cd .. 
+ +# BUILD +./autogen.sh +CFLAGS="-Wall -O2 -fomit-frame-pointer" CXXFLAGS="$CFLAGS -std=gnu++11" LDFLAGS="-static" ./configure --with-curl=/usr/local/ +make +strip -s sugarmaker + +# CHECK STATIC +file sugarmaker | grep "statically linked" + +# PACKAGE +RELEASE=sugarmaker-v2.5.0-sugar4-linux32 +rm -rf $RELEASE +mkdir $RELEASE +cp ./mining-script/sh/*.sh $RELEASE/ +cp sugarmaker $RELEASE/ + +# SIGN +zip -X $RELEASE/$RELEASE.zip $RELEASE/* +sha256sum $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +gpg --digest-algo sha256 --clearsign $RELEASE/$RELEASE +rm $RELEASE/$RELEASE && cat $RELEASE/$RELEASE.asc diff --git a/build-linux64.sh b/build-linux64.sh new file mode 100644 index 0000000..3961e0b --- /dev/null +++ b/build-linux64.sh @@ -0,0 +1,33 @@ +# try on virtualbox ubuntu 16.04 +# https://lxadm.com/Static_compilation_of_cpuminer + +# CLEAN +make distclean || echo clean +rm -f config.status + +# DEPENDS +# cd deps-linux64/ +# ./deps-linux64.sh +# cd .. + +# BUILD +./autogen.sh +./configure CFLAGS="-Wall -O2 -fomit-frame-pointer" LDFLAGS="-static" CXXFLAGS="$CFLAGS -std=gnu++11" --with-curl=/usr/local/ +make +strip -s sugarmaker + +# CHECK STATIC +file sugarmaker | grep "statically linked" + +# PACKAGE +RELEASE=sugarmaker-v2.5.0-sugar4-linux64 +rm -rf $RELEASE +mkdir $RELEASE +cp ./mining-script/sh/*.sh $RELEASE/ +cp sugarmaker $RELEASE/ + +# SIGN +zip -X $RELEASE/$RELEASE.zip $RELEASE/* +sha256sum $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +gpg --digest-algo sha256 --clearsign $RELEASE/$RELEASE +rm $RELEASE/$RELEASE && cat $RELEASE/$RELEASE.asc diff --git a/build-osx.sh b/build-osx.sh new file mode 100644 index 0000000..e02e788 --- /dev/null +++ b/build-osx.sh @@ -0,0 +1,34 @@ +# try on virtualbox ubuntu 16.04 +# https://gist.github.com/quagliero/90f493f123c7b1ddba5428ba0146329a + +# CLEAN +make distclean || echo clean +rm -f config.status + +# HOTFIX for OSX +./autogen.sh +sed -i '' '/LIBCURL_CHECK_CONFIG/d' ./configure +sed -i '' '/AC_MSG_ERROR/d' ./configure + +# BUILD +./autogen.sh +./configure CFLAGS="-Wall -O2 -fomit-frame-pointer" --with-crypto=/usr/local/opt/openssl --with-curl +make +strip sugarmaker + +# CHECK STATIC +file sugarmaker | grep "statically linked" + +# PACKAGE +RELEASE=sugarmaker-v2.5.0-sugar4-osx +rm -rf $RELEASE +mkdir $RELEASE +cp ./mining-script/sh/*.sh $RELEASE/ +cp sugarmaker $RELEASE/ + +# SIGN +zip -r $RELEASE/$RELEASE.zip $RELEASE/* +# sha256sum $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +shasum -a 256 $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +gpg --digest-algo sha256 --clearsign $RELEASE/$RELEASE +rm $RELEASE/$RELEASE && cat $RELEASE/$RELEASE.asc diff --git a/build-w32.sh b/build-w32.sh new file mode 100644 index 0000000..6112c1d --- /dev/null +++ b/build-w32.sh @@ -0,0 +1,29 @@ +# CLEAN +make distclean || echo clean +rm -f config.status + +# DEPS +# cd deps-win32 +# ./build_win_x86_deps.sh +# cd .. 
+ +# BUILD +autoreconf -fi -I./deps-win32/i686-w64-mingw32/share/aclocal +./autogen.sh +./configure --host=i686-w64-mingw32 LDFLAGS="-L./deps-win32/i686-w64-mingw32/lib -static" CFLAGS="-Wall -O2 -fomit-frame-pointer -I./deps-win32/i686-w64-mingw32/include -std=c99 -DWIN32 -DCURL_STATICLIB -DPTW32_STATIC_LIB" --with-libcurl=deps-win32/i686-w64-mingw32 +make +strip -p --strip-debug --strip-unneeded sugarmaker.exe + +# PACKAGE +RELEASE=sugarmaker-v2.5.0-sugar4-w32 +rm -rf $RELEASE +mkdir $RELEASE + +cp ./mining-script/bat/*.bat $RELEASE/ +mv sugarmaker.exe $RELEASE/ + +# SIGN +zip -X $RELEASE/$RELEASE.zip $RELEASE/* +sha256sum $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +gpg --digest-algo sha256 --clearsign $RELEASE/$RELEASE +rm $RELEASE/$RELEASE && cat $RELEASE/$RELEASE.asc diff --git a/build-w64.sh b/build-w64.sh new file mode 100644 index 0000000..6dae0ee --- /dev/null +++ b/build-w64.sh @@ -0,0 +1,29 @@ +# CLEAN +make distclean || echo clean +rm -f config.status + +# DEPS +# cd deps-win64 +# ./build_win_x64_deps.sh +# cd .. + +# BUILD +autoreconf -fi -I./deps-win64/x86_64-w64-mingw32/share/aclocal +./autogen.sh +./configure --host=x86_64-w64-mingw32 LDFLAGS="-L./deps-win64/x86_64-w64-mingw32/lib -static" CFLAGS="-Wall -O2 -fomit-frame-pointer -I./deps-win64/x86_64-w64-mingw32/include -std=c99 -DWIN32 -DCURL_STATICLIB -DPTW32_STATIC_LIB" --with-libcurl=deps-win64/x86_64-w64-mingw32 +make +strip -p --strip-debug --strip-unneeded sugarmaker.exe + +# PACKAGE +RELEASE=sugarmaker-v2.5.0-sugar4-w64 +rm -rf $RELEASE +mkdir $RELEASE + +cp ./mining-script/bat/*.bat $RELEASE/ +mv sugarmaker.exe $RELEASE/ + +# SIGN +zip -X $RELEASE/$RELEASE.zip $RELEASE/* +sha256sum $RELEASE/$RELEASE.zip > $RELEASE/$RELEASE +gpg --digest-algo sha256 --clearsign $RELEASE/$RELEASE +rm $RELEASE/$RELEASE && cat $RELEASE/$RELEASE.asc diff --git a/build.sh b/build.sh new file mode 100644 index 0000000..be66563 --- /dev/null +++ b/build.sh @@ -0,0 +1,12 @@ +# CLEAN +make distclean || echo clean +rm -f config.status + +# BUILD +./autogen.sh +./configure CFLAGS="-Wall -O2 -fomit-frame-pointer" +make -j$(nproc) +strip -s sugarmaker + +# CHECK STATIC +file sugarmaker | grep "dynamically linked" diff --git a/compat.h b/compat.h new file mode 100644 index 0000000..283fc9b --- /dev/null +++ b/compat.h @@ -0,0 +1,21 @@ +#ifndef __COMPAT_H__ +#define __COMPAT_H__ + +#ifdef WIN32 + +#include + +#define sleep(secs) Sleep((secs) * 1000) + +enum { + PRIO_PROCESS = 0, +}; + +static inline int setpriority(int which, int who, int prio) +{ + return -!SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE); +} + +#endif /* WIN32 */ + +#endif /* __COMPAT_H__ */ diff --git a/compat/Makefile.am b/compat/Makefile.am new file mode 100644 index 0000000..77af3c5 --- /dev/null +++ b/compat/Makefile.am @@ -0,0 +1,7 @@ + +if WANT_JANSSON +SUBDIRS = jansson +else +SUBDIRS = +endif + diff --git a/compat/jansson/.gitignore b/compat/jansson/.gitignore new file mode 100644 index 0000000..173737b --- /dev/null +++ b/compat/jansson/.gitignore @@ -0,0 +1,3 @@ + +libjansson.a + diff --git a/compat/jansson/LICENSE b/compat/jansson/LICENSE new file mode 100644 index 0000000..552b349 --- /dev/null +++ b/compat/jansson/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2009, 2010 Petri Lehtinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/compat/jansson/Makefile.am b/compat/jansson/Makefile.am new file mode 100644 index 0000000..94a583f --- /dev/null +++ b/compat/jansson/Makefile.am @@ -0,0 +1,18 @@ + +noinst_LIBRARIES = libjansson.a + +libjansson_a_SOURCES = \ + config.h \ + dump.c \ + hashtable.c \ + hashtable.h \ + jansson.h \ + jansson_private.h \ + load.c \ + strbuffer.c \ + strbuffer.h \ + utf.c \ + utf.h \ + util.h \ + value.c + diff --git a/compat/jansson/config.h b/compat/jansson/config.h new file mode 100644 index 0000000..43858aa --- /dev/null +++ b/compat/jansson/config.h @@ -0,0 +1,73 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "jansson" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "petri@digip.org" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "jansson" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "jansson 1.3" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "jansson" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "1.3" + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Version number of package */ +#define VERSION "1.3" + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +/* #undef inline */ +#endif + +/* Define to the type of a signed integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. 
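The config.h above is the pre-generated autoheader output shipped with the bundled jansson: the HAVE_*_H macros simply record which system headers configure detected. As a generic illustration of how such macros are normally consumed (the header names below are the usual autoconf candidates, not something this patch prescribes):

    #include "config.h"

    #ifdef HAVE_STDINT_H
    #include <stdint.h>     /* fixed-width integer types were detected */
    #endif
    #ifdef HAVE_UNISTD_H
    #include <unistd.h>     /* POSIX API was detected */
    #endif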
*/ +/* #undef int32_t */ diff --git a/compat/jansson/dump.c b/compat/jansson/dump.c new file mode 100644 index 0000000..dc27fbd --- /dev/null +++ b/compat/jansson/dump.c @@ -0,0 +1,460 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#define _GNU_SOURCE +#include +#include +#include +#include + +#include +#include "jansson_private.h" +#include "strbuffer.h" +#include "utf.h" + +#define MAX_INTEGER_STR_LENGTH 100 +#define MAX_REAL_STR_LENGTH 100 + +typedef int (*dump_func)(const char *buffer, int size, void *data); + +struct string +{ + char *buffer; + int length; + int size; +}; + +static int dump_to_strbuffer(const char *buffer, int size, void *data) +{ + return strbuffer_append_bytes((strbuffer_t *)data, buffer, size); +} + +static int dump_to_file(const char *buffer, int size, void *data) +{ + FILE *dest = (FILE *)data; + if(fwrite(buffer, size, 1, dest) != 1) + return -1; + return 0; +} + +/* 256 spaces (the maximum indentation size) */ +static char whitespace[] = " "; + +static int dump_indent(unsigned long flags, int depth, int space, dump_func dump, void *data) +{ + if(JSON_INDENT(flags) > 0) + { + int i, ws_count = JSON_INDENT(flags); + + if(dump("\n", 1, data)) + return -1; + + for(i = 0; i < depth; i++) + { + if(dump(whitespace, ws_count, data)) + return -1; + } + } + else if(space && !(flags & JSON_COMPACT)) + { + return dump(" ", 1, data); + } + return 0; +} + +static int dump_string(const char *str, int ascii, dump_func dump, void *data) +{ + const char *pos, *end; + int32_t codepoint; + + if(dump("\"", 1, data)) + return -1; + + end = pos = str; + while(1) + { + const char *text; + char seq[13]; + int length; + + while(*end) + { + end = utf8_iterate(pos, &codepoint); + if(!end) + return -1; + + /* mandatory escape or control char */ + if(codepoint == '\\' || codepoint == '"' || codepoint < 0x20) + break; + + /* non-ASCII */ + if(ascii && codepoint > 0x7F) + break; + + pos = end; + } + + if(pos != str) { + if(dump(str, pos - str, data)) + return -1; + } + + if(end == pos) + break; + + /* handle \, ", and control codes */ + length = 2; + switch(codepoint) + { + case '\\': text = "\\\\"; break; + case '\"': text = "\\\""; break; + case '\b': text = "\\b"; break; + case '\f': text = "\\f"; break; + case '\n': text = "\\n"; break; + case '\r': text = "\\r"; break; + case '\t': text = "\\t"; break; + default: + { + /* codepoint is in BMP */ + if(codepoint < 0x10000) + { + sprintf(seq, "\\u%04x", codepoint); + length = 6; + } + + /* not in BMP -> construct a UTF-16 surrogate pair */ + else + { + int32_t first, last; + + codepoint -= 0x10000; + first = 0xD800 | ((codepoint & 0xffc00) >> 10); + last = 0xDC00 | (codepoint & 0x003ff); + + sprintf(seq, "\\u%04x\\u%04x", first, last); + length = 12; + } + + text = seq; + break; + } + } + + if(dump(text, length, data)) + return -1; + + str = pos = end; + } + + return dump("\"", 1, data); +} + +static int object_key_compare_keys(const void *key1, const void *key2) +{ + return strcmp((*(const object_key_t **)key1)->key, + (*(const object_key_t **)key2)->key); +} + +static int object_key_compare_serials(const void *key1, const void *key2) +{ + return (*(const object_key_t **)key1)->serial - + (*(const object_key_t **)key2)->serial; +} + +static int do_dump(const json_t *json, unsigned long flags, int depth, + dump_func dump, void *data) +{ + int ascii = flags & JSON_ENSURE_ASCII ? 
1 : 0; + + switch(json_typeof(json)) { + case JSON_NULL: + return dump("null", 4, data); + + case JSON_TRUE: + return dump("true", 4, data); + + case JSON_FALSE: + return dump("false", 5, data); + + case JSON_INTEGER: + { + char buffer[MAX_INTEGER_STR_LENGTH]; + int size; + + size = snprintf(buffer, MAX_INTEGER_STR_LENGTH, "%d", json_integer_value(json)); + if(size >= MAX_INTEGER_STR_LENGTH) + return -1; + + return dump(buffer, size, data); + } + + case JSON_REAL: + { + char buffer[MAX_REAL_STR_LENGTH]; + int size; + + size = snprintf(buffer, MAX_REAL_STR_LENGTH, "%.17g", + json_real_value(json)); + if(size >= MAX_REAL_STR_LENGTH) + return -1; + + /* Make sure there's a dot or 'e' in the output. Otherwise + a real is converted to an integer when decoding */ + if(strchr(buffer, '.') == NULL && + strchr(buffer, 'e') == NULL) + { + if(size + 2 >= MAX_REAL_STR_LENGTH) { + /* No space to append ".0" */ + return -1; + } + buffer[size] = '.'; + buffer[size + 1] = '0'; + size += 2; + } + + return dump(buffer, size, data); + } + + case JSON_STRING: + return dump_string(json_string_value(json), ascii, dump, data); + + case JSON_ARRAY: + { + int i; + int n; + json_array_t *array; + + /* detect circular references */ + array = json_to_array(json); + if(array->visited) + goto array_error; + array->visited = 1; + + n = json_array_size(json); + + if(dump("[", 1, data)) + goto array_error; + if(n == 0) { + array->visited = 0; + return dump("]", 1, data); + } + if(dump_indent(flags, depth + 1, 0, dump, data)) + goto array_error; + + for(i = 0; i < n; ++i) { + if(do_dump(json_array_get(json, i), flags, depth + 1, + dump, data)) + goto array_error; + + if(i < n - 1) + { + if(dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + goto array_error; + } + else + { + if(dump_indent(flags, depth, 0, dump, data)) + goto array_error; + } + } + + array->visited = 0; + return dump("]", 1, data); + + array_error: + array->visited = 0; + return -1; + } + + case JSON_OBJECT: + { + json_object_t *object; + void *iter; + const char *separator; + int separator_length; + + if(flags & JSON_COMPACT) { + separator = ":"; + separator_length = 1; + } + else { + separator = ": "; + separator_length = 2; + } + + /* detect circular references */ + object = json_to_object(json); + if(object->visited) + goto object_error; + object->visited = 1; + + iter = json_object_iter((json_t *)json); + + if(dump("{", 1, data)) + goto object_error; + if(!iter) { + object->visited = 0; + return dump("}", 1, data); + } + if(dump_indent(flags, depth + 1, 0, dump, data)) + goto object_error; + + if(flags & JSON_SORT_KEYS || flags & JSON_PRESERVE_ORDER) + { + const object_key_t **keys; + unsigned int size; + unsigned int i; + int (*cmp_func)(const void *, const void *); + + size = json_object_size(json); + keys = malloc(size * sizeof(object_key_t *)); + if(!keys) + goto object_error; + + i = 0; + while(iter) + { + keys[i] = jsonp_object_iter_fullkey(iter); + iter = json_object_iter_next((json_t *)json, iter); + i++; + } + assert(i == size); + + if(flags & JSON_SORT_KEYS) + cmp_func = object_key_compare_keys; + else + cmp_func = object_key_compare_serials; + + qsort(keys, size, sizeof(object_key_t *), cmp_func); + + for(i = 0; i < size; i++) + { + const char *key; + json_t *value; + + key = keys[i]->key; + value = json_object_get(json, key); + assert(value); + + dump_string(key, ascii, dump, data); + if(dump(separator, separator_length, data) || + do_dump(value, flags, depth + 1, dump, data)) + { + free(keys); + goto object_error; + } 
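dump_string above escapes any code point outside the Basic Multilingual Plane as a UTF-16 surrogate pair before printing it as two \uXXXX sequences. A standalone check of that arithmetic, using U+1F600 as an arbitrary sample value:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t codepoint = 0x1F600;                    /* sample non-BMP code point */
        int32_t cp    = codepoint - 0x10000;
        int32_t first = 0xD800 | ((cp & 0xffc00) >> 10);
        int32_t last  = 0xDC00 | (cp & 0x003ff);

        assert(first == 0xD83D && last == 0xDE00);
        printf("\\u%04x\\u%04x\n", (unsigned)first, (unsigned)last);
        return 0;
    }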
+ + if(i < size - 1) + { + if(dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + { + free(keys); + goto object_error; + } + } + else + { + if(dump_indent(flags, depth, 0, dump, data)) + { + free(keys); + goto object_error; + } + } + } + + free(keys); + } + else + { + /* Don't sort keys */ + + while(iter) + { + void *next = json_object_iter_next((json_t *)json, iter); + + dump_string(json_object_iter_key(iter), ascii, dump, data); + if(dump(separator, separator_length, data) || + do_dump(json_object_iter_value(iter), flags, depth + 1, + dump, data)) + goto object_error; + + if(next) + { + if(dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + goto object_error; + } + else + { + if(dump_indent(flags, depth, 0, dump, data)) + goto object_error; + } + + iter = next; + } + } + + object->visited = 0; + return dump("}", 1, data); + + object_error: + object->visited = 0; + return -1; + } + + default: + /* not reached */ + return -1; + } +} + + +char *json_dumps(const json_t *json, unsigned long flags) +{ + strbuffer_t strbuff; + char *result; + + if(!json_is_array(json) && !json_is_object(json)) + return NULL; + + if(strbuffer_init(&strbuff)) + return NULL; + + if(do_dump(json, flags, 0, dump_to_strbuffer, (void *)&strbuff)) { + strbuffer_close(&strbuff); + return NULL; + } + + result = strdup(strbuffer_value(&strbuff)); + strbuffer_close(&strbuff); + + return result; +} + +int json_dumpf(const json_t *json, FILE *output, unsigned long flags) +{ + if(!json_is_array(json) && !json_is_object(json)) + return -1; + + return do_dump(json, flags, 0, dump_to_file, (void *)output); +} + +int json_dump_file(const json_t *json, const char *path, unsigned long flags) +{ + int result; + + FILE *output = fopen(path, "w"); + if(!output) + return -1; + + result = json_dumpf(json, output, flags); + + fclose(output); + return result; +} diff --git a/compat/jansson/hashtable.c b/compat/jansson/hashtable.c new file mode 100644 index 0000000..45b0589 --- /dev/null +++ b/compat/jansson/hashtable.c @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
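json_dumps, json_dumpf and json_dump_file above accept only an array or object at the top level, mirroring the check at the start of each function. A minimal usage sketch, assuming the bundled compat/jansson headers are on the include path (key names and values are illustrative only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <jansson.h>

    int main(void)
    {
        json_t *obj = json_object();
        json_object_set_new(obj, "algo", json_string("yespower"));
        json_object_set_new(obj, "threads", json_integer(4));

        /* JSON_INDENT(2) pretty-prints, JSON_SORT_KEYS gives a stable key order. */
        char *text = json_dumps(obj, JSON_INDENT(2) | JSON_SORT_KEYS);
        if (text) {
            puts(text);
            free(text);             /* json_dumps returns a strdup'd buffer */
        }
        json_decref(obj);
        return 0;
    }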
+ */ + +#include + +#include +#include "hashtable.h" + +typedef struct hashtable_list list_t; +typedef struct hashtable_pair pair_t; +typedef struct hashtable_bucket bucket_t; + +#define container_of(ptr_, type_, member_) \ + ((type_ *)((char *)ptr_ - (size_t)&((type_ *)0)->member_)) + +#define list_to_pair(list_) container_of(list_, pair_t, list) + +static inline void list_init(list_t *list) +{ + list->next = list; + list->prev = list; +} + +static inline void list_insert(list_t *list, list_t *node) +{ + node->next = list; + node->prev = list->prev; + list->prev->next = node; + list->prev = node; +} + +static inline void list_remove(list_t *list) +{ + list->prev->next = list->next; + list->next->prev = list->prev; +} + +static inline int bucket_is_empty(hashtable_t *hashtable, bucket_t *bucket) +{ + return bucket->first == &hashtable->list && bucket->first == bucket->last; +} + +static void insert_to_bucket(hashtable_t *hashtable, bucket_t *bucket, + list_t *list) +{ + if(bucket_is_empty(hashtable, bucket)) + { + list_insert(&hashtable->list, list); + bucket->first = bucket->last = list; + } + else + { + list_insert(bucket->first, list); + bucket->first = list; + } +} + +static unsigned int primes[] = { + 5, 13, 23, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593, + 49157, 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469, + 12582917, 25165843, 50331653, 100663319, 201326611, 402653189, + 805306457, 1610612741 +}; + +static inline unsigned int num_buckets(hashtable_t *hashtable) +{ + return primes[hashtable->num_buckets]; +} + + +static pair_t *hashtable_find_pair(hashtable_t *hashtable, bucket_t *bucket, + const void *key, unsigned int hash) +{ + list_t *list; + pair_t *pair; + + if(bucket_is_empty(hashtable, bucket)) + return NULL; + + list = bucket->first; + while(1) + { + pair = list_to_pair(list); + if(pair->hash == hash && hashtable->cmp_keys(pair->key, key)) + return pair; + + if(list == bucket->last) + break; + + list = list->next; + } + + return NULL; +} + +/* returns 0 on success, -1 if key was not found */ +static int hashtable_do_del(hashtable_t *hashtable, + const void *key, unsigned int hash) +{ + pair_t *pair; + bucket_t *bucket; + unsigned int index; + + index = hash % num_buckets(hashtable); + bucket = &hashtable->buckets[index]; + + pair = hashtable_find_pair(hashtable, bucket, key, hash); + if(!pair) + return -1; + + if(&pair->list == bucket->first && &pair->list == bucket->last) + bucket->first = bucket->last = &hashtable->list; + + else if(&pair->list == bucket->first) + bucket->first = pair->list.next; + + else if(&pair->list == bucket->last) + bucket->last = pair->list.prev; + + list_remove(&pair->list); + + if(hashtable->free_key) + hashtable->free_key(pair->key); + if(hashtable->free_value) + hashtable->free_value(pair->value); + + free(pair); + hashtable->size--; + + return 0; +} + +static void hashtable_do_clear(hashtable_t *hashtable) +{ + list_t *list, *next; + pair_t *pair; + + for(list = hashtable->list.next; list != &hashtable->list; list = next) + { + next = list->next; + pair = list_to_pair(list); + if(hashtable->free_key) + hashtable->free_key(pair->key); + if(hashtable->free_value) + hashtable->free_value(pair->value); + free(pair); + } +} + +static int hashtable_do_rehash(hashtable_t *hashtable) +{ + list_t *list, *next; + pair_t *pair; + unsigned int i, index, new_size; + + free(hashtable->buckets); + + hashtable->num_buckets++; + new_size = num_buckets(hashtable); + + hashtable->buckets = malloc(new_size * sizeof(bucket_t)); + 
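list_to_pair above is the classic container_of idiom: the hash buckets store only the small intrusive hashtable_list nodes, and the macro walks back from a node pointer to the hashtable_pair that embeds it. A standalone sketch of the same trick, written with the standard offsetof instead of the null-pointer cast used above:

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *prev, *next; };
    struct pair { int key, value; struct node list; };

    #define container_of(ptr_, type_, member_) \
        ((type_ *)((char *)(ptr_) - offsetof(type_, member_)))

    int main(void)
    {
        struct pair p = { 1, 2, { NULL, NULL } };
        struct node *n = &p.list;                       /* what the bucket list holds */
        struct pair *back = container_of(n, struct pair, list);
        assert(back == &p && back->value == 2);
        return 0;
    }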
if(!hashtable->buckets) + return -1; + + for(i = 0; i < num_buckets(hashtable); i++) + { + hashtable->buckets[i].first = hashtable->buckets[i].last = + &hashtable->list; + } + + list = hashtable->list.next; + list_init(&hashtable->list); + + for(; list != &hashtable->list; list = next) { + next = list->next; + pair = list_to_pair(list); + index = pair->hash % new_size; + insert_to_bucket(hashtable, &hashtable->buckets[index], &pair->list); + } + + return 0; +} + + +hashtable_t *hashtable_create(key_hash_fn hash_key, key_cmp_fn cmp_keys, + free_fn free_key, free_fn free_value) +{ + hashtable_t *hashtable = malloc(sizeof(hashtable_t)); + if(!hashtable) + return NULL; + + if(hashtable_init(hashtable, hash_key, cmp_keys, free_key, free_value)) + { + free(hashtable); + return NULL; + } + + return hashtable; +} + +void hashtable_destroy(hashtable_t *hashtable) +{ + hashtable_close(hashtable); + free(hashtable); +} + +int hashtable_init(hashtable_t *hashtable, + key_hash_fn hash_key, key_cmp_fn cmp_keys, + free_fn free_key, free_fn free_value) +{ + unsigned int i; + + hashtable->size = 0; + hashtable->num_buckets = 0; /* index to primes[] */ + hashtable->buckets = malloc(num_buckets(hashtable) * sizeof(bucket_t)); + if(!hashtable->buckets) + return -1; + + list_init(&hashtable->list); + + hashtable->hash_key = hash_key; + hashtable->cmp_keys = cmp_keys; + hashtable->free_key = free_key; + hashtable->free_value = free_value; + + for(i = 0; i < num_buckets(hashtable); i++) + { + hashtable->buckets[i].first = hashtable->buckets[i].last = + &hashtable->list; + } + + return 0; +} + +void hashtable_close(hashtable_t *hashtable) +{ + hashtable_do_clear(hashtable); + free(hashtable->buckets); +} + +int hashtable_set(hashtable_t *hashtable, void *key, void *value) +{ + pair_t *pair; + bucket_t *bucket; + unsigned int hash, index; + + /* rehash if the load ratio exceeds 1 */ + if(hashtable->size >= num_buckets(hashtable)) + if(hashtable_do_rehash(hashtable)) + return -1; + + hash = hashtable->hash_key(key); + index = hash % num_buckets(hashtable); + bucket = &hashtable->buckets[index]; + pair = hashtable_find_pair(hashtable, bucket, key, hash); + + if(pair) + { + if(hashtable->free_key) + hashtable->free_key(key); + if(hashtable->free_value) + hashtable->free_value(pair->value); + pair->value = value; + } + else + { + pair = malloc(sizeof(pair_t)); + if(!pair) + return -1; + + pair->key = key; + pair->value = value; + pair->hash = hash; + list_init(&pair->list); + + insert_to_bucket(hashtable, bucket, &pair->list); + + hashtable->size++; + } + return 0; +} + +void *hashtable_get(hashtable_t *hashtable, const void *key) +{ + pair_t *pair; + unsigned int hash; + bucket_t *bucket; + + hash = hashtable->hash_key(key); + bucket = &hashtable->buckets[hash % num_buckets(hashtable)]; + + pair = hashtable_find_pair(hashtable, bucket, key, hash); + if(!pair) + return NULL; + + return pair->value; +} + +int hashtable_del(hashtable_t *hashtable, const void *key) +{ + unsigned int hash = hashtable->hash_key(key); + return hashtable_do_del(hashtable, key, hash); +} + +void hashtable_clear(hashtable_t *hashtable) +{ + unsigned int i; + + hashtable_do_clear(hashtable); + + for(i = 0; i < num_buckets(hashtable); i++) + { + hashtable->buckets[i].first = hashtable->buckets[i].last = + &hashtable->list; + } + + list_init(&hashtable->list); + hashtable->size = 0; +} + +void *hashtable_iter(hashtable_t *hashtable) +{ + return hashtable_iter_next(hashtable, &hashtable->list); +} + +void *hashtable_iter_at(hashtable_t *hashtable, 
const void *key) +{ + pair_t *pair; + unsigned int hash; + bucket_t *bucket; + + hash = hashtable->hash_key(key); + bucket = &hashtable->buckets[hash % num_buckets(hashtable)]; + + pair = hashtable_find_pair(hashtable, bucket, key, hash); + if(!pair) + return NULL; + + return &pair->list; +} + +void *hashtable_iter_next(hashtable_t *hashtable, void *iter) +{ + list_t *list = (list_t *)iter; + if(list->next == &hashtable->list) + return NULL; + return list->next; +} + +void *hashtable_iter_key(void *iter) +{ + pair_t *pair = list_to_pair((list_t *)iter); + return pair->key; +} + +void *hashtable_iter_value(void *iter) +{ + pair_t *pair = list_to_pair((list_t *)iter); + return pair->value; +} + +void hashtable_iter_set(hashtable_t *hashtable, void *iter, void *value) +{ + pair_t *pair = list_to_pair((list_t *)iter); + + if(hashtable->free_value) + hashtable->free_value(pair->value); + + pair->value = value; +} diff --git a/compat/jansson/hashtable.h b/compat/jansson/hashtable.h new file mode 100644 index 0000000..f03a769 --- /dev/null +++ b/compat/jansson/hashtable.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef HASHTABLE_H +#define HASHTABLE_H + +typedef unsigned int (*key_hash_fn)(const void *key); +typedef int (*key_cmp_fn)(const void *key1, const void *key2); +typedef void (*free_fn)(void *key); + +struct hashtable_list { + struct hashtable_list *prev; + struct hashtable_list *next; +}; + +struct hashtable_pair { + void *key; + void *value; + unsigned int hash; + struct hashtable_list list; +}; + +struct hashtable_bucket { + struct hashtable_list *first; + struct hashtable_list *last; +}; + +typedef struct hashtable { + unsigned int size; + struct hashtable_bucket *buckets; + unsigned int num_buckets; /* index to primes[] */ + struct hashtable_list list; + + key_hash_fn hash_key; + key_cmp_fn cmp_keys; /* returns non-zero for equal keys */ + free_fn free_key; + free_fn free_value; +} hashtable_t; + +/** + * hashtable_create - Create a hashtable object + * + * @hash_key: The key hashing function + * @cmp_keys: The key compare function. Returns non-zero for equal and + * zero for unequal unequal keys + * @free_key: If non-NULL, called for a key that is no longer referenced. + * @free_value: If non-NULL, called for a value that is no longer referenced. + * + * Returns a new hashtable object that should be freed with + * hashtable_destroy when it's no longer used, or NULL on failure (out + * of memory). + */ +hashtable_t *hashtable_create(key_hash_fn hash_key, key_cmp_fn cmp_keys, + free_fn free_key, free_fn free_value); + +/** + * hashtable_destroy - Destroy a hashtable object + * + * @hashtable: The hashtable + * + * Destroys a hashtable created with hashtable_create(). + */ +void hashtable_destroy(hashtable_t *hashtable); + +/** + * hashtable_init - Initialize a hashtable object + * + * @hashtable: The (statically allocated) hashtable object + * @hash_key: The key hashing function + * @cmp_keys: The key compare function. Returns non-zero for equal and + * zero for unequal unequal keys + * @free_key: If non-NULL, called for a key that is no longer referenced. + * @free_value: If non-NULL, called for a value that is no longer referenced. + * + * Initializes a statically allocated hashtable object. The object + * should be cleared with hashtable_close when it's no longer used. 
+ * + * Returns 0 on success, -1 on error (out of memory). + */ +int hashtable_init(hashtable_t *hashtable, + key_hash_fn hash_key, key_cmp_fn cmp_keys, + free_fn free_key, free_fn free_value); + +/** + * hashtable_close - Release all resources used by a hashtable object + * + * @hashtable: The hashtable + * + * Destroys a statically allocated hashtable object. + */ +void hashtable_close(hashtable_t *hashtable); + +/** + * hashtable_set - Add/modify value in hashtable + * + * @hashtable: The hashtable object + * @key: The key + * @value: The value + * + * If a value with the given key already exists, its value is replaced + * with the new value. + * + * Key and value are "stealed" in the sense that hashtable frees them + * automatically when they are no longer used. The freeing is + * accomplished by calling free_key and free_value functions that were + * supplied to hashtable_new. In case one or both of the free + * functions is NULL, the corresponding item is not "stealed". + * + * Returns 0 on success, -1 on failure (out of memory). + */ +int hashtable_set(hashtable_t *hashtable, void *key, void *value); + +/** + * hashtable_get - Get a value associated with a key + * + * @hashtable: The hashtable object + * @key: The key + * + * Returns value if it is found, or NULL otherwise. + */ +void *hashtable_get(hashtable_t *hashtable, const void *key); + +/** + * hashtable_del - Remove a value from the hashtable + * + * @hashtable: The hashtable object + * @key: The key + * + * Returns 0 on success, or -1 if the key was not found. + */ +int hashtable_del(hashtable_t *hashtable, const void *key); + +/** + * hashtable_clear - Clear hashtable + * + * @hashtable: The hashtable object + * + * Removes all items from the hashtable. + */ +void hashtable_clear(hashtable_t *hashtable); + +/** + * hashtable_iter - Iterate over hashtable + * + * @hashtable: The hashtable object + * + * Returns an opaque iterator to the first element in the hashtable. + * The iterator should be passed to hashtable_iter_* functions. + * The hashtable items are not iterated over in any particular order. + * + * There's no need to free the iterator in any way. The iterator is + * valid as long as the item that is referenced by the iterator is not + * deleted. Other values may be added or deleted. In particular, + * hashtable_iter_next() may be called on an iterator, and after that + * the key/value pair pointed by the old iterator may be deleted. + */ +void *hashtable_iter(hashtable_t *hashtable); + +/** + * hashtable_iter_at - Return an iterator at a specific key + * + * @hashtable: The hashtable object + * @key: The key that the iterator should point to + * + * Like hashtable_iter() but returns an iterator pointing to a + * specific key. + */ +void *hashtable_iter_at(hashtable_t *hashtable, const void *key); + +/** + * hashtable_iter_next - Advance an iterator + * + * @hashtable: The hashtable object + * @iter: The iterator + * + * Returns a new iterator pointing to the next element in the + * hashtable or NULL if the whole hastable has been iterated over. 
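The hashtable API documented above is usable on its own. A short sketch with toy hash and compare callbacks supplied by the example itself (jansson wires up its own string-key callbacks elsewhere; they are not part of this header):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>
    #include "hashtable.h"

    static unsigned int hash_str(const void *key)
    {
        const char *s = key;
        unsigned int h = 5381;
        while (*s)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    static int cmp_str(const void *a, const void *b)
    {
        return strcmp(a, b) == 0;   /* non-zero means "equal" for this API */
    }

    int main(void)
    {
        hashtable_t *ht = hashtable_create(hash_str, cmp_str, free, free);
        assert(ht);

        hashtable_set(ht, strdup("algo"), strdup("yespower"));
        assert(strcmp(hashtable_get(ht, "algo"), "yespower") == 0);

        /* Keys and values were "stolen", so hashtable_destroy releases them via free(). */
        hashtable_destroy(ht);
        return 0;
    }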
+ */ +void *hashtable_iter_next(hashtable_t *hashtable, void *iter); + +/** + * hashtable_iter_key - Retrieve the key pointed by an iterator + * + * @iter: The iterator + */ +void *hashtable_iter_key(void *iter); + +/** + * hashtable_iter_value - Retrieve the value pointed by an iterator + * + * @iter: The iterator + */ +void *hashtable_iter_value(void *iter); + +/** + * hashtable_iter_set - Set the value pointed by an iterator + * + * @iter: The iterator + * @value: The value to set + */ +void hashtable_iter_set(hashtable_t *hashtable, void *iter, void *value); + +#endif diff --git a/compat/jansson/jansson.h b/compat/jansson/jansson.h new file mode 100644 index 0000000..4c526fe --- /dev/null +++ b/compat/jansson/jansson.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef JANSSON_H +#define JANSSON_H + +#include + +#ifndef __cplusplus +#define JSON_INLINE inline +#else +#define JSON_INLINE inline +extern "C" { +#endif + +/* types */ + +typedef enum { + JSON_OBJECT, + JSON_ARRAY, + JSON_STRING, + JSON_INTEGER, + JSON_REAL, + JSON_TRUE, + JSON_FALSE, + JSON_NULL +} json_type; + +typedef struct { + json_type type; + unsigned long refcount; +} json_t; + +#define json_typeof(json) ((json)->type) +#define json_is_object(json) (json && json_typeof(json) == JSON_OBJECT) +#define json_is_array(json) (json && json_typeof(json) == JSON_ARRAY) +#define json_is_string(json) (json && json_typeof(json) == JSON_STRING) +#define json_is_integer(json) (json && json_typeof(json) == JSON_INTEGER) +#define json_is_real(json) (json && json_typeof(json) == JSON_REAL) +#define json_is_number(json) (json_is_integer(json) || json_is_real(json)) +#define json_is_true(json) (json && json_typeof(json) == JSON_TRUE) +#define json_is_false(json) (json && json_typeof(json) == JSON_FALSE) +#define json_is_boolean(json) (json_is_true(json) || json_is_false(json)) +#define json_is_null(json) (json && json_typeof(json) == JSON_NULL) + +/* construction, destruction, reference counting */ + +json_t *json_object(void); +json_t *json_array(void); +json_t *json_string(const char *value); +json_t *json_string_nocheck(const char *value); +json_t *json_integer(int value); +json_t *json_real(double value); +json_t *json_true(void); +json_t *json_false(void); +json_t *json_null(void); + +static JSON_INLINE +json_t *json_incref(json_t *json) +{ + if(json && json->refcount != (unsigned int)-1) + ++json->refcount; + return json; +} + +/* do not call json_delete directly */ +void json_delete(json_t *json); + +static JSON_INLINE +void json_decref(json_t *json) +{ + if(json && json->refcount != (unsigned int)-1 && --json->refcount == 0) + json_delete(json); +} + + +/* getters, setters, manipulation */ + +unsigned int json_object_size(const json_t *object); +json_t *json_object_get(const json_t *object, const char *key); +int json_object_set_new(json_t *object, const char *key, json_t *value); +int json_object_set_new_nocheck(json_t *object, const char *key, json_t *value); +int json_object_del(json_t *object, const char *key); +int json_object_clear(json_t *object); +int json_object_update(json_t *object, json_t *other); +void *json_object_iter(json_t *object); +void *json_object_iter_at(json_t *object, const char *key); +void *json_object_iter_next(json_t *object, void *iter); +const char *json_object_iter_key(void *iter); +json_t *json_object_iter_value(void *iter); 
+int json_object_iter_set_new(json_t *object, void *iter, json_t *value); + +static JSON_INLINE +int json_object_set(json_t *object, const char *key, json_t *value) +{ + return json_object_set_new(object, key, json_incref(value)); +} + +static JSON_INLINE +int json_object_set_nocheck(json_t *object, const char *key, json_t *value) +{ + return json_object_set_new_nocheck(object, key, json_incref(value)); +} + +static inline +int json_object_iter_set(json_t *object, void *iter, json_t *value) +{ + return json_object_iter_set_new(object, iter, json_incref(value)); +} + +unsigned int json_array_size(const json_t *array); +json_t *json_array_get(const json_t *array, unsigned int index); +int json_array_set_new(json_t *array, unsigned int index, json_t *value); +int json_array_append_new(json_t *array, json_t *value); +int json_array_insert_new(json_t *array, unsigned int index, json_t *value); +int json_array_remove(json_t *array, unsigned int index); +int json_array_clear(json_t *array); +int json_array_extend(json_t *array, json_t *other); + +static JSON_INLINE +int json_array_set(json_t *array, unsigned int index, json_t *value) +{ + return json_array_set_new(array, index, json_incref(value)); +} + +static JSON_INLINE +int json_array_append(json_t *array, json_t *value) +{ + return json_array_append_new(array, json_incref(value)); +} + +static JSON_INLINE +int json_array_insert(json_t *array, unsigned int index, json_t *value) +{ + return json_array_insert_new(array, index, json_incref(value)); +} + +const char *json_string_value(const json_t *string); +int json_integer_value(const json_t *integer); +double json_real_value(const json_t *real); +double json_number_value(const json_t *json); + +int json_string_set(json_t *string, const char *value); +int json_string_set_nocheck(json_t *string, const char *value); +int json_integer_set(json_t *integer, int value); +int json_real_set(json_t *real, double value); + + +/* equality */ + +int json_equal(json_t *value1, json_t *value2); + + +/* copying */ + +json_t *json_copy(json_t *value); +json_t *json_deep_copy(json_t *value); + + +/* loading, printing */ + +#define JSON_ERROR_TEXT_LENGTH 160 + +typedef struct { + char text[JSON_ERROR_TEXT_LENGTH]; + int line; +} json_error_t; + +json_t *json_loads(const char *input, json_error_t *error); +json_t *json_loadf(FILE *input, json_error_t *error); +json_t *json_load_file(const char *path, json_error_t *error); + +#define JSON_INDENT(n) (n & 0xFF) +#define JSON_COMPACT 0x100 +#define JSON_ENSURE_ASCII 0x200 +#define JSON_SORT_KEYS 0x400 +#define JSON_PRESERVE_ORDER 0x800 + +char *json_dumps(const json_t *json, unsigned long flags); +int json_dumpf(const json_t *json, FILE *output, unsigned long flags); +int json_dump_file(const json_t *json, const char *path, unsigned long flags); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/compat/jansson/jansson_private.h b/compat/jansson/jansson_private.h new file mode 100644 index 0000000..4490702 --- /dev/null +++ b/compat/jansson/jansson_private.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
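One detail of the declarations above worth spelling out: the *_new setters steal the caller's reference, while the plain inline wrappers (json_object_set, json_array_append, and friends) call json_incref first, so the caller keeps ownership and must still json_decref its own handle. A small sketch, with illustrative key names:

    #include <jansson.h>

    static void build(json_t *obj)
    {
        /* _new form: the reference to the fresh string is stolen, nothing to free. */
        json_object_set_new(obj, "coin", json_string("sugarchain"));

        /* plain form: the object takes its own reference, so release ours. */
        json_t *n = json_integer(42);
        json_object_set(obj, "answer", n);
        json_decref(n);
    }

    int main(void)
    {
        json_t *obj = json_object();
        build(obj);
        json_decref(obj);           /* drops the object and everything it owns */
        return 0;
    }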
+ */ + +#ifndef JANSSON_PRIVATE_H +#define JANSSON_PRIVATE_H + +#include "jansson.h" +#include "hashtable.h" + +#define container_of(ptr_, type_, member_) \ + ((type_ *)((char *)ptr_ - (size_t)&((type_ *)0)->member_)) + +typedef struct { + json_t json; + hashtable_t hashtable; + unsigned long serial; + int visited; +} json_object_t; + +typedef struct { + json_t json; + unsigned int size; + unsigned int entries; + json_t **table; + int visited; +} json_array_t; + +typedef struct { + json_t json; + char *value; +} json_string_t; + +typedef struct { + json_t json; + double value; +} json_real_t; + +typedef struct { + json_t json; + int value; +} json_integer_t; + +#define json_to_object(json_) container_of(json_, json_object_t, json) +#define json_to_array(json_) container_of(json_, json_array_t, json) +#define json_to_string(json_) container_of(json_, json_string_t, json) +#define json_to_real(json_) container_of(json_, json_real_t, json) +#define json_to_integer(json_) container_of(json_, json_integer_t, json) + +typedef struct { + unsigned long serial; + char key[]; +} object_key_t; + +const object_key_t *jsonp_object_iter_fullkey(void *iter); + +#endif diff --git a/compat/jansson/load.c b/compat/jansson/load.c new file mode 100644 index 0000000..9d2f051 --- /dev/null +++ b/compat/jansson/load.c @@ -0,0 +1,883 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#if __GNUC__ >= 8 +#pragma GCC diagnostic ignored "-Wformat-truncation" +#endif + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "jansson_private.h" +#include "strbuffer.h" +#include "utf.h" + +#define TOKEN_INVALID -1 +#define TOKEN_EOF 0 +#define TOKEN_STRING 256 +#define TOKEN_INTEGER 257 +#define TOKEN_REAL 258 +#define TOKEN_TRUE 259 +#define TOKEN_FALSE 260 +#define TOKEN_NULL 261 + +/* read one byte from stream, return EOF on end of file */ +typedef int (*get_func)(void *data); + +/* return non-zero if end of file has been reached */ +typedef int (*eof_func)(void *data); + +typedef struct { + get_func get; + eof_func eof; + void *data; + int stream_pos; + char buffer[5]; + int buffer_pos; +} stream_t; + + +typedef struct { + stream_t stream; + strbuffer_t saved_text; + int token; + int line, column; + union { + char *string; + int integer; + double real; + } value; +} lex_t; + + +/*** error reporting ***/ + +static void error_init(json_error_t *error) +{ + if(error) + { + error->text[0] = '\0'; + error->line = -1; + } +} + +static void error_set(json_error_t *error, const lex_t *lex, + const char *msg, ...) 
+{ + va_list ap; + char text[JSON_ERROR_TEXT_LENGTH]; + + if(!error || error->text[0] != '\0') { + /* error already set */ + return; + } + + va_start(ap, msg); + vsnprintf(text, JSON_ERROR_TEXT_LENGTH, msg, ap); + va_end(ap); + + if(lex) + { + const char *saved_text = strbuffer_value(&lex->saved_text); + error->line = lex->line; + if(saved_text && saved_text[0]) + { + if(lex->saved_text.length <= 20) { + snprintf(error->text, JSON_ERROR_TEXT_LENGTH, + "%s near '%s'", text, saved_text); + } + else + snprintf(error->text, JSON_ERROR_TEXT_LENGTH, "%s", text); + } + else + { + snprintf(error->text, JSON_ERROR_TEXT_LENGTH, + "%s near end of file", text); + } + } + else + { + error->line = -1; + snprintf(error->text, JSON_ERROR_TEXT_LENGTH, "%s", text); + } +} + + +/*** lexical analyzer ***/ + +static void +stream_init(stream_t *stream, get_func get, eof_func eof, void *data) +{ + stream->get = get; + stream->eof = eof; + stream->data = data; + stream->stream_pos = 0; + stream->buffer[0] = '\0'; + stream->buffer_pos = 0; +} + +static char stream_get(stream_t *stream, json_error_t *error) +{ + char c; + + if(!stream->buffer[stream->buffer_pos]) + { + stream->buffer[0] = stream->get(stream->data); + stream->buffer_pos = 0; + + c = stream->buffer[0]; + + if((unsigned char)c >= 0x80 && c != (char)EOF) + { + /* multi-byte UTF-8 sequence */ + int i, count; + + count = utf8_check_first(c); + if(!count) + goto out; + + assert(count >= 2); + + for(i = 1; i < count; i++) + stream->buffer[i] = stream->get(stream->data); + + if(!utf8_check_full(stream->buffer, count, NULL)) + goto out; + + stream->stream_pos += count; + stream->buffer[count] = '\0'; + } + else { + stream->buffer[1] = '\0'; + stream->stream_pos++; + } + } + + return stream->buffer[stream->buffer_pos++]; + +out: + error_set(error, NULL, "unable to decode byte 0x%x at position %d", + (unsigned char)c, stream->stream_pos); + + stream->buffer[0] = EOF; + stream->buffer[1] = '\0'; + stream->buffer_pos = 1; + + return EOF; +} + +static void stream_unget(stream_t *stream, char c) +{ + assert(stream->buffer_pos > 0); + stream->buffer_pos--; + assert(stream->buffer[stream->buffer_pos] == c); +} + + +static int lex_get(lex_t *lex, json_error_t *error) +{ + return stream_get(&lex->stream, error); +} + +static int lex_eof(lex_t *lex) +{ + return lex->stream.eof(lex->stream.data); +} + +static void lex_save(lex_t *lex, char c) +{ + strbuffer_append_byte(&lex->saved_text, c); +} + +static int lex_get_save(lex_t *lex, json_error_t *error) +{ + char c = stream_get(&lex->stream, error); + lex_save(lex, c); + return c; +} + +static void lex_unget_unsave(lex_t *lex, char c) +{ + char d; + stream_unget(&lex->stream, c); + d = strbuffer_pop(&lex->saved_text); + assert(c == d); +} + +static void lex_save_cached(lex_t *lex) +{ + while(lex->stream.buffer[lex->stream.buffer_pos] != '\0') + { + lex_save(lex, lex->stream.buffer[lex->stream.buffer_pos]); + lex->stream.buffer_pos++; + } +} + +/* assumes that str points to 'u' plus at least 4 valid hex digits */ +static int32_t decode_unicode_escape(const char *str) +{ + int i; + int32_t value = 0; + + assert(str[0] == 'u'); + + for(i = 1; i <= 4; i++) { + char c = str[i]; + value <<= 4; + if(isdigit(c)) + value += c - '0'; + else if(islower(c)) + value += c - 'a' + 10; + else if(isupper(c)) + value += c - 'A' + 10; + else + assert(0); + } + + return value; +} + +static void lex_scan_string(lex_t *lex, json_error_t *error) +{ + char c; + const char *p; + char *t; + int i; + + lex->value.string = NULL; + lex->token = 
TOKEN_INVALID; + + c = lex_get_save(lex, error); + + while(c != '"') { + if(c == (char)EOF) { + lex_unget_unsave(lex, c); + if(lex_eof(lex)) + error_set(error, lex, "premature end of input"); + goto out; + } + + else if((unsigned char)c <= 0x1F) { + /* control character */ + lex_unget_unsave(lex, c); + if(c == '\n') + error_set(error, lex, "unexpected newline", c); + else + error_set(error, lex, "control character 0x%x", c); + goto out; + } + + else if(c == '\\') { + c = lex_get_save(lex, error); + if(c == 'u') { + c = lex_get_save(lex, error); + for(i = 0; i < 4; i++) { + if(!isxdigit(c)) { + lex_unget_unsave(lex, c); + error_set(error, lex, "invalid escape"); + goto out; + } + c = lex_get_save(lex, error); + } + } + else if(c == '"' || c == '\\' || c == '/' || c == 'b' || + c == 'f' || c == 'n' || c == 'r' || c == 't') + c = lex_get_save(lex, error); + else { + lex_unget_unsave(lex, c); + error_set(error, lex, "invalid escape"); + goto out; + } + } + else + c = lex_get_save(lex, error); + } + + /* the actual value is at most of the same length as the source + string, because: + - shortcut escapes (e.g. "\t") (length 2) are converted to 1 byte + - a single \uXXXX escape (length 6) is converted to at most 3 bytes + - two \uXXXX escapes (length 12) forming an UTF-16 surrogate pair + are converted to 4 bytes + */ + lex->value.string = malloc(lex->saved_text.length + 1); + if(!lex->value.string) { + /* this is not very nice, since TOKEN_INVALID is returned */ + goto out; + } + + /* the target */ + t = lex->value.string; + + /* + 1 to skip the " */ + p = strbuffer_value(&lex->saved_text) + 1; + + while(*p != '"') { + if(*p == '\\') { + p++; + if(*p == 'u') { + char buffer[4]; + int length; + int32_t value; + + value = decode_unicode_escape(p); + p += 5; + + if(0xD800 <= value && value <= 0xDBFF) { + /* surrogate pair */ + if(*p == '\\' && *(p + 1) == 'u') { + int32_t value2 = decode_unicode_escape(++p); + p += 5; + + if(0xDC00 <= value2 && value2 <= 0xDFFF) { + /* valid second surrogate */ + value = + ((value - 0xD800) << 10) + + (value2 - 0xDC00) + + 0x10000; + } + else { + /* invalid second surrogate */ + error_set(error, lex, + "invalid Unicode '\\u%04X\\u%04X'", + value, value2); + goto out; + } + } + else { + /* no second surrogate */ + error_set(error, lex, "invalid Unicode '\\u%04X'", + value); + goto out; + } + } + else if(0xDC00 <= value && value <= 0xDFFF) { + error_set(error, lex, "invalid Unicode '\\u%04X'", value); + goto out; + } + else if(value == 0) + { + error_set(error, lex, "\\u0000 is not allowed"); + goto out; + } + + if(utf8_encode(value, buffer, &length)) + assert(0); + + memcpy(t, buffer, length); + t += length; + } + else { + switch(*p) { + case '"': case '\\': case '/': + *t = *p; break; + case 'b': *t = '\b'; break; + case 'f': *t = '\f'; break; + case 'n': *t = '\n'; break; + case 'r': *t = '\r'; break; + case 't': *t = '\t'; break; + default: assert(0); + } + t++; + p++; + } + } + else + *(t++) = *(p++); + } + *t = '\0'; + lex->token = TOKEN_STRING; + return; + +out: + free(lex->value.string); +} + +static int lex_scan_number(lex_t *lex, char c, json_error_t *error) +{ + const char *saved_text; + char *end; + double value; + + lex->token = TOKEN_INVALID; + + if(c == '-') + c = lex_get_save(lex, error); + + if(c == '0') { + c = lex_get_save(lex, error); + if(isdigit(c)) { + lex_unget_unsave(lex, c); + goto out; + } + } + else if(isdigit(c)) { + c = lex_get_save(lex, error); + while(isdigit(c)) + c = lex_get_save(lex, error); + } + else { + lex_unget_unsave(lex, c); 
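lex_scan_string above decodes \uXXXX escapes and, for a high/low surrogate pair, reassembles the original code point with ((v1 - 0xD800) << 10) + (v2 - 0xDC00) + 0x10000, the inverse of the split performed in dump.c. A quick standalone check with sample values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t value = 0xD83D, value2 = 0xDE00;        /* sample surrogate pair */
        int32_t cp = ((value - 0xD800) << 10) + (value2 - 0xDC00) + 0x10000;
        assert(cp == 0x1F600);                          /* 12 escape bytes become one code point */
        return 0;
    }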
+ goto out; + } + + if(c != '.' && c != 'E' && c != 'e') { + long value; + + lex_unget_unsave(lex, c); + + saved_text = strbuffer_value(&lex->saved_text); + value = strtol(saved_text, &end, 10); + assert(end == saved_text + lex->saved_text.length); + + if((value == LONG_MAX && errno == ERANGE) || value > INT_MAX) { + error_set(error, lex, "too big integer"); + goto out; + } + else if((value == LONG_MIN && errno == ERANGE) || value < INT_MIN) { + error_set(error, lex, "too big negative integer"); + goto out; + } + + lex->token = TOKEN_INTEGER; + lex->value.integer = (int)value; + return 0; + } + + if(c == '.') { + c = lex_get(lex, error); + if(!isdigit(c)) + goto out; + lex_save(lex, c); + + c = lex_get_save(lex, error); + while(isdigit(c)) + c = lex_get_save(lex, error); + } + + if(c == 'E' || c == 'e') { + c = lex_get_save(lex, error); + if(c == '+' || c == '-') + c = lex_get_save(lex, error); + + if(!isdigit(c)) { + lex_unget_unsave(lex, c); + goto out; + } + + c = lex_get_save(lex, error); + while(isdigit(c)) + c = lex_get_save(lex, error); + } + + lex_unget_unsave(lex, c); + + saved_text = strbuffer_value(&lex->saved_text); + value = strtod(saved_text, &end); + assert(end == saved_text + lex->saved_text.length); + + if(errno == ERANGE && value != 0) { + error_set(error, lex, "real number overflow"); + goto out; + } + + lex->token = TOKEN_REAL; + lex->value.real = value; + return 0; + +out: + return -1; +} + +static int lex_scan(lex_t *lex, json_error_t *error) +{ + char c; + + strbuffer_clear(&lex->saved_text); + + if(lex->token == TOKEN_STRING) { + free(lex->value.string); + lex->value.string = NULL; + } + + c = lex_get(lex, error); + while(c == ' ' || c == '\t' || c == '\n' || c == '\r') + { + if(c == '\n') + lex->line++; + + c = lex_get(lex, error); + } + + if(c == (char)EOF) { + if(lex_eof(lex)) + lex->token = TOKEN_EOF; + else + lex->token = TOKEN_INVALID; + goto out; + } + + lex_save(lex, c); + + if(c == '{' || c == '}' || c == '[' || c == ']' || c == ':' || c == ',') + lex->token = c; + + else if(c == '"') + lex_scan_string(lex, error); + + else if(isdigit(c) || c == '-') { + if(lex_scan_number(lex, c, error)) + goto out; + } + + else if(isupper(c) || islower(c)) { + /* eat up the whole identifier for clearer error messages */ + const char *saved_text; + + c = lex_get_save(lex, error); + while(isupper(c) || islower(c)) + c = lex_get_save(lex, error); + lex_unget_unsave(lex, c); + + saved_text = strbuffer_value(&lex->saved_text); + + if(strcmp(saved_text, "true") == 0) + lex->token = TOKEN_TRUE; + else if(strcmp(saved_text, "false") == 0) + lex->token = TOKEN_FALSE; + else if(strcmp(saved_text, "null") == 0) + lex->token = TOKEN_NULL; + else + lex->token = TOKEN_INVALID; + } + + else { + /* save the rest of the input UTF-8 sequence to get an error + message of valid UTF-8 */ + lex_save_cached(lex); + lex->token = TOKEN_INVALID; + } + +out: + return lex->token; +} + +static char *lex_steal_string(lex_t *lex) +{ + char *result = NULL; + if(lex->token == TOKEN_STRING) + { + result = lex->value.string; + lex->value.string = NULL; + } + return result; +} + +static int lex_init(lex_t *lex, get_func get, eof_func eof, void *data) +{ + stream_init(&lex->stream, get, eof, data); + if(strbuffer_init(&lex->saved_text)) + return -1; + + lex->token = TOKEN_INVALID; + lex->line = 1; + + return 0; +} + +static void lex_close(lex_t *lex) +{ + if(lex->token == TOKEN_STRING) + free(lex->value.string); + strbuffer_close(&lex->saved_text); +} + + +/*** parser ***/ + +static json_t *parse_value(lex_t 
*lex, json_error_t *error); + +static json_t *parse_object(lex_t *lex, json_error_t *error) +{ + json_t *object = json_object(); + if(!object) + return NULL; + + lex_scan(lex, error); + if(lex->token == '}') + return object; + + while(1) { + char *key; + json_t *value; + + if(lex->token != TOKEN_STRING) { + error_set(error, lex, "string or '}' expected"); + goto error; + } + + key = lex_steal_string(lex); + if(!key) + return NULL; + + lex_scan(lex, error); + if(lex->token != ':') { + free(key); + error_set(error, lex, "':' expected"); + goto error; + } + + lex_scan(lex, error); + value = parse_value(lex, error); + if(!value) { + free(key); + goto error; + } + + if(json_object_set_nocheck(object, key, value)) { + free(key); + json_decref(value); + goto error; + } + + json_decref(value); + free(key); + + lex_scan(lex, error); + if(lex->token != ',') + break; + + lex_scan(lex, error); + } + + if(lex->token != '}') { + error_set(error, lex, "'}' expected"); + goto error; + } + + return object; + +error: + json_decref(object); + return NULL; +} + +static json_t *parse_array(lex_t *lex, json_error_t *error) +{ + json_t *array = json_array(); + if(!array) + return NULL; + + lex_scan(lex, error); + if(lex->token == ']') + return array; + + while(lex->token) { + json_t *elem = parse_value(lex, error); + if(!elem) + goto error; + + if(json_array_append(array, elem)) { + json_decref(elem); + goto error; + } + json_decref(elem); + + lex_scan(lex, error); + if(lex->token != ',') + break; + + lex_scan(lex, error); + } + + if(lex->token != ']') { + error_set(error, lex, "']' expected"); + goto error; + } + + return array; + +error: + json_decref(array); + return NULL; +} + +static json_t *parse_value(lex_t *lex, json_error_t *error) +{ + json_t *json; + + switch(lex->token) { + case TOKEN_STRING: { + json = json_string_nocheck(lex->value.string); + break; + } + + case TOKEN_INTEGER: { + json = json_integer(lex->value.integer); + break; + } + + case TOKEN_REAL: { + json = json_real(lex->value.real); + break; + } + + case TOKEN_TRUE: + json = json_true(); + break; + + case TOKEN_FALSE: + json = json_false(); + break; + + case TOKEN_NULL: + json = json_null(); + break; + + case '{': + json = parse_object(lex, error); + break; + + case '[': + json = parse_array(lex, error); + break; + + case TOKEN_INVALID: + error_set(error, lex, "invalid token"); + return NULL; + + default: + error_set(error, lex, "unexpected token"); + return NULL; + } + + if(!json) + return NULL; + + return json; +} + +static json_t *parse_json(lex_t *lex, json_error_t *error) +{ + error_init(error); + + lex_scan(lex, error); + if(lex->token != '[' && lex->token != '{') { + error_set(error, lex, "'[' or '{' expected"); + return NULL; + } + + return parse_value(lex, error); +} + +typedef struct +{ + const char *data; + int pos; +} string_data_t; + +static int string_get(void *data) +{ + char c; + string_data_t *stream = (string_data_t *)data; + c = stream->data[stream->pos]; + if(c == '\0') + return EOF; + else + { + stream->pos++; + return c; + } +} + +static int string_eof(void *data) +{ + string_data_t *stream = (string_data_t *)data; + return (stream->data[stream->pos] == '\0'); +} + +json_t *json_loads(const char *string, json_error_t *error) +{ + lex_t lex; + json_t *result; + + string_data_t stream_data = { + .data = string, + .pos = 0 + }; + + if(lex_init(&lex, string_get, string_eof, (void *)&stream_data)) + return NULL; + + result = parse_json(&lex, error); + if(!result) + goto out; + + lex_scan(&lex, error); + if(lex.token != 
TOKEN_EOF) { + error_set(error, &lex, "end of file expected"); + json_decref(result); + result = NULL; + } + +out: + lex_close(&lex); + return result; +} + +json_t *json_loadf(FILE *input, json_error_t *error) +{ + lex_t lex; + json_t *result; + + if(lex_init(&lex, (get_func)fgetc, (eof_func)feof, input)) + return NULL; + + result = parse_json(&lex, error); + if(!result) + goto out; + + lex_scan(&lex, error); + if(lex.token != TOKEN_EOF) { + error_set(error, &lex, "end of file expected"); + json_decref(result); + result = NULL; + } + +out: + lex_close(&lex); + return result; +} + +json_t *json_load_file(const char *path, json_error_t *error) +{ + json_t *result; + FILE *fp; + + error_init(error); + + fp = fopen(path, "r"); + if(!fp) + { + error_set(error, NULL, "unable to open %s: %s", + path, strerror(errno)); + return NULL; + } + + result = json_loadf(fp, error); + + fclose(fp); + return result; +} diff --git a/compat/jansson/strbuffer.c b/compat/jansson/strbuffer.c new file mode 100644 index 0000000..3496024 --- /dev/null +++ b/compat/jansson/strbuffer.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#define _GNU_SOURCE +#include +#include +#include "strbuffer.h" +#include "util.h" + +#define STRBUFFER_MIN_SIZE 16 +#define STRBUFFER_FACTOR 2 + +int strbuffer_init(strbuffer_t *strbuff) +{ + strbuff->size = STRBUFFER_MIN_SIZE; + strbuff->length = 0; + + strbuff->value = malloc(strbuff->size); + if(!strbuff->value) + return -1; + + /* initialize to empty */ + strbuff->value[0] = '\0'; + return 0; +} + +void strbuffer_close(strbuffer_t *strbuff) +{ + free(strbuff->value); + strbuff->size = 0; + strbuff->length = 0; + strbuff->value = NULL; +} + +void strbuffer_clear(strbuffer_t *strbuff) +{ + strbuff->length = 0; + strbuff->value[0] = '\0'; +} + +const char *strbuffer_value(const strbuffer_t *strbuff) +{ + return strbuff->value; +} + +char *strbuffer_steal_value(strbuffer_t *strbuff) +{ + char *result = strbuff->value; + strbuffer_init(strbuff); + return result; +} + +int strbuffer_append(strbuffer_t *strbuff, const char *string) +{ + return strbuffer_append_bytes(strbuff, string, strlen(string)); +} + +int strbuffer_append_byte(strbuffer_t *strbuff, char byte) +{ + return strbuffer_append_bytes(strbuff, &byte, 1); +} + +int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, int size) +{ + if(strbuff->length + size >= strbuff->size) + { + strbuff->size = max(strbuff->size * STRBUFFER_FACTOR, + strbuff->length + size + 1); + + strbuff->value = realloc(strbuff->value, strbuff->size); + if(!strbuff->value) + return -1; + } + + memcpy(strbuff->value + strbuff->length, data, size); + strbuff->length += size; + strbuff->value[strbuff->length] = '\0'; + + return 0; +} + +char strbuffer_pop(strbuffer_t *strbuff) +{ + if(strbuff->length > 0) { + char c = strbuff->value[--strbuff->length]; + strbuff->value[strbuff->length] = '\0'; + return c; + } + else + return '\0'; +} diff --git a/compat/jansson/strbuffer.h b/compat/jansson/strbuffer.h new file mode 100644 index 0000000..f4c5f77 --- /dev/null +++ b/compat/jansson/strbuffer.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef STRBUFFER_H +#define STRBUFFER_H + +typedef struct { + char *value; + int length; /* bytes used */ + int size; /* bytes allocated */ +} strbuffer_t; + +int strbuffer_init(strbuffer_t *strbuff); +void strbuffer_close(strbuffer_t *strbuff); + +void strbuffer_clear(strbuffer_t *strbuff); + +const char *strbuffer_value(const strbuffer_t *strbuff); +char *strbuffer_steal_value(strbuffer_t *strbuff); + +int strbuffer_append(strbuffer_t *strbuff, const char *string); +int strbuffer_append_byte(strbuffer_t *strbuff, char byte); +int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, int size); + +char strbuffer_pop(strbuffer_t *strbuff); + +#endif diff --git a/compat/jansson/utf.c b/compat/jansson/utf.c new file mode 100644 index 0000000..92484d0 --- /dev/null +++ b/compat/jansson/utf.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include "utf.h" + +int utf8_encode(int32_t codepoint, char *buffer, int *size) +{ + if(codepoint < 0) + return -1; + else if(codepoint < 0x80) + { + buffer[0] = (char)codepoint; + *size = 1; + } + else if(codepoint < 0x800) + { + buffer[0] = 0xC0 + ((codepoint & 0x7C0) >> 6); + buffer[1] = 0x80 + ((codepoint & 0x03F)); + *size = 2; + } + else if(codepoint < 0x10000) + { + buffer[0] = 0xE0 + ((codepoint & 0xF000) >> 12); + buffer[1] = 0x80 + ((codepoint & 0x0FC0) >> 6); + buffer[2] = 0x80 + ((codepoint & 0x003F)); + *size = 3; + } + else if(codepoint <= 0x10FFFF) + { + buffer[0] = 0xF0 + ((codepoint & 0x1C0000) >> 18); + buffer[1] = 0x80 + ((codepoint & 0x03F000) >> 12); + buffer[2] = 0x80 + ((codepoint & 0x000FC0) >> 6); + buffer[3] = 0x80 + ((codepoint & 0x00003F)); + *size = 4; + } + else + return -1; + + return 0; +} + +int utf8_check_first(char byte) +{ + unsigned char u = (unsigned char)byte; + + if(u < 0x80) + return 1; + + if(0x80 <= u && u <= 0xBF) { + /* second, third or fourth byte of a multi-byte + sequence, i.e. 
a "continuation byte" */ + return 0; + } + else if(u == 0xC0 || u == 0xC1) { + /* overlong encoding of an ASCII byte */ + return 0; + } + else if(0xC2 <= u && u <= 0xDF) { + /* 2-byte sequence */ + return 2; + } + + else if(0xE0 <= u && u <= 0xEF) { + /* 3-byte sequence */ + return 3; + } + else if(0xF0 <= u && u <= 0xF4) { + /* 4-byte sequence */ + return 4; + } + else { /* u >= 0xF5 */ + /* Restricted (start of 4-, 5- or 6-byte sequence) or invalid + UTF-8 */ + return 0; + } +} + +int utf8_check_full(const char *buffer, int size, int32_t *codepoint) +{ + int i; + int32_t value = 0; + unsigned char u = (unsigned char)buffer[0]; + + if(size == 2) + { + value = u & 0x1F; + } + else if(size == 3) + { + value = u & 0xF; + } + else if(size == 4) + { + value = u & 0x7; + } + else + return 0; + + for(i = 1; i < size; i++) + { + u = (unsigned char)buffer[i]; + + if(u < 0x80 || u > 0xBF) { + /* not a continuation byte */ + return 0; + } + + value = (value << 6) + (u & 0x3F); + } + + if(value > 0x10FFFF) { + /* not in Unicode range */ + return 0; + } + + else if(0xD800 <= value && value <= 0xDFFF) { + /* invalid code point (UTF-16 surrogate halves) */ + return 0; + } + + else if((size == 2 && value < 0x80) || + (size == 3 && value < 0x800) || + (size == 4 && value < 0x10000)) { + /* overlong encoding */ + return 0; + } + + if(codepoint) + *codepoint = value; + + return 1; +} + +const char *utf8_iterate(const char *buffer, int32_t *codepoint) +{ + int count; + int32_t value; + + if(!*buffer) + return buffer; + + count = utf8_check_first(buffer[0]); + if(count <= 0) + return NULL; + + if(count == 1) + value = (unsigned char)buffer[0]; + else + { + if(!utf8_check_full(buffer, count, &value)) + return NULL; + } + + if(codepoint) + *codepoint = value; + + return buffer + count; +} + +int utf8_check_string(const char *string, int length) +{ + int i; + + if(length == -1) + length = strlen(string); + + for(i = 0; i < length; i++) + { + int count = utf8_check_first(string[i]); + if(count == 0) + return 0; + else if(count > 1) + { + if(i + count > length) + return 0; + + if(!utf8_check_full(&string[i], count, NULL)) + return 0; + + i += count - 1; + } + } + + return 1; +} diff --git a/compat/jansson/utf.h b/compat/jansson/utf.h new file mode 100644 index 0000000..d0ae6e9 --- /dev/null +++ b/compat/jansson/utf.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef UTF_H +#define UTF_H + +#include + +#ifdef HAVE_INTTYPES_H +/* inttypes.h includes stdint.h in a standard environment, so there's +no need to include stdint.h separately. If inttypes.h doesn't define +int32_t, it's defined in config.h. */ +#include +#endif + +int utf8_encode(int codepoint, char *buffer, int *size); + +int utf8_check_first(char byte); +int utf8_check_full(const char *buffer, int size, int32_t *codepoint); +const char *utf8_iterate(const char *buffer, int32_t *codepoint); + +int utf8_check_string(const char *string, int length); + +#endif diff --git a/compat/jansson/util.h b/compat/jansson/util.h new file mode 100644 index 0000000..06a547b --- /dev/null +++ b/compat/jansson/util.h @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef UTIL_H +#define UTIL_H + +#define max(a, b) ((a) > (b) ? 
(a) : (b)) + +#endif diff --git a/compat/jansson/value.c b/compat/jansson/value.c new file mode 100644 index 0000000..e024fdb --- /dev/null +++ b/compat/jansson/value.c @@ -0,0 +1,976 @@ +/* + * Copyright (c) 2009, 2010 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#define _GNU_SOURCE + +#include + +#include +#include + +#include +#include "hashtable.h" +#include "jansson_private.h" +#include "utf.h" +#include "util.h" + + +static inline void json_init(json_t *json, json_type type) +{ + json->type = type; + json->refcount = 1; +} + + +/*** object ***/ + +/* This macro just returns a pointer that's a few bytes backwards from + string. This makes it possible to pass a pointer to object_key_t + when only the string inside it is used, without actually creating + an object_key_t instance. */ +#define string_to_key(string) container_of(string, object_key_t, key) + +static unsigned int hash_key(const void *ptr) +{ + const char *str = ((const object_key_t *)ptr)->key; + + unsigned int hash = 5381; + unsigned int c; + + while((c = (unsigned int)*str)) + { + hash = ((hash << 5) + hash) + c; + str++; + } + + return hash; +} + +static int key_equal(const void *ptr1, const void *ptr2) +{ + return strcmp(((const object_key_t *)ptr1)->key, + ((const object_key_t *)ptr2)->key) == 0; +} + +static void value_decref(void *value) +{ + json_decref((json_t *)value); +} + +json_t *json_object(void) +{ + json_object_t *object = malloc(sizeof(json_object_t)); + if(!object) + return NULL; + json_init(&object->json, JSON_OBJECT); + + if(hashtable_init(&object->hashtable, hash_key, key_equal, + free, value_decref)) + { + free(object); + return NULL; + } + + object->serial = 0; + object->visited = 0; + + return &object->json; +} + +static void json_delete_object(json_object_t *object) +{ + hashtable_close(&object->hashtable); + free(object); +} + +unsigned int json_object_size(const json_t *json) +{ + json_object_t *object; + + if(!json_is_object(json)) + return -1; + + object = json_to_object(json); + return object->hashtable.size; +} + +json_t *json_object_get(const json_t *json, const char *key) +{ + json_object_t *object; + + if(!json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_get(&object->hashtable, string_to_key(key)); +} + +int json_object_set_new_nocheck(json_t *json, const char *key, json_t *value) +{ + json_object_t *object; + object_key_t *k; + + if(!key || !value) + return -1; + + if(!json_is_object(json) || json == value) + { + json_decref(value); + return -1; + } + object = json_to_object(json); + + k = malloc(sizeof(object_key_t) + strlen(key) + 1); + if(!k) + return -1; + + k->serial = object->serial++; + strcpy(k->key, key); + + if(hashtable_set(&object->hashtable, k, value)) + { + json_decref(value); + return -1; + } + + return 0; +} + +int json_object_set_new(json_t *json, const char *key, json_t *value) +{ + if(!key || !utf8_check_string(key, -1)) + { + json_decref(value); + return -1; + } + + return json_object_set_new_nocheck(json, key, value); +} + +int json_object_del(json_t *json, const char *key) +{ + json_object_t *object; + + if(!json_is_object(json)) + return -1; + + object = json_to_object(json); + return hashtable_del(&object->hashtable, string_to_key(key)); +} + +int json_object_clear(json_t *json) +{ + json_object_t *object; + + if(!json_is_object(json)) + return -1; + + object = json_to_object(json); + 
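/*
 * Illustrative sketch (not part of the original sources): building and
 * walking an object with the reference-stealing *_set_new variants and
 * the iterator API defined in this file.
 *
 *     #include <stdio.h>
 *     #include <jansson.h>
 *
 *     static void object_demo(void)
 *     {
 *         json_t *obj = json_object();
 *         json_object_set_new(obj, "method", json_string("getwork"));
 *         json_object_set_new(obj, "id", json_integer(1));
 *
 *         void *it = json_object_iter(obj);
 *         while (it) {
 *             printf("%s\n", json_object_iter_key(it));
 *             it = json_object_iter_next(obj, it);
 *         }
 *         json_decref(obj);           // frees the keys and values too
 *     }
 */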
hashtable_clear(&object->hashtable); + + return 0; +} + +int json_object_update(json_t *object, json_t *other) +{ + void *iter; + + if(!json_is_object(object) || !json_is_object(other)) + return -1; + + iter = json_object_iter(other); + while(iter) { + const char *key; + json_t *value; + + key = json_object_iter_key(iter); + value = json_object_iter_value(iter); + + if(json_object_set_nocheck(object, key, value)) + return -1; + + iter = json_object_iter_next(other, iter); + } + + return 0; +} + +void *json_object_iter(json_t *json) +{ + json_object_t *object; + + if(!json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_iter(&object->hashtable); +} + +void *json_object_iter_at(json_t *json, const char *key) +{ + json_object_t *object; + + if(!key || !json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_iter_at(&object->hashtable, string_to_key(key)); +} + +void *json_object_iter_next(json_t *json, void *iter) +{ + json_object_t *object; + + if(!json_is_object(json) || iter == NULL) + return NULL; + + object = json_to_object(json); + return hashtable_iter_next(&object->hashtable, iter); +} + +const object_key_t *jsonp_object_iter_fullkey(void *iter) +{ + if(!iter) + return NULL; + + return hashtable_iter_key(iter); +} + +const char *json_object_iter_key(void *iter) +{ + if(!iter) + return NULL; + + return jsonp_object_iter_fullkey(iter)->key; +} + +json_t *json_object_iter_value(void *iter) +{ + if(!iter) + return NULL; + + return (json_t *)hashtable_iter_value(iter); +} + +int json_object_iter_set_new(json_t *json, void *iter, json_t *value) +{ + json_object_t *object; + + if(!json_is_object(json) || !iter || !value) + return -1; + + object = json_to_object(json); + hashtable_iter_set(&object->hashtable, iter, value); + + return 0; +} + +static int json_object_equal(json_t *object1, json_t *object2) +{ + void *iter; + + if(json_object_size(object1) != json_object_size(object2)) + return 0; + + iter = json_object_iter(object1); + while(iter) + { + const char *key; + json_t *value1, *value2; + + key = json_object_iter_key(iter); + value1 = json_object_iter_value(iter); + value2 = json_object_get(object2, key); + + if(!json_equal(value1, value2)) + return 0; + + iter = json_object_iter_next(object1, iter); + } + + return 1; +} + +static json_t *json_object_copy(json_t *object) +{ + json_t *result; + void *iter; + + result = json_object(); + if(!result) + return NULL; + + iter = json_object_iter(object); + while(iter) + { + const char *key; + json_t *value; + + key = json_object_iter_key(iter); + value = json_object_iter_value(iter); + json_object_set_nocheck(result, key, value); + + iter = json_object_iter_next(object, iter); + } + + return result; +} + +static json_t *json_object_deep_copy(json_t *object) +{ + json_t *result; + void *iter; + + result = json_object(); + if(!result) + return NULL; + + iter = json_object_iter(object); + while(iter) + { + const char *key; + json_t *value; + + key = json_object_iter_key(iter); + value = json_object_iter_value(iter); + json_object_set_new_nocheck(result, key, json_deep_copy(value)); + + iter = json_object_iter_next(object, iter); + } + + return result; +} + + +/*** array ***/ + +json_t *json_array(void) +{ + json_array_t *array = malloc(sizeof(json_array_t)); + if(!array) + return NULL; + json_init(&array->json, JSON_ARRAY); + + array->entries = 0; + array->size = 8; + + array->table = malloc(array->size * sizeof(json_t *)); + if(!array->table) { + free(array); + return 
NULL; + } + + array->visited = 0; + + return &array->json; +} + +static void json_delete_array(json_array_t *array) +{ + unsigned int i; + + for(i = 0; i < array->entries; i++) + json_decref(array->table[i]); + + free(array->table); + free(array); +} + +unsigned int json_array_size(const json_t *json) +{ + if(!json_is_array(json)) + return 0; + + return json_to_array(json)->entries; +} + +json_t *json_array_get(const json_t *json, unsigned int index) +{ + json_array_t *array; + if(!json_is_array(json)) + return NULL; + array = json_to_array(json); + + if(index >= array->entries) + return NULL; + + return array->table[index]; +} + +int json_array_set_new(json_t *json, unsigned int index, json_t *value) +{ + json_array_t *array; + + if(!value) + return -1; + + if(!json_is_array(json) || json == value) + { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if(index >= array->entries) + { + json_decref(value); + return -1; + } + + json_decref(array->table[index]); + array->table[index] = value; + + return 0; +} + +static void array_move(json_array_t *array, unsigned int dest, + unsigned int src, unsigned int count) +{ + memmove(&array->table[dest], &array->table[src], count * sizeof(json_t *)); +} + +static void array_copy(json_t **dest, unsigned int dpos, + json_t **src, unsigned int spos, + unsigned int count) +{ + memcpy(&dest[dpos], &src[spos], count * sizeof(json_t *)); +} + +static json_t **json_array_grow(json_array_t *array, + unsigned int amount, + int copy) +{ + unsigned int new_size; + json_t **old_table, **new_table; + + if(array->entries + amount <= array->size) + return array->table; + + old_table = array->table; + + new_size = max(array->size + amount, array->size * 2); + new_table = malloc(new_size * sizeof(json_t *)); + if(!new_table) + return NULL; + + array->size = new_size; + array->table = new_table; + + if(copy) { + array_copy(array->table, 0, old_table, 0, array->entries); + free(old_table); + return array->table; + } + + return old_table; +} + +int json_array_append_new(json_t *json, json_t *value) +{ + json_array_t *array; + + if(!value) + return -1; + + if(!json_is_array(json) || json == value) + { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if(!json_array_grow(array, 1, 1)) { + json_decref(value); + return -1; + } + + array->table[array->entries] = value; + array->entries++; + + return 0; +} + +int json_array_insert_new(json_t *json, unsigned int index, json_t *value) +{ + json_array_t *array; + json_t **old_table; + + if(!value) + return -1; + + if(!json_is_array(json) || json == value) { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if(index > array->entries) { + json_decref(value); + return -1; + } + + old_table = json_array_grow(array, 1, 0); + if(!old_table) { + json_decref(value); + return -1; + } + + if(old_table != array->table) { + array_copy(array->table, 0, old_table, 0, index); + array_copy(array->table, index + 1, old_table, index, + array->entries - index); + free(old_table); + } + else + array_move(array, index + 1, index, array->entries - index); + + array->table[index] = value; + array->entries++; + + return 0; +} + +int json_array_remove(json_t *json, unsigned int index) +{ + json_array_t *array; + + if(!json_is_array(json)) + return -1; + array = json_to_array(json); + + if(index >= array->entries) + return -1; + + json_decref(array->table[index]); + + array_move(array, index, index + 1, array->entries - index); + array->entries--; + + return 0; +} + +int 
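/*
 * Illustrative sketch (not part of the original sources): the array API
 * with the *_new variants, which take ownership of the value instead of
 * adding a reference.
 *
 *     #include <jansson.h>
 *
 *     static json_t *array_demo(void)
 *     {
 *         json_t *arr = json_array();
 *         json_array_append_new(arr, json_integer(1));
 *         json_array_append_new(arr, json_string("two"));
 *         json_array_insert_new(arr, 0, json_real(0.5));
 *         // arr is now [0.5, 1, "two"] and json_array_size(arr) == 3
 *         return arr;                 // caller releases it with json_decref()
 *     }
 */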
json_array_clear(json_t *json) +{ + json_array_t *array; + unsigned int i; + + if(!json_is_array(json)) + return -1; + array = json_to_array(json); + + for(i = 0; i < array->entries; i++) + json_decref(array->table[i]); + + array->entries = 0; + return 0; +} + +int json_array_extend(json_t *json, json_t *other_json) +{ + json_array_t *array, *other; + unsigned int i; + + if(!json_is_array(json) || !json_is_array(other_json)) + return -1; + array = json_to_array(json); + other = json_to_array(other_json); + + if(!json_array_grow(array, other->entries, 1)) + return -1; + + for(i = 0; i < other->entries; i++) + json_incref(other->table[i]); + + array_copy(array->table, array->entries, other->table, 0, other->entries); + + array->entries += other->entries; + return 0; +} + +static int json_array_equal(json_t *array1, json_t *array2) +{ + unsigned int i, size; + + size = json_array_size(array1); + if(size != json_array_size(array2)) + return 0; + + for(i = 0; i < size; i++) + { + json_t *value1, *value2; + + value1 = json_array_get(array1, i); + value2 = json_array_get(array2, i); + + if(!json_equal(value1, value2)) + return 0; + } + + return 1; +} + +static json_t *json_array_copy(json_t *array) +{ + json_t *result; + unsigned int i; + + result = json_array(); + if(!result) + return NULL; + + for(i = 0; i < json_array_size(array); i++) + json_array_append(result, json_array_get(array, i)); + + return result; +} + +static json_t *json_array_deep_copy(json_t *array) +{ + json_t *result; + unsigned int i; + + result = json_array(); + if(!result) + return NULL; + + for(i = 0; i < json_array_size(array); i++) + json_array_append_new(result, json_deep_copy(json_array_get(array, i))); + + return result; +} + +/*** string ***/ + +json_t *json_string_nocheck(const char *value) +{ + json_string_t *string; + + if(!value) + return NULL; + + string = malloc(sizeof(json_string_t)); + if(!string) + return NULL; + json_init(&string->json, JSON_STRING); + + string->value = strdup(value); + if(!string->value) { + free(string); + return NULL; + } + + return &string->json; +} + +json_t *json_string(const char *value) +{ + if(!value || !utf8_check_string(value, -1)) + return NULL; + + return json_string_nocheck(value); +} + +const char *json_string_value(const json_t *json) +{ + if(!json_is_string(json)) + return NULL; + + return json_to_string(json)->value; +} + +int json_string_set_nocheck(json_t *json, const char *value) +{ + char *dup; + json_string_t *string; + + dup = strdup(value); + if(!dup) + return -1; + + string = json_to_string(json); + free(string->value); + string->value = dup; + + return 0; +} + +int json_string_set(json_t *json, const char *value) +{ + if(!value || !utf8_check_string(value, -1)) + return -1; + + return json_string_set_nocheck(json, value); +} + +static void json_delete_string(json_string_t *string) +{ + free(string->value); + free(string); +} + +static int json_string_equal(json_t *string1, json_t *string2) +{ + return strcmp(json_string_value(string1), json_string_value(string2)) == 0; +} + +static json_t *json_string_copy(json_t *string) +{ + return json_string_nocheck(json_string_value(string)); +} + + +/*** integer ***/ + +json_t *json_integer(int value) +{ + json_integer_t *integer = malloc(sizeof(json_integer_t)); + if(!integer) + return NULL; + json_init(&integer->json, JSON_INTEGER); + + integer->value = value; + return &integer->json; +} + +int json_integer_value(const json_t *json) +{ + if(!json_is_integer(json)) + return 0; + + return json_to_integer(json)->value; +} + 
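/*
 * Note: in this bundled jansson version JSON integers are plain C ints
 * (json_integer() takes an int) and the lexer in load.c rejects integer
 * literals outside the int range, so very large numeric fields only get
 * through when they are written as reals.  Minimal sketch:
 *
 *     json_t *n = json_integer(42);
 *     int    v = json_integer_value(n);     // 42
 *     double d = json_number_value(n);      // 42.0, also works for reals
 *     json_decref(n);
 */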
+int json_integer_set(json_t *json, int value) +{ + if(!json_is_integer(json)) + return -1; + + json_to_integer(json)->value = value; + + return 0; +} + +static void json_delete_integer(json_integer_t *integer) +{ + free(integer); +} + +static int json_integer_equal(json_t *integer1, json_t *integer2) +{ + return json_integer_value(integer1) == json_integer_value(integer2); +} + +static json_t *json_integer_copy(json_t *integer) +{ + return json_integer(json_integer_value(integer)); +} + + +/*** real ***/ + +json_t *json_real(double value) +{ + json_real_t *real = malloc(sizeof(json_real_t)); + if(!real) + return NULL; + json_init(&real->json, JSON_REAL); + + real->value = value; + return &real->json; +} + +double json_real_value(const json_t *json) +{ + if(!json_is_real(json)) + return 0; + + return json_to_real(json)->value; +} + +int json_real_set(json_t *json, double value) +{ + if(!json_is_real(json)) + return 0; + + json_to_real(json)->value = value; + + return 0; +} + +static void json_delete_real(json_real_t *real) +{ + free(real); +} + +static int json_real_equal(json_t *real1, json_t *real2) +{ + return json_real_value(real1) == json_real_value(real2); +} + +static json_t *json_real_copy(json_t *real) +{ + return json_real(json_real_value(real)); +} + + +/*** number ***/ + +double json_number_value(const json_t *json) +{ + if(json_is_integer(json)) + return json_integer_value(json); + else if(json_is_real(json)) + return json_real_value(json); + else + return 0.0; +} + + +/*** simple values ***/ + +json_t *json_true(void) +{ + static json_t the_true = { + .type = JSON_TRUE, + .refcount = (unsigned int)-1 + }; + return &the_true; +} + + +json_t *json_false(void) +{ + static json_t the_false = { + .type = JSON_FALSE, + .refcount = (unsigned int)-1 + }; + return &the_false; +} + + +json_t *json_null(void) +{ + static json_t the_null = { + .type = JSON_NULL, + .refcount = (unsigned int)-1 + }; + return &the_null; +} + + +/*** deletion ***/ + +void json_delete(json_t *json) +{ + if(json_is_object(json)) + json_delete_object(json_to_object(json)); + + else if(json_is_array(json)) + json_delete_array(json_to_array(json)); + + else if(json_is_string(json)) + json_delete_string(json_to_string(json)); + + else if(json_is_integer(json)) + json_delete_integer(json_to_integer(json)); + + else if(json_is_real(json)) + json_delete_real(json_to_real(json)); + + /* json_delete is not called for true, false or null */ +} + + +/*** equality ***/ + +int json_equal(json_t *json1, json_t *json2) +{ + if(!json1 || !json2) + return 0; + + if(json_typeof(json1) != json_typeof(json2)) + return 0; + + /* this covers true, false and null as they are singletons */ + if(json1 == json2) + return 1; + + if(json_is_object(json1)) + return json_object_equal(json1, json2); + + if(json_is_array(json1)) + return json_array_equal(json1, json2); + + if(json_is_string(json1)) + return json_string_equal(json1, json2); + + if(json_is_integer(json1)) + return json_integer_equal(json1, json2); + + if(json_is_real(json1)) + return json_real_equal(json1, json2); + + return 0; +} + + +/*** copying ***/ + +json_t *json_copy(json_t *json) +{ + if(!json) + return NULL; + + if(json_is_object(json)) + return json_object_copy(json); + + if(json_is_array(json)) + return json_array_copy(json); + + if(json_is_string(json)) + return json_string_copy(json); + + if(json_is_integer(json)) + return json_integer_copy(json); + + if(json_is_real(json)) + return json_real_copy(json); + + if(json_is_true(json) || json_is_false(json) || 
json_is_null(json)) + return json; + + return NULL; +} + +json_t *json_deep_copy(json_t *json) +{ + if(!json) + return NULL; + + if(json_is_object(json)) + return json_object_deep_copy(json); + + if(json_is_array(json)) + return json_array_deep_copy(json); + + /* for the rest of the types, deep copying doesn't differ from + shallow copying */ + + if(json_is_string(json)) + return json_string_copy(json); + + if(json_is_integer(json)) + return json_integer_copy(json); + + if(json_is_real(json)) + return json_real_copy(json); + + if(json_is_true(json) || json_is_false(json) || json_is_null(json)) + return json; + + return NULL; +} diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..0895b67 --- /dev/null +++ b/configure.ac @@ -0,0 +1,167 @@ +AC_INIT([nolambocoin], [1.0]) + +AC_PREREQ([2.59c]) +AC_CANONICAL_SYSTEM +AC_CONFIG_SRCDIR([cpu-miner.c]) +AM_INIT_AUTOMAKE([foreign]) +AC_CONFIG_HEADERS([cpuminer-config.h]) + +dnl Make sure anyone changing configure.ac/Makefile.am has a clue +AM_MAINTAINER_MODE + +EXTERNAL_CFLAGS="$CFLAGS" + +dnl Checks for programs +AC_PROG_CC_C99 +AC_PROG_GCC_TRADITIONAL +AM_PROG_CC_C_O +AM_PROG_AS +AC_PROG_RANLIB + +if test -n "$EXTERNAL_CFLAGS"; then + CFLAGS="$EXTERNAL_CFLAGS" +else + CFLAGS='-Wall -O2 -fomit-frame-pointer' +fi + +dnl Checks for header files +AC_HEADER_STDC +AC_CHECK_HEADERS([sys/endian.h sys/param.h syslog.h]) +# sys/sysctl.h requires sys/types.h on FreeBSD +# sys/sysctl.h requires sys/param.h on OpenBSD +AC_CHECK_HEADERS([sys/sysctl.h], [], [], +[#include +#ifdef HAVE_SYS_PARAM_H +#include +#endif +]) + +AC_CHECK_DECLS([be32dec, le32dec, be32enc, le32enc], [], [], +[AC_INCLUDES_DEFAULT +#ifdef HAVE_SYS_ENDIAN_H +#include +#endif +]) + +AC_FUNC_ALLOCA +AC_CHECK_FUNCS([getopt_long]) + +PTHREAD_FLAGS="-pthread" +WS2_LIBS="" + +# conditional builds for all platforms +case $target in +*-*-mingw*) + have_win32=true + PTHREAD_FLAGS="" + WS2_LIBS="-lws2_32" + ;; + +*linux*) + if test -z "$LIBCURL"; then + LIBCURL="-lcurl" + fi + + # libcurl install path (for mingw : --with-curl=/usr/local) + AC_ARG_WITH([curl], + [ --with-curl=PATH prefix where curl is installed [default=/usr]]) + + if test -n "$with_curl"; then + LIBCURL_CFLAGS="$LIBCURL_CFLAGS -I$with_curl/include" + LIBCURL_CPPFLAGS="$LIBCURL_CPPFLAGS -I$with_curl/include" + LIBCURL_LDFLAGS="$LIBCURL_LDFLAGS -L$with_curl/lib" + fi + + # SSL install path (for mingw : --with-crypto=/usr/local/ssl) + AC_ARG_WITH([crypto], + [ --with-crypto=PATH prefix where openssl crypto is installed [default=/usr]]) + + if test -n "$with_crypto" ; then + LIBCURL_CFLAGS="$LIBCURL_CFLAGS -I$with_crypto/include" + LIBCURL_CPPFLAGS="$LIBCURL_CPPFLAGS -I$with_crypto/include" + LIBCURL_LDFLAGS="-L$with_crypto/lib $LIBCURL_LDFLAGS" + LIBCURL="$LIBCURL -lssl -lcrypto" + fi + + CFLAGS="$CFLAGS $LIBCURL_CFLAGS" + CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS" + LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS" + + AC_SUBST(LIBCURL) + # AC_SUBST(LIBCURL_CFLAGS) + # AC_SUBST(LIBCURL_CPPFLAGS) + # AC_SUBST(LIBCURL_LDFLAGS) + ;; + +*-apple-*) + if test -z "$LIBCURL"; then + LIBCURL="-lcurl" + fi + + # libcurl install path (for mingw : --with-curl=/usr/local) + AC_ARG_WITH([curl], + [ --with-curl=PATH prefix where curl is installed [default=/usr]]) + + if test -n "$with_curl"; then + LIBCURL_CFLAGS="$LIBCURL_CFLAGS -I$with_curl/include" + LIBCURL_CPPFLAGS="$LIBCURL_CPPFLAGS -I$with_curl/include" + LIBCURL_LDFLAGS="$LIBCURL_LDFLAGS -L$with_curl/lib" + fi + + # SSL install path (for mingw : --with-crypto=/usr/local/ssl) + 
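# For example (illustrative; both prefixes are assumptions that depend on
# where the dependencies were actually installed):
#   ./configure --with-curl=/usr/local --with-crypto=/usr/local/ssl
# Without these switches LIBCURL simply defaults to -lcurl and no extra
# OpenSSL include/library paths are added.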
AC_ARG_WITH([crypto], + [ --with-crypto=PATH prefix where openssl crypto is installed [default=/usr]]) + + if test -n "$with_crypto" ; then + LIBCURL_CFLAGS="$LIBCURL_CFLAGS -I$with_crypto/include" + LIBCURL_CPPFLAGS="$LIBCURL_CPPFLAGS -I$with_crypto/include" + LIBCURL_LDFLAGS="-L$with_crypto/lib $LIBCURL_LDFLAGS" + LIBCURL="$LIBCURL -lssl -lcrypto" + fi + + CFLAGS="$CFLAGS $LIBCURL_CFLAGS" + CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS" + LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS" + + AC_SUBST(LIBCURL) + # AC_SUBST(LIBCURL_CFLAGS) + # AC_SUBST(LIBCURL_CPPFLAGS) + # AC_SUBST(LIBCURL_LDFLAGS) + ;; +esac + +case $target in + *-*-mingw*) +# LIBCURL_CHECK_CONFIG(, 7.15.2, , +# [AC_MSG_ERROR([Missing required libcurl >= 7.15.2])]) + ;; +esac + +AC_CHECK_LIB(jansson, json_loads, request_jansson=false, request_jansson=true) +AC_CHECK_LIB([pthread], [pthread_create], PTHREAD_LIBS="-lpthread", + AC_CHECK_LIB([pthreadGC2], [pthread_create], PTHREAD_LIBS="-lpthreadGC2", + AC_CHECK_LIB([pthreadGC1], [pthread_create], PTHREAD_LIBS="-lpthreadGC1", + AC_CHECK_LIB([pthreadGC], [pthread_create], PTHREAD_LIBS="-lpthreadGC" +)))) + +AM_CONDITIONAL([WANT_JANSSON], [test x$request_jansson = xtrue]) +AM_CONDITIONAL([HAVE_WINDOWS], [test x$have_win32 = xtrue]) + +if test x$request_jansson = xtrue +then + JANSSON_LIBS="compat/jansson/libjansson.a" +else + JANSSON_LIBS=-ljansson +fi + +AC_SUBST(JANSSON_LIBS) +AC_SUBST(PTHREAD_FLAGS) +AC_SUBST(PTHREAD_LIBS) +AC_SUBST(WS2_LIBS) + +AC_CONFIG_FILES([ + Makefile + compat/Makefile + compat/jansson/Makefile + ]) +AC_OUTPUT diff --git a/cpu-miner.c b/cpu-miner.c new file mode 100644 index 0000000..340dc29 --- /dev/null +++ b/cpu-miner.c @@ -0,0 +1,2102 @@ +/* + * Copyright 2010 Jeff Garzik + * Copyright 2012-2017 pooler + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "cpuminer-config.h" +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef WIN32 +#include +#else +#include +#include +#include +#if HAVE_SYS_SYSCTL_H +#include +#if HAVE_SYS_PARAM_H +#include +#endif +#include +#endif +#endif +#include +#include +#include "compat.h" +#include "miner.h" + +#define PROGRAM_NAME "sugarmaker" +#define LP_SCANTIME 60 + +#ifdef __linux /* Linux specific policy and affinity management */ +#include +static inline void drop_policy(void) +{ + struct sched_param param; + param.sched_priority = 0; + +#ifdef SCHED_IDLE + if (unlikely(sched_setscheduler(0, SCHED_IDLE, ¶m) == -1)) +#endif +#ifdef SCHED_BATCH + sched_setscheduler(0, SCHED_BATCH, ¶m); +#endif +} + +static inline void affine_to_cpu(int id, int cpu) +{ + cpu_set_t set; + + CPU_ZERO(&set); + CPU_SET(cpu, &set); + sched_setaffinity(0, sizeof(set), &set); +} +#elif defined(__FreeBSD__) /* FreeBSD specific policy and affinity management */ +#include +static inline void drop_policy(void) +{ +} + +static inline void affine_to_cpu(int id, int cpu) +{ + cpuset_t set; + CPU_ZERO(&set); + CPU_SET(cpu, &set); + cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(cpuset_t), &set); +} +#else +static inline void drop_policy(void) +{ +} + +static inline void affine_to_cpu(int id, int cpu) +{ +} +#endif + +enum workio_commands { + WC_GET_WORK, + WC_SUBMIT_WORK, +}; + +struct workio_cmd { + enum workio_commands cmd; + struct thr_info *thr; + union { + struct work *work; + } u; +}; + +enum algos { + ALGO_SUGAR_YESPOWER_1_0_1, + ALGO_ISO_YESPOWER_1_0_1, + ALGO_NULL_YESPOWER_1_0_1, + ALGO_URX_YESPOWER_1_0_1, + ALGO_LITB_YESPOWER_1_0_1, + ALGO_IOTS_YESPOWER_1_0_1, + ALGO_ITC_YESPOWER_1_0_1, + ALGO_YTN_YESPOWER_1_0_1, + ALGO_MBC_YESPOWER_1_0_1, + ALGO_ARM_YESPOWER_1_0_1, +}; + +static const char *algo_names[] = { + [ALGO_SUGAR_YESPOWER_1_0_1] = "YespowerSugar", + [ALGO_ISO_YESPOWER_1_0_1] = "YespowerIso", + [ALGO_NULL_YESPOWER_1_0_1] = "YespowerNull", + [ALGO_URX_YESPOWER_1_0_1] = "YespowerUrx", + [ALGO_LITB_YESPOWER_1_0_1] = "YespowerLitb", + [ALGO_IOTS_YESPOWER_1_0_1] = "YespowerIots", + [ALGO_ITC_YESPOWER_1_0_1] = "YespowerItc", + [ALGO_YTN_YESPOWER_1_0_1] = "yespowerr16", + [ALGO_MBC_YESPOWER_1_0_1] = "YespowerMbc", + [ALGO_ARM_YESPOWER_1_0_1] = "YespowerARM", +}; + +bool opt_debug = false; +bool opt_protocol = false; +static bool opt_benchmark = false; +bool opt_redirect = true; +bool want_longpoll = true; +bool have_longpoll = false; +bool have_gbt = true; +bool allow_getwork = false; +bool want_stratum = true; +bool have_stratum = false; +bool use_syslog = false; +static bool opt_background = false; +static bool opt_quiet = false; +static int opt_retries = -1; +static int opt_fail_pause = 30; +int opt_timeout = 0; +static int opt_scantime = 5; +static enum algos opt_algo = ALGO_YTN_YESPOWER_1_0_1; +static int opt_n_threads; +static int num_processors; +static char *rpc_url; +static char *rpc_userpass; +static char *rpc_user, *rpc_pass; +static int pk_script_size; +static unsigned char pk_script[42]; +static char coinbase_sig[101] = ""; +char *opt_cert; +char *opt_proxy; +long opt_proxy_type; +struct thr_info *thr_info; +static int work_thr_id; +int longpoll_thr_id = -1; +int stratum_thr_id = -1; +struct work_restart *work_restart = NULL; +static struct stratum_ctx stratum; + +pthread_mutex_t applog_lock; +static pthread_mutex_t stats_lock; + +static unsigned long accepted_count = 0L; +static unsigned long rejected_count = 0L; +static double 
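/*
 * Example invocation (illustrative only; the pool URL and credentials
 * are placeholders, not a real endpoint, and the full option list is in
 * the usage text below):
 *
 *     ./sugarmaker -a YespowerARM -t 4 \
 *         -o stratum+tcp://pool.example.com:3333 -u WALLET -p x
 */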
*thr_hashrates; + +#ifdef HAVE_GETOPT_LONG +#include +#else +struct option { + const char *name; + int has_arg; + int *flag; + int val; +}; +#endif + +static char const usage[] = "\ +Usage: " PROGRAM_NAME " [OPTIONS]\n\ +Options:\n\ + -a, --algo=ALGO specify the algorithm to use\n\ + yespowerr16: Yenten (default)\n\ + YespowerIso: IsotopeC\n\ + YespowerNull: CranePay, Bellcoin, Veco, SwampCoin\n\ + YespowerUrx: UraniumX\n\ + YespowerLitb: LightBit\n\ + YespowerIots: IOTS\n\ + YespowerItc: Intercoin\n\ + YespowerSugar: Sugarchain\n\ + YespowerMbc: power2b for MicroBitcoin\n\ + YespowerARM: NoLamboCoin + -o, --url=URL URL of mining server\n\ + -O, --userpass=U:P username:password pair for mining server\n\ + -u, --user=USERNAME username for mining server\n\ + -p, --pass=PASSWORD password for mining server\n\ + --cert=FILE certificate for mining server using SSL\n\ + -x, --proxy=[PROTOCOL://]HOST[:PORT] connect through a proxy\n\ + -t, --threads=N number of miner threads (default: number of processors)\n\ + -r, --retries=N number of times to retry if a network call fails\n\ + (default: retry indefinitely)\n\ + -R, --retry-pause=N time to pause between retries, in seconds (default: 30)\n\ + -T, --timeout=N timeout for long polling, in seconds (default: none)\n\ + -s, --scantime=N upper bound on time spent scanning current work when\n\ + long polling is unavailable, in seconds (default: 5)\n\ + --coinbase-addr=ADDR payout address for solo mining\n\ + --coinbase-sig=TEXT data to insert in the coinbase when possible\n\ + --no-longpoll disable long polling support\n\ + --no-getwork disable getwork support\n\ + --no-gbt disable getblocktemplate support\n\ + --no-stratum disable X-Stratum support\n\ + --no-redirect ignore requests to change the URL of the mining server\n\ + -q, --quiet disable per-thread hashmeter output\n\ + -D, --debug enable debug output\n\ + -P, --protocol-dump verbose dump of protocol-level activities\n" +#ifdef HAVE_SYSLOG_H +"\ + -S, --syslog use system log for output messages\n" +#endif +#ifndef WIN32 +"\ + -B, --background run the miner in the background\n" +#endif +"\ + --benchmark run in offline benchmark mode\n\ + -c, --config=FILE load a JSON-format configuration file\n\ + -V, --version display version information and exit\n\ + -h, --help display this help text and exit\n\ +"; + +static char const short_options[] = +#ifndef WIN32 + "B" +#endif +#ifdef HAVE_SYSLOG_H + "S" +#endif + "a:c:Dhp:Px:qr:R:s:t:T:o:u:O:V"; + +static struct option const options[] = { + { "algo", 1, NULL, 'a' }, +#ifndef WIN32 + { "background", 0, NULL, 'B' }, +#endif + { "benchmark", 0, NULL, 1005 }, + { "cert", 1, NULL, 1001 }, + { "coinbase-addr", 1, NULL, 1013 }, + { "coinbase-sig", 1, NULL, 1015 }, + { "config", 1, NULL, 'c' }, + { "debug", 0, NULL, 'D' }, + { "help", 0, NULL, 'h' }, + { "no-gbt", 0, NULL, 1011 }, + { "no-getwork", 0, NULL, 1010 }, + { "no-longpoll", 0, NULL, 1003 }, + { "no-redirect", 0, NULL, 1009 }, + { "no-stratum", 0, NULL, 1007 }, + { "pass", 1, NULL, 'p' }, + { "protocol-dump", 0, NULL, 'P' }, + { "proxy", 1, NULL, 'x' }, + { "quiet", 0, NULL, 'q' }, + { "retries", 1, NULL, 'r' }, + { "retry-pause", 1, NULL, 'R' }, + { "scantime", 1, NULL, 's' }, +#ifdef HAVE_SYSLOG_H + { "syslog", 0, NULL, 'S' }, +#endif + { "threads", 1, NULL, 't' }, + { "timeout", 1, NULL, 'T' }, + { "url", 1, NULL, 'o' }, + { "user", 1, NULL, 'u' }, + { "userpass", 1, NULL, 'O' }, + { "version", 0, NULL, 'V' }, + { 0, 0, 0, 0 } +}; + +struct work { + uint32_t data[32]; + uint32_t target[8]; + + int 
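/*
 * data[] holds the 80-byte block header as 32 little-endian words
 * followed by SHA-256 padding: elsewhere in this file data[20] is set
 * to 0x80000000 (the padding bit) and data[31] to 0x00000280 (640, the
 * header length in bits).  target[] is the 256-bit share target stored
 * least-significant word first.
 */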
height; + char *txs; + char *workid; + + char *job_id; + size_t xnonce2_len; + unsigned char *xnonce2; +}; + +static struct work g_work; +static time_t g_work_time; +static pthread_mutex_t g_work_lock; +static bool submit_old = false; +static char *lp_id; + +static inline void work_free(struct work *w) +{ + free(w->txs); + free(w->workid); + free(w->job_id); + free(w->xnonce2); +} + +static inline void work_copy(struct work *dest, const struct work *src) +{ + memcpy(dest, src, sizeof(struct work)); + if (src->txs) + dest->txs = strdup(src->txs); + if (src->workid) + dest->workid = strdup(src->workid); + if (src->job_id) + dest->job_id = strdup(src->job_id); + if (src->xnonce2) { + dest->xnonce2 = malloc(src->xnonce2_len); + memcpy(dest->xnonce2, src->xnonce2, src->xnonce2_len); + } +} + +static bool jobj_binary(const json_t *obj, const char *key, + void *buf, size_t buflen) +{ + const char *hexstr; + json_t *tmp; + + tmp = json_object_get(obj, key); + if (unlikely(!tmp)) { + applog(LOG_ERR, "JSON key '%s' not found", key); + return false; + } + hexstr = json_string_value(tmp); + if (unlikely(!hexstr)) { + applog(LOG_ERR, "JSON key '%s' is not a string", key); + return false; + } + if (!hex2bin(buf, hexstr, buflen)) + return false; + + return true; +} + +static bool work_decode(const json_t *val, struct work *work) +{ + int i; + + if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) { + applog(LOG_ERR, "JSON invalid data"); + goto err_out; + } + if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) { + applog(LOG_ERR, "JSON invalid target"); + goto err_out; + } + + for (i = 0; i < ARRAY_SIZE(work->data); i++) + work->data[i] = le32dec(work->data + i); + for (i = 0; i < ARRAY_SIZE(work->target); i++) + work->target[i] = le32dec(work->target + i); + + return true; + +err_out: + return false; +} + +static bool gbt_work_decode(const json_t *val, struct work *work) +{ + int i, n; + uint32_t version, curtime, bits; + uint32_t prevhash[8]; + uint32_t target[8]; + int cbtx_size; + unsigned char *cbtx = NULL; + int tx_count, tx_size; + unsigned char txc_vi[9]; + unsigned char (*merkle_tree)[32] = NULL; + bool coinbase_append = false; + bool submit_coinbase = false; + bool segwit = false; + json_t *tmp, *txa; + bool rc = false; + + tmp = json_object_get(val, "rules"); + if (tmp && json_is_array(tmp)) { + n = json_array_size(tmp); + for (i = 0; i < n; i++) { + const char *s = json_string_value(json_array_get(tmp, i)); + if (!s) + continue; + if (!strcmp(s, "segwit") || !strcmp(s, "!segwit")) + segwit = true; + } + } + + tmp = json_object_get(val, "mutable"); + if (tmp && json_is_array(tmp)) { + n = json_array_size(tmp); + for (i = 0; i < n; i++) { + const char *s = json_string_value(json_array_get(tmp, i)); + if (!s) + continue; + if (!strcmp(s, "coinbase/append")) + coinbase_append = true; + else if (!strcmp(s, "submit/coinbase")) + submit_coinbase = true; + } + } + + tmp = json_object_get(val, "height"); + if (!tmp || !json_is_integer(tmp)) { + applog(LOG_ERR, "JSON invalid height"); + goto out; + } + work->height = json_integer_value(tmp); + + tmp = json_object_get(val, "version"); + if (!tmp || !json_is_integer(tmp)) { + applog(LOG_ERR, "JSON invalid version"); + goto out; + } + version = json_integer_value(tmp); + + if (unlikely(!jobj_binary(val, "previousblockhash", prevhash, sizeof(prevhash)))) { + applog(LOG_ERR, "JSON invalid previousblockhash"); + goto out; + } + + tmp = json_object_get(val, "curtime"); + if (!tmp || !json_is_integer(tmp)) { + 
applog(LOG_ERR, "JSON invalid curtime"); + goto out; + } + curtime = json_integer_value(tmp); + + if (unlikely(!jobj_binary(val, "bits", &bits, sizeof(bits)))) { + applog(LOG_ERR, "JSON invalid bits"); + goto out; + } + + /* find count and size of transactions */ + txa = json_object_get(val, "transactions"); + if (!txa || !json_is_array(txa)) { + applog(LOG_ERR, "JSON invalid transactions"); + goto out; + } + tx_count = json_array_size(txa); + tx_size = 0; + for (i = 0; i < tx_count; i++) { + const json_t *tx = json_array_get(txa, i); + const char *tx_hex = json_string_value(json_object_get(tx, "data")); + if (!tx_hex) { + applog(LOG_ERR, "JSON invalid transactions"); + goto out; + } + tx_size += strlen(tx_hex) / 2; + } + + /* build coinbase transaction */ + tmp = json_object_get(val, "coinbasetxn"); + if (tmp) { + const char *cbtx_hex = json_string_value(json_object_get(tmp, "data")); + cbtx_size = cbtx_hex ? strlen(cbtx_hex) / 2 : 0; + cbtx = malloc(cbtx_size + 100); + if (cbtx_size < 60 || !hex2bin(cbtx, cbtx_hex, cbtx_size)) { + applog(LOG_ERR, "JSON invalid coinbasetxn"); + goto out; + } + } else { + int64_t cbvalue; + if (!pk_script_size) { + if (allow_getwork) { + applog(LOG_INFO, "No payout address provided, switching to getwork"); + have_gbt = false; + } else + applog(LOG_ERR, "No payout address provided"); + goto out; + } + tmp = json_object_get(val, "coinbasevalue"); + if (!tmp || !json_is_number(tmp)) { + applog(LOG_ERR, "JSON invalid coinbasevalue"); + goto out; + } + cbvalue = json_is_integer(tmp) ? json_integer_value(tmp) : json_number_value(tmp); + cbtx = malloc(256); + le32enc((uint32_t *)cbtx, 1); /* version */ + cbtx[4] = 1; /* in-counter */ + memset(cbtx+5, 0x00, 32); /* prev txout hash */ + le32enc((uint32_t *)(cbtx+37), 0xffffffff); /* prev txout index */ + cbtx_size = 43; + /* BIP 34: height in coinbase */ + for (n = work->height; n; n >>= 8) { + cbtx[cbtx_size++] = n & 0xff; + if (n < 0x100 && n >= 0x80) + cbtx[cbtx_size++] = 0; + } + cbtx[42] = cbtx_size - 43; + cbtx[41] = cbtx_size - 42; /* scriptsig length */ + le32enc((uint32_t *)(cbtx+cbtx_size), 0xffffffff); /* sequence */ + cbtx_size += 4; + cbtx[cbtx_size++] = segwit ? 
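/*
 * Worked example for the BIP 34 height serialization above: height
 * 500000 = 0x07a120 is emitted little-endian as 20 a1 07, so cbtx[42]
 * (the push length) becomes 3 and cbtx[41] (the scriptsig length)
 * becomes 4.  A height whose top byte is >= 0x80, e.g. 128 = 0x80, gets
 * a trailing 00 so the script number stays non-negative.
 */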
2 : 1; /* out-counter */ + le32enc((uint32_t *)(cbtx+cbtx_size), (uint32_t)cbvalue); /* value */ + le32enc((uint32_t *)(cbtx+cbtx_size+4), cbvalue >> 32); + cbtx_size += 8; + cbtx[cbtx_size++] = pk_script_size; /* txout-script length */ + memcpy(cbtx+cbtx_size, pk_script, pk_script_size); + cbtx_size += pk_script_size; + if (segwit) { + unsigned char (*wtree)[32] = calloc(tx_count + 2, 32); + memset(cbtx+cbtx_size, 0, 8); /* value */ + cbtx_size += 8; + cbtx[cbtx_size++] = 38; /* txout-script length */ + cbtx[cbtx_size++] = 0x6a; /* txout-script */ + cbtx[cbtx_size++] = 0x24; + cbtx[cbtx_size++] = 0xaa; + cbtx[cbtx_size++] = 0x21; + cbtx[cbtx_size++] = 0xa9; + cbtx[cbtx_size++] = 0xed; + for (i = 0; i < tx_count; i++) { + const json_t *tx = json_array_get(txa, i); + const json_t *hash = json_object_get(tx, "hash"); + if (!hash || !hex2bin(wtree[1+i], json_string_value(hash), 32)) { + applog(LOG_ERR, "JSON invalid transaction hash"); + free(wtree); + goto out; + } + memrev(wtree[1+i], 32); + } + n = tx_count + 1; + while (n > 1) { + if (n % 2) + memcpy(wtree[n], wtree[n-1], 32); + n = (n + 1) / 2; + for (i = 0; i < n; i++) + sha256d(wtree[i], wtree[2*i], 64); + } + memset(wtree[1], 0, 32); /* witness reserved value = 0 */ + sha256d(cbtx+cbtx_size, wtree[0], 64); + cbtx_size += 32; + free(wtree); + } + le32enc((uint32_t *)(cbtx+cbtx_size), 0); /* lock time */ + cbtx_size += 4; + coinbase_append = true; + } + if (coinbase_append) { + unsigned char xsig[100]; + int xsig_len = 0; + if (*coinbase_sig) { + n = strlen(coinbase_sig); + if (cbtx[41] + xsig_len + n <= 100) { + memcpy(xsig+xsig_len, coinbase_sig, n); + xsig_len += n; + } else { + applog(LOG_WARNING, "Signature does not fit in coinbase, skipping"); + } + } + tmp = json_object_get(val, "coinbaseaux"); + if (tmp && json_is_object(tmp)) { + void *iter = json_object_iter(tmp); + while (iter) { + unsigned char buf[100]; + const char *s = json_string_value(json_object_iter_value(iter)); + n = s ? strlen(s) / 2 : 0; + if (!s || n > 100 || !hex2bin(buf, s, n)) { + applog(LOG_ERR, "JSON invalid coinbaseaux"); + break; + } + if (cbtx[41] + xsig_len + n <= 100) { + memcpy(xsig+xsig_len, buf, n); + xsig_len += n; + } + iter = json_object_iter_next(tmp, iter); + } + } + if (xsig_len) { + unsigned char *ssig_end = cbtx + 42 + cbtx[41]; + int push_len = cbtx[41] + xsig_len < 76 ? 1 : + cbtx[41] + 2 + xsig_len > 100 ? 0 : 2; + n = xsig_len + push_len; + memmove(ssig_end + n, ssig_end, cbtx_size - 42 - cbtx[41]); + cbtx[41] += n; + if (push_len == 2) + *(ssig_end++) = 0x4c; /* OP_PUSHDATA1 */ + if (push_len) + *(ssig_end++) = xsig_len; + memcpy(ssig_end, xsig, xsig_len); + cbtx_size += n; + } + } + + n = varint_encode(txc_vi, 1 + tx_count); + work->txs = malloc(2 * (n + cbtx_size + tx_size) + 1); + bin2hex(work->txs, txc_vi, n); + bin2hex(work->txs + 2*n, cbtx, cbtx_size); + + /* generate merkle root */ + merkle_tree = malloc(32 * ((1 + tx_count + 1) & ~1)); + sha256d(merkle_tree[0], cbtx, cbtx_size); + for (i = 0; i < tx_count; i++) { + tmp = json_array_get(txa, i); + const char *tx_hex = json_string_value(json_object_get(tmp, "data")); + const int tx_size = tx_hex ? 
strlen(tx_hex) / 2 : 0; + if (segwit) { + const char *txid = json_string_value(json_object_get(tmp, "txid")); + if (!txid || !hex2bin(merkle_tree[1 + i], txid, 32)) { + applog(LOG_ERR, "JSON invalid transaction txid"); + goto out; + } + memrev(merkle_tree[1 + i], 32); + } else { + unsigned char *tx = malloc(tx_size); + if (!tx_hex || !hex2bin(tx, tx_hex, tx_size)) { + applog(LOG_ERR, "JSON invalid transactions"); + free(tx); + goto out; + } + sha256d(merkle_tree[1 + i], tx, tx_size); + free(tx); + } + if (!submit_coinbase) + strcat(work->txs, tx_hex); + } + n = 1 + tx_count; + while (n > 1) { + if (n % 2) { + memcpy(merkle_tree[n], merkle_tree[n-1], 32); + ++n; + } + n /= 2; + for (i = 0; i < n; i++) + sha256d(merkle_tree[i], merkle_tree[2*i], 64); + } + + /* assemble block header */ + work->data[0] = swab32(version); + for (i = 0; i < 8; i++) + work->data[8 - i] = le32dec(prevhash + i); + for (i = 0; i < 8; i++) + work->data[9 + i] = be32dec((uint32_t *)merkle_tree[0] + i); + work->data[17] = swab32(curtime); + work->data[18] = le32dec(&bits); + memset(work->data + 19, 0x00, 52); + work->data[20] = 0x80000000; + work->data[31] = 0x00000280; + + if (unlikely(!jobj_binary(val, "target", target, sizeof(target)))) { + applog(LOG_ERR, "JSON invalid target"); + goto out; + } + for (i = 0; i < ARRAY_SIZE(work->target); i++) + work->target[7 - i] = be32dec(target + i); + + tmp = json_object_get(val, "workid"); + if (tmp) { + if (!json_is_string(tmp)) { + applog(LOG_ERR, "JSON invalid workid"); + goto out; + } + work->workid = strdup(json_string_value(tmp)); + } + + /* Long polling */ + tmp = json_object_get(val, "longpollid"); + if (want_longpoll && json_is_string(tmp)) { + free(lp_id); + lp_id = strdup(json_string_value(tmp)); + if (!have_longpoll) { + char *lp_uri; + tmp = json_object_get(val, "longpolluri"); + lp_uri = strdup(json_is_string(tmp) ? json_string_value(tmp) : rpc_url); + have_longpoll = true; + tq_push(thr_info[longpoll_thr_id].q, lp_uri); + } + } + + rc = true; + +out: + free(cbtx); + free(merkle_tree); + return rc; +} + +static void share_result(int result, const char *reason) +{ + char s[345]; + double hashrate; + int i; + + hashrate = 0.; + pthread_mutex_lock(&stats_lock); + for (i = 0; i < opt_n_threads; i++) + hashrate += thr_hashrates[i]; + result ? accepted_count++ : rejected_count++; + pthread_mutex_unlock(&stats_lock); + + sprintf(s, hashrate >= 1e3 ? "%.0f" : "%.1f", hashrate); + applog(LOG_INFO, "accepted: %lu/%lu (%.2f%%), %s hash/s %s", + accepted_count, + accepted_count + rejected_count, + 100. * accepted_count / (accepted_count + rejected_count), + s, + result ? 
"(yay!!!)" : "(booooo)"); + + if (opt_debug && reason) + applog(LOG_DEBUG, "DEBUG: reject reason: %s", reason); +} + +static bool submit_upstream_work(CURL *curl, struct work *work) +{ + json_t *val, *res, *reason; + char data_str[2 * sizeof(work->data) + 1]; + char s[345]; + int i; + bool rc = false; + + /* pass if the previous hash is not the current previous hash */ + if (!submit_old && memcmp(work->data + 1, g_work.data + 1, 32)) { + if (opt_debug) + applog(LOG_DEBUG, "DEBUG: stale work detected, discarding"); + return true; + } + + if (have_stratum) { + unsigned char ntime[4], nonce[4]; + char ntimestr[9], noncestr[9], *xnonce2str, *req; + + le32enc(ntime, work->data[17]); + le32enc(nonce, work->data[19]); + bin2hex(ntimestr, ntime, 4); + bin2hex(noncestr, nonce, 4); + xnonce2str = abin2hex(work->xnonce2, work->xnonce2_len); + req = malloc(256 + strlen(rpc_user) + strlen(work->job_id) + 2 * work->xnonce2_len); + sprintf(req, + "{\"method\": \"mining.submit\", \"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\":4}", + rpc_user, work->job_id, xnonce2str, ntimestr, noncestr); + free(xnonce2str); + + rc = stratum_send_line(&stratum, req); + free(req); + if (unlikely(!rc)) { + applog(LOG_ERR, "submit_upstream_work stratum_send_line failed"); + goto out; + } + } else if (work->txs) { + char *req; + + for (i = 0; i < ARRAY_SIZE(work->data); i++) + be32enc(work->data + i, work->data[i]); + bin2hex(data_str, (unsigned char *)work->data, 80); + if (work->workid) { + char *params; + val = json_object(); + json_object_set_new(val, "workid", json_string(work->workid)); + params = json_dumps(val, 0); + json_decref(val); + req = malloc(128 + 2*80 + strlen(work->txs) + strlen(params)); + sprintf(req, + "{\"method\": \"submitblock\", \"params\": [\"%s%s\", %s], \"id\":1}\r\n", + data_str, work->txs, params); + free(params); + } else { + req = malloc(128 + 2*80 + strlen(work->txs)); + sprintf(req, + "{\"method\": \"submitblock\", \"params\": [\"%s%s\"], \"id\":1}\r\n", + data_str, work->txs); + } + val = json_rpc_call(curl, rpc_url, rpc_userpass, req, NULL, 0); + free(req); + if (unlikely(!val)) { + applog(LOG_ERR, "submit_upstream_work json_rpc_call failed"); + goto out; + } + + res = json_object_get(val, "result"); + if (json_is_object(res)) { + char *res_str; + bool sumres = false; + void *iter = json_object_iter(res); + while (iter) { + if (json_is_null(json_object_iter_value(iter))) { + sumres = true; + break; + } + iter = json_object_iter_next(res, iter); + } + res_str = json_dumps(res, 0); + share_result(sumres, res_str); + free(res_str); + } else + share_result(json_is_null(res), json_string_value(res)); + + json_decref(val); + } else { + /* build hex string */ + for (i = 0; i < ARRAY_SIZE(work->data); i++) + le32enc(work->data + i, work->data[i]); + bin2hex(data_str, (unsigned char *)work->data, sizeof(work->data)); + + /* build JSON-RPC request */ + sprintf(s, + "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n", + data_str); + + /* issue JSON-RPC request */ + val = json_rpc_call(curl, rpc_url, rpc_userpass, s, NULL, 0); + if (unlikely(!val)) { + applog(LOG_ERR, "submit_upstream_work json_rpc_call failed"); + goto out; + } + + res = json_object_get(val, "result"); + reason = json_object_get(val, "reject-reason"); + share_result(json_is_true(res), reason ? 
json_string_value(reason) : NULL); + + json_decref(val); + } + + rc = true; + +out: + return rc; +} + +static const char *getwork_req = + "{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n"; + +#define GBT_CAPABILITIES "[\"coinbasetxn\", \"coinbasevalue\", \"longpoll\", \"workid\"]" +#define GBT_RULES "[\"segwit\"]" + +static const char *gbt_req = + "{\"method\": \"getblocktemplate\", \"params\": [{\"capabilities\": " + GBT_CAPABILITIES ", \"rules\": " GBT_RULES "}], \"id\":0}\r\n"; +static const char *gbt_lp_req = + "{\"method\": \"getblocktemplate\", \"params\": [{\"capabilities\": " + GBT_CAPABILITIES ", \"rules\": " GBT_RULES ", \"longpollid\": \"%s\"}], \"id\":0}\r\n"; + +static bool get_upstream_work(CURL *curl, struct work *work) +{ + json_t *val; + int err; + bool rc; + struct timeval tv_start, tv_end, diff; + +start: + gettimeofday(&tv_start, NULL); + val = json_rpc_call(curl, rpc_url, rpc_userpass, + have_gbt ? gbt_req : getwork_req, + &err, have_gbt ? JSON_RPC_QUIET_404 : 0); + gettimeofday(&tv_end, NULL); + + if (have_stratum) { + if (val) + json_decref(val); + return true; + } + + if (!have_gbt && !allow_getwork) { + applog(LOG_ERR, "No usable protocol"); + if (val) + json_decref(val); + return false; + } + + if (have_gbt && allow_getwork && !val && err == CURLE_OK) { + applog(LOG_INFO, "getblocktemplate failed, falling back to getwork"); + have_gbt = false; + goto start; + } + + if (!val) + return false; + + if (have_gbt) { + rc = gbt_work_decode(json_object_get(val, "result"), work); + if (!have_gbt) { + json_decref(val); + goto start; + } + } else + rc = work_decode(json_object_get(val, "result"), work); + + if (opt_debug && rc) { + timeval_subtract(&diff, &tv_end, &tv_start); + applog(LOG_DEBUG, "DEBUG: got new work in %d ms", + diff.tv_sec * 1000 + diff.tv_usec / 1000); + } + + json_decref(val); + + return rc; +} + +static void workio_cmd_free(struct workio_cmd *wc) +{ + if (!wc) + return; + + switch (wc->cmd) { + case WC_SUBMIT_WORK: + work_free(wc->u.work); + free(wc->u.work); + break; + default: /* do nothing */ + break; + } + + memset(wc, 0, sizeof(*wc)); /* poison */ + free(wc); +} + +static bool workio_get_work(struct workio_cmd *wc, CURL *curl) +{ + struct work *ret_work; + int failures = 0; + + ret_work = calloc(1, sizeof(*ret_work)); + if (!ret_work) + return false; + + /* obtain new work from bitcoin via JSON-RPC */ + while (!get_upstream_work(curl, ret_work)) { + if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) { + applog(LOG_ERR, "json_rpc_call failed, terminating workio thread"); + free(ret_work); + return false; + } + + /* pause, then restart work-request loop */ + applog(LOG_ERR, "json_rpc_call failed, retry after %d seconds", + opt_fail_pause); + sleep(opt_fail_pause); + } + + /* send work to requesting thread */ + if (!tq_push(wc->thr->q, ret_work)) + free(ret_work); + + return true; +} + +static bool workio_submit_work(struct workio_cmd *wc, CURL *curl) +{ + int failures = 0; + + /* submit solution to bitcoin via JSON-RPC */ + while (!submit_upstream_work(curl, wc->u.work)) { + if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) { + applog(LOG_ERR, "...terminating workio thread"); + return false; + } + + /* pause, then restart work-request loop */ + applog(LOG_ERR, "...retry after %d seconds", + opt_fail_pause); + sleep(opt_fail_pause); + } + + return true; +} + +static void *workio_thread(void *userdata) +{ + struct thr_info *mythr = userdata; + CURL *curl; + bool ok = true; + + curl = curl_easy_init(); + if 
(unlikely(!curl)) { + applog(LOG_ERR, "CURL initialization failed"); + return NULL; + } + + while (ok) { + struct workio_cmd *wc; + + /* wait for workio_cmd sent to us, on our queue */ + wc = tq_pop(mythr->q, NULL); + if (!wc) { + ok = false; + break; + } + + /* process workio_cmd */ + switch (wc->cmd) { + case WC_GET_WORK: + ok = workio_get_work(wc, curl); + break; + case WC_SUBMIT_WORK: + ok = workio_submit_work(wc, curl); + break; + + default: /* should never happen */ + ok = false; + break; + } + + workio_cmd_free(wc); + } + + tq_freeze(mythr->q); + curl_easy_cleanup(curl); + + return NULL; +} + +static bool get_work(struct thr_info *thr, struct work *work) +{ + struct workio_cmd *wc; + struct work *work_heap; + + if (opt_benchmark) { + memset(work->data, 0x55, 76); + work->data[17] = swab32(time(NULL)); + memset(work->data + 19, 0x00, 52); + work->data[20] = 0x80000000; + work->data[31] = 0x00000280; + memset(work->target, 0x00, sizeof(work->target)); + return true; + } + + /* fill out work request message */ + wc = calloc(1, sizeof(*wc)); + if (!wc) + return false; + + wc->cmd = WC_GET_WORK; + wc->thr = thr; + + /* send work request to workio thread */ + if (!tq_push(thr_info[work_thr_id].q, wc)) { + workio_cmd_free(wc); + return false; + } + + /* wait for response, a unit of work */ + work_heap = tq_pop(thr->q, NULL); + if (!work_heap) + return false; + + /* copy returned work into storage provided by caller */ + memcpy(work, work_heap, sizeof(*work)); + free(work_heap); + + return true; +} + +static bool submit_work(struct thr_info *thr, const struct work *work_in) +{ + struct workio_cmd *wc; + + /* fill out work request message */ + wc = calloc(1, sizeof(*wc)); + if (!wc) + return false; + + wc->u.work = malloc(sizeof(*work_in)); + if (!wc->u.work) + goto err_out; + + wc->cmd = WC_SUBMIT_WORK; + wc->thr = thr; + work_copy(wc->u.work, work_in); + + /* send solution to workio thread */ + if (!tq_push(thr_info[work_thr_id].q, wc)) + goto err_out; + + return true; + +err_out: + workio_cmd_free(wc); + return false; +} + +static void stratum_gen_work(struct stratum_ctx *sctx, struct work *work) +{ + unsigned char merkle_root[64]; + int i; + + pthread_mutex_lock(&sctx->work_lock); + + free(work->job_id); + work->job_id = strdup(sctx->job.job_id); + work->xnonce2_len = sctx->xnonce2_size; + work->xnonce2 = realloc(work->xnonce2, sctx->xnonce2_size); + memcpy(work->xnonce2, sctx->job.xnonce2, sctx->xnonce2_size); + + /* Generate merkle root */ + sha256d(merkle_root, sctx->job.coinbase, sctx->job.coinbase_size); + for (i = 0; i < sctx->job.merkle_count; i++) { + memcpy(merkle_root + 32, sctx->job.merkle[i], 32); + sha256d(merkle_root, merkle_root, 64); + } + + /* Increment extranonce2 */ + for (i = 0; i < sctx->xnonce2_size && !++sctx->job.xnonce2[i]; i++); + + /* Assemble block header */ + memset(work->data, 0, 128); + work->data[0] = le32dec(sctx->job.version); + for (i = 0; i < 8; i++) + work->data[1 + i] = le32dec((uint32_t *)sctx->job.prevhash + i); + for (i = 0; i < 8; i++) + work->data[9 + i] = be32dec((uint32_t *)merkle_root + i); + work->data[17] = le32dec(sctx->job.ntime); + work->data[18] = le32dec(sctx->job.nbits); + work->data[20] = 0x80000000; + work->data[31] = 0x00000280; + + pthread_mutex_unlock(&sctx->work_lock); + + if (opt_debug) { + char *xnonce2str = abin2hex(work->xnonce2, work->xnonce2_len); + applog(LOG_DEBUG, "DEBUG: job_id='%s' extranonce2=%s ntime=%08x", + work->job_id, xnonce2str, swab32(work->data[17])); + free(xnonce2str); + } + + diff_to_target(work->target, 
sctx->job.diff / 65536.0); +} + +static void *miner_thread(void *userdata) +{ + struct thr_info *mythr = userdata; + int thr_id = mythr->id; + struct work work = {{0}}; + uint32_t max_nonce; + uint32_t start_nonce = 0xffffffffU / opt_n_threads * thr_id; + uint32_t end_nonce = 0xffffffffU / opt_n_threads * (thr_id + 1) - 0x20; + char s[16]; + int i; + + /* Set worker threads to nice 19 and then preferentially to SCHED_IDLE + * and if that fails, then SCHED_BATCH. No need for this to be an + * error if it fails */ + if (!opt_benchmark) { + setpriority(PRIO_PROCESS, 0, 19); + drop_policy(); + } + + /* Cpu affinity only makes sense if the number of threads is a multiple + * of the number of CPUs */ + if (num_processors > 1 && opt_n_threads % num_processors == 0) { + if (!opt_quiet) + applog(LOG_INFO, "Binding thread %d to cpu %d", + thr_id, thr_id % num_processors); + affine_to_cpu(thr_id, thr_id % num_processors); + } + + while (1) { + unsigned long hashes_done; + struct timeval tv_start, tv_end, diff; + int64_t max64; + int rc; + + if (have_stratum) { + while (time(NULL) >= g_work_time + 120) + sleep(1); + pthread_mutex_lock(&g_work_lock); + if (work.data[19] >= end_nonce && !memcmp(work.data, g_work.data, 76)) + stratum_gen_work(&stratum, &g_work); + } else { + int min_scantime = have_longpoll ? LP_SCANTIME : opt_scantime; + /* obtain new work from internal workio thread */ + pthread_mutex_lock(&g_work_lock); + if (!have_stratum && + (time(NULL) - g_work_time >= min_scantime || + work.data[19] >= end_nonce)) { + work_free(&g_work); + if (unlikely(!get_work(mythr, &g_work))) { + applog(LOG_ERR, "work retrieval failed, exiting " + "mining thread %d", mythr->id); + pthread_mutex_unlock(&g_work_lock); + goto out; + } + g_work_time = have_stratum ? 0 : time(NULL); + } + if (have_stratum) { + pthread_mutex_unlock(&g_work_lock); + continue; + } + } + if (memcmp(work.data, g_work.data, 76)) { + work_free(&work); + work_copy(&work, &g_work); + work.data[19] = start_nonce; + } else + work.data[19]++; + pthread_mutex_unlock(&g_work_lock); + work_restart[thr_id].restart = 0; + + /* adjust max_nonce to meet target scan time */ + if (have_stratum) + max64 = LP_SCANTIME; + else + max64 = g_work_time + (have_longpoll ? 
LP_SCANTIME : opt_scantime) + - time(NULL); + max64 *= thr_hashrates[thr_id]; + if (max64 <= 0) { + switch (opt_algo) { + case ALGO_SUGAR_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_ISO_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_NULL_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_URX_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_LITB_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_IOTS_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_ITC_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_YTN_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_MBC_YESPOWER_1_0_1: + max64 = 499; + break; + case ALGO_ARM_YESPOWER_1_0_1: + max64 = 499; + break; + } + } + if (work.data[19] + max64 > end_nonce) + max_nonce = end_nonce; + else + max_nonce = work.data[19] + max64; + + hashes_done = 0; + gettimeofday(&tv_start, NULL); + + /* scan nonces for a proof-of-work hash */ + switch (opt_algo) { + + case ALGO_SUGAR_YESPOWER_1_0_1: + rc = scanhash_sugar_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_ISO_YESPOWER_1_0_1: + rc = scanhash_iso_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_NULL_YESPOWER_1_0_1: + rc = scanhash_null_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_URX_YESPOWER_1_0_1: + rc = scanhash_urx_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_LITB_YESPOWER_1_0_1: + rc = scanhash_litb_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_IOTS_YESPOWER_1_0_1: + rc = scanhash_iots_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_ITC_YESPOWER_1_0_1: + rc = scanhash_itc_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + case ALGO_YTN_YESPOWER_1_0_1: + rc = scanhash_ytn_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_MBC_YESPOWER_1_0_1: + rc = scanhash_mbc_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + case ALGO_ARM_YESPOWER_1_0_1: + rc = scanhash_arm_yespower( + thr_id, work.data, work.target, max_nonce, &hashes_done + ); + break; + + default: + /* should never happen */ + goto out; + } + + /* record scanhash elapsed time */ + gettimeofday(&tv_end, NULL); + timeval_subtract(&diff, &tv_end, &tv_start); + if (diff.tv_usec || diff.tv_sec) { + pthread_mutex_lock(&stats_lock); + thr_hashrates[thr_id] = + hashes_done / (diff.tv_sec + 1e-6 * diff.tv_usec); + pthread_mutex_unlock(&stats_lock); + } + if (!opt_quiet) { + sprintf(s, thr_hashrates[thr_id] >= 1e3 ? "%.0f" : "%.1f", + thr_hashrates[thr_id]); + applog(LOG_INFO, "thread %d: %lu hashes, %s hash/s", + thr_id, hashes_done, s); + } + if (opt_benchmark && thr_id == opt_n_threads - 1) { + double hashrate = 0.; + pthread_mutex_lock(&stats_lock); + for (i = 0; i < opt_n_threads && thr_hashrates[i]; i++) + hashrate += thr_hashrates[i]; + pthread_mutex_unlock(&stats_lock); + if (i == opt_n_threads) { + sprintf(s, hashrate >= 1e3 ? 
"%.0f" : "%.1f", hashrate); + applog(LOG_INFO, "Total: %s hash/s", s); + } + } + + /* if nonce found, submit work */ + if (rc && !opt_benchmark && !submit_work(mythr, &work)) + break; + } + +out: + tq_freeze(mythr->q); + + return NULL; +} + +static void restart_threads(void) +{ + int i; + + for (i = 0; i < opt_n_threads; i++) + work_restart[i].restart = 1; +} + +static void *longpoll_thread(void *userdata) +{ + struct thr_info *mythr = userdata; + CURL *curl = NULL; + char *copy_start, *hdr_path = NULL, *lp_url = NULL; + bool need_slash = false; + + curl = curl_easy_init(); + if (unlikely(!curl)) { + applog(LOG_ERR, "CURL initialization failed"); + goto out; + } + +start: + hdr_path = tq_pop(mythr->q, NULL); + if (!hdr_path) + goto out; + + /* full URL */ + if (strstr(hdr_path, "://")) { + lp_url = hdr_path; + hdr_path = NULL; + } + + /* absolute path, on current server */ + else { + copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path; + if (rpc_url[strlen(rpc_url) - 1] != '/') + need_slash = true; + + lp_url = malloc(strlen(rpc_url) + strlen(copy_start) + 2); + if (!lp_url) + goto out; + + sprintf(lp_url, "%s%s%s", rpc_url, need_slash ? "/" : "", copy_start); + } + + applog(LOG_INFO, "Long-polling activated for %s", lp_url); + + while (1) { + json_t *val, *res, *soval; + char *req = NULL; + int err; + + if (have_gbt) { + req = malloc(strlen(gbt_lp_req) + strlen(lp_id) + 1); + sprintf(req, gbt_lp_req, lp_id); + } + val = json_rpc_call(curl, lp_url, rpc_userpass, + req ? req : getwork_req, &err, + JSON_RPC_LONGPOLL); + free(req); + if (have_stratum) { + if (val) + json_decref(val); + goto out; + } + if (likely(val)) { + bool rc; + // LOG_DISABLED: SOLO + // applog(LOG_INFO, "LONGPOLL pushed new work"); + res = json_object_get(val, "result"); + soval = json_object_get(res, "submitold"); + submit_old = soval ? json_is_true(soval) : false; + pthread_mutex_lock(&g_work_lock); + work_free(&g_work); + if (have_gbt) + rc = gbt_work_decode(res, &g_work); + else + rc = work_decode(res, &g_work); + if (rc) { + time(&g_work_time); + restart_threads(); + } + pthread_mutex_unlock(&g_work_lock); + json_decref(val); + } else { + pthread_mutex_lock(&g_work_lock); + g_work_time -= LP_SCANTIME; + pthread_mutex_unlock(&g_work_lock); + if (err == CURLE_OPERATION_TIMEDOUT) { + restart_threads(); + } else { + have_longpoll = false; + restart_threads(); + free(hdr_path); + free(lp_url); + lp_url = NULL; + sleep(opt_fail_pause); + goto start; + } + } + } + +out: + free(hdr_path); + free(lp_url); + tq_freeze(mythr->q); + if (curl) + curl_easy_cleanup(curl); + + return NULL; +} + +static bool stratum_handle_response(char *buf) +{ + json_t *val, *err_val, *res_val, *id_val; + json_error_t err; + bool ret = false; + + val = JSON_LOADS(buf, &err); + if (!val) { + applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); + goto out; + } + + res_val = json_object_get(val, "result"); + err_val = json_object_get(val, "error"); + id_val = json_object_get(val, "id"); + + if (!id_val || json_is_null(id_val) || !res_val) + goto out; + + share_result(json_is_true(res_val), + err_val ? 
json_string_value(json_array_get(err_val, 1)) : NULL); + + ret = true; +out: + if (val) + json_decref(val); + + return ret; +} + +static void *stratum_thread(void *userdata) +{ + struct thr_info *mythr = userdata; + char *s; + + stratum.url = tq_pop(mythr->q, NULL); + if (!stratum.url) + goto out; + applog(LOG_INFO, "Starting Stratum on %s", stratum.url); + + while (1) { + int failures = 0; + + while (!stratum.curl) { + pthread_mutex_lock(&g_work_lock); + g_work_time = 0; + pthread_mutex_unlock(&g_work_lock); + restart_threads(); + + if (!stratum_connect(&stratum, stratum.url) || + !stratum_subscribe(&stratum) || + !stratum_authorize(&stratum, rpc_user, rpc_pass)) { + stratum_disconnect(&stratum); + if (opt_retries >= 0 && ++failures > opt_retries) { + applog(LOG_ERR, "...terminating workio thread"); + tq_push(thr_info[work_thr_id].q, NULL); + goto out; + } + applog(LOG_ERR, "...retry after %d seconds", opt_fail_pause); + sleep(opt_fail_pause); + } + } + + if (stratum.job.job_id && + (!g_work_time || strcmp(stratum.job.job_id, g_work.job_id))) { + pthread_mutex_lock(&g_work_lock); + stratum_gen_work(&stratum, &g_work); + time(&g_work_time); + pthread_mutex_unlock(&g_work_lock); + if (stratum.job.clean) { + // LOG_DISABLED: STRATUM(POOL) + // applog(LOG_INFO, "Stratum requested work restart"); + restart_threads(); + } + } + + if (!stratum_socket_full(&stratum, 120)) { + applog(LOG_ERR, "Stratum connection timed out"); + s = NULL; + } else + s = stratum_recv_line(&stratum); + if (!s) { + stratum_disconnect(&stratum); + applog(LOG_ERR, "Stratum connection interrupted"); + continue; + } + if (!stratum_handle_method(&stratum, s)) + stratum_handle_response(s); + free(s); + } + +out: + return NULL; +} + +static void show_version_and_exit(void) +{ + printf(PACKAGE_STRING "\nFeatures:" +#if defined(__i386__) || defined(__x86_64__) +#ifdef __x86_64__ + " x86_64" +#else + " i386" +#endif +#ifdef __SSE2__ + " SSE2" +#endif +#ifdef __SSE4_1__ + " SSE4.1" +#endif +#ifdef __AVX__ + " AVX" +#endif +#ifdef __XOP__ + " XOP" +#endif +#else + " generic" +#endif + "\n"); + + printf("%s\n", curl_version()); +#ifdef JANSSON_VERSION + printf("libjansson %s\n", JANSSON_VERSION); +#endif + exit(0); +} + +static void show_usage_and_exit(int status) +{ + if (status) + fprintf(stderr, "Try `" PROGRAM_NAME " --help' for more information.\n"); + else + printf(usage); + exit(status); +} + +static void strhide(char *s) +{ + if (*s) *s++ = 'x'; + while (*s) *s++ = '\0'; +} + +static void parse_config(json_t *config, char *pname, char *ref); + +static void parse_arg(int key, char *arg, char *pname) +{ + char *p; + int v, i; + + switch(key) { + case 'a': + for (i = 0; i < ARRAY_SIZE(algo_names); i++) { + v = strlen(algo_names[i]); + if (!strncmp(arg, algo_names[i], v)) { + if (arg[v] == '\0') { + opt_algo = i; + break; + } + } + } + if (i == ARRAY_SIZE(algo_names)) { + fprintf(stderr, "%s: unknown algorithm -- '%s'\n", + pname, arg); + show_usage_and_exit(1); + } + break; + case 'B': + opt_background = true; + break; + case 'c': { + json_error_t err; + json_t *config = JSON_LOAD_FILE(arg, &err); + if (!json_is_object(config)) { + if (err.line < 0) + fprintf(stderr, "%s: %s\n", pname, err.text); + else + fprintf(stderr, "%s: %s:%d: %s\n", + pname, arg, err.line, err.text); + exit(1); + } + parse_config(config, pname, arg); + json_decref(config); + break; + } + case 'q': + opt_quiet = true; + break; + case 'D': + opt_debug = true; + break; + case 'p': + free(rpc_pass); + rpc_pass = strdup(arg); + strhide(arg); + break; + case 
'P': + opt_protocol = true; + break; + case 'r': + v = atoi(arg); + if (v < -1 || v > 9999) /* sanity check */ + show_usage_and_exit(1); + opt_retries = v; + break; + case 'R': + v = atoi(arg); + if (v < 1 || v > 9999) /* sanity check */ + show_usage_and_exit(1); + opt_fail_pause = v; + break; + case 's': + v = atoi(arg); + if (v < 1 || v > 9999) /* sanity check */ + show_usage_and_exit(1); + opt_scantime = v; + break; + case 'T': + v = atoi(arg); + if (v < 1 || v > 99999) /* sanity check */ + show_usage_and_exit(1); + opt_timeout = v; + break; + case 't': + v = atoi(arg); + if (v < 1 || v > 9999) /* sanity check */ + show_usage_and_exit(1); + opt_n_threads = v; + break; + case 'u': + free(rpc_user); + rpc_user = strdup(arg); + break; + case 'o': { /* --url */ + char *ap, *hp; + ap = strstr(arg, "://"); + ap = ap ? ap + 3 : arg; + hp = strrchr(arg, '@'); + if (hp) { + *hp = '\0'; + p = strchr(ap, ':'); + if (p) { + free(rpc_userpass); + rpc_userpass = strdup(ap); + free(rpc_user); + rpc_user = calloc(p - ap + 1, 1); + strncpy(rpc_user, ap, p - ap); + free(rpc_pass); + rpc_pass = strdup(++p); + if (*p) *p++ = 'x'; + v = strlen(hp + 1) + 1; + memmove(p + 1, hp + 1, v); + memset(p + v, 0, hp - p); + hp = p; + } else { + free(rpc_user); + rpc_user = strdup(ap); + } + *hp++ = '@'; + } else + hp = ap; + if (ap != arg) { + if (strncasecmp(arg, "http://", 7) && + strncasecmp(arg, "https://", 8) && + strncasecmp(arg, "stratum+tcp://", 14) && + strncasecmp(arg, "stratum+tcps://", 15)) { + fprintf(stderr, "%s: unknown protocol -- '%s'\n", + pname, arg); + show_usage_and_exit(1); + } + free(rpc_url); + rpc_url = strdup(arg); + strcpy(rpc_url + (ap - arg), hp); + } else { + if (*hp == '\0' || *hp == '/') { + fprintf(stderr, "%s: invalid URL -- '%s'\n", + pname, arg); + show_usage_and_exit(1); + } + free(rpc_url); + rpc_url = malloc(strlen(hp) + 8); + sprintf(rpc_url, "http://%s", hp); + } + have_stratum = !opt_benchmark && !strncasecmp(rpc_url, "stratum", 7); + break; + } + case 'O': /* --userpass */ + p = strchr(arg, ':'); + if (!p) { + fprintf(stderr, "%s: invalid username:password pair -- '%s'\n", + pname, arg); + show_usage_and_exit(1); + } + free(rpc_userpass); + rpc_userpass = strdup(arg); + free(rpc_user); + rpc_user = calloc(p - arg + 1, 1); + strncpy(rpc_user, arg, p - arg); + free(rpc_pass); + rpc_pass = strdup(++p); + strhide(p); + break; + case 'x': /* --proxy */ + if (!strncasecmp(arg, "socks4://", 9)) + opt_proxy_type = CURLPROXY_SOCKS4; + else if (!strncasecmp(arg, "socks5://", 9)) + opt_proxy_type = CURLPROXY_SOCKS5; +#if LIBCURL_VERSION_NUM >= 0x071200 + else if (!strncasecmp(arg, "socks4a://", 10)) + opt_proxy_type = CURLPROXY_SOCKS4A; + else if (!strncasecmp(arg, "socks5h://", 10)) + opt_proxy_type = CURLPROXY_SOCKS5_HOSTNAME; +#endif + else + opt_proxy_type = CURLPROXY_HTTP; + free(opt_proxy); + opt_proxy = strdup(arg); + break; + case 1001: + free(opt_cert); + opt_cert = strdup(arg); + break; + case 1005: + opt_benchmark = true; + want_longpoll = false; + want_stratum = false; + have_stratum = false; + break; + case 1003: + want_longpoll = false; + break; + case 1007: + want_stratum = false; + break; + case 1009: + opt_redirect = false; + break; + case 1010: + allow_getwork = false; + break; + case 1011: + have_gbt = false; + break; + case 1013: /* --coinbase-addr */ + pk_script_size = address_to_script(pk_script, sizeof(pk_script), arg); + if (!pk_script_size) { + fprintf(stderr, "%s: invalid address -- '%s'\n", + pname, arg); + show_usage_and_exit(1); + } + break; + case 1015: /* 
--coinbase-sig */ + if (strlen(arg) + 1 > sizeof(coinbase_sig)) { + fprintf(stderr, "%s: coinbase signature too long\n", pname); + show_usage_and_exit(1); + } + strcpy(coinbase_sig, arg); + break; + case 'S': + use_syslog = true; + break; + case 'V': + show_version_and_exit(); + case 'h': + show_usage_and_exit(0); + default: + show_usage_and_exit(1); + } +} + +static void parse_config(json_t *config, char *pname, char *ref) +{ + int i; + char *s; + json_t *val; + + for (i = 0; i < ARRAY_SIZE(options); i++) { + if (!options[i].name) + break; + + val = json_object_get(config, options[i].name); + if (!val) + continue; + + if (options[i].has_arg && json_is_string(val)) { + if (!strcmp(options[i].name, "config")) { + fprintf(stderr, "%s: %s: option '%s' not allowed here\n", + pname, ref, options[i].name); + exit(1); + } + s = strdup(json_string_value(val)); + if (!s) + break; + parse_arg(options[i].val, s, pname); + free(s); + } else if (!options[i].has_arg && json_is_true(val)) { + parse_arg(options[i].val, "", pname); + } else { + fprintf(stderr, "%s: invalid argument for option '%s'\n", + pname, options[i].name); + exit(1); + } + } +} + +static void parse_cmdline(int argc, char *argv[]) +{ + int key; + + while (1) { +#if HAVE_GETOPT_LONG + key = getopt_long(argc, argv, short_options, options, NULL); +#else + key = getopt(argc, argv, short_options); +#endif + if (key < 0) + break; + + parse_arg(key, optarg, argv[0]); + } + if (optind < argc) { + fprintf(stderr, "%s: unsupported non-option argument -- '%s'\n", + argv[0], argv[optind]); + show_usage_and_exit(1); + } +} + +#ifndef WIN32 +static void signal_handler(int sig) +{ + switch (sig) { + case SIGHUP: + applog(LOG_INFO, "SIGHUP received"); + break; + case SIGINT: + applog(LOG_INFO, "SIGINT received, exiting"); + exit(0); + break; + case SIGTERM: + applog(LOG_INFO, "SIGTERM received, exiting"); + exit(0); + break; + } +} +#endif + +static void show_credits() { + printf("\n"); + printf(" *** "PACKAGE_NAME" "PACKAGE_VERSION" by NoLamboCoin ***\n"); + printf(" Multi-threaded CPU miner for NoLamboCoin Coin and other Yespower variants\n"); + printf("\n"); + printf(" Authors:\n"); + printf(" Jeff Garzik jeff@garzik.org\n"); + printf(" Pooler pooler@litecoinpool.org\n"); + printf(" Alexander Peslyak solar@openwall.com\n"); + printf(" Kanon 60179867+decryp2kanon@users.noreply.github.com\n"); + printf(" Yentencoin\n"); + printf(" NoLamboCoin\n"); + printf("\n"); + printf(" Download Latest Release:\n"); + printf(" https://github.com/NoLamboCoin/...\n"); + printf("\n"); + printf(" Have Fun with Rpi Mining\n"); + printf("\n"); +} + +int main(int argc, char *argv[]) +{ + struct thr_info *thr; + long flags; + int i; + + show_credits(); + + rpc_user = strdup(""); + rpc_pass = strdup(""); + + /* parse command line */ + parse_cmdline(argc, argv); + + if (!opt_benchmark && !rpc_url) { + fprintf(stderr, "%s: no URL supplied\n", argv[0]); + show_usage_and_exit(1); + } + + if (!rpc_userpass) { + rpc_userpass = malloc(strlen(rpc_user) + strlen(rpc_pass) + 2); + if (!rpc_userpass) + return 1; + sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass); + } + + pthread_mutex_init(&applog_lock, NULL); + pthread_mutex_init(&stats_lock, NULL); + pthread_mutex_init(&g_work_lock, NULL); + pthread_mutex_init(&stratum.sock_lock, NULL); + pthread_mutex_init(&stratum.work_lock, NULL); + + flags = opt_benchmark || (strncasecmp(rpc_url, "https://", 8) && + strncasecmp(rpc_url, "stratum+tcps://", 15)) + ? 
(CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL) + : CURL_GLOBAL_ALL; + if (curl_global_init(flags)) { + applog(LOG_ERR, "CURL initialization failed"); + return 1; + } + +#ifndef WIN32 + if (opt_background) { + i = fork(); + if (i < 0) exit(1); + if (i > 0) exit(0); + i = setsid(); + if (i < 0) + applog(LOG_ERR, "setsid() failed (errno = %d)", errno); + i = chdir("/"); + if (i < 0) + applog(LOG_ERR, "chdir() failed (errno = %d)", errno); + signal(SIGHUP, signal_handler); + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + } +#endif + +#if defined(WIN32) + SYSTEM_INFO sysinfo; + GetSystemInfo(&sysinfo); + num_processors = sysinfo.dwNumberOfProcessors; +#elif defined(_SC_NPROCESSORS_CONF) + num_processors = sysconf(_SC_NPROCESSORS_CONF); +#elif defined(CTL_HW) && defined(HW_NCPU) + int req[] = { CTL_HW, HW_NCPU }; + size_t len = sizeof(num_processors); + sysctl(req, 2, &num_processors, &len, NULL, 0); +#else + num_processors = 1; +#endif + if (num_processors < 1) + num_processors = 1; + +#ifdef HAVE_CPUINFO + have_cpuinfo = !cpuinfo_init(); + + if (!opt_n_threads && have_cpuinfo) + opt_n_threads = cpuinfo.physical; +#endif + + if (!opt_n_threads) + opt_n_threads = num_processors; + +#ifdef HAVE_SYSLOG_H + if (use_syslog) + openlog("cpuminer", LOG_PID, LOG_USER); +#endif + + work_restart = calloc(opt_n_threads, sizeof(*work_restart)); + if (!work_restart) + return 1; + + thr_info = calloc(opt_n_threads + 3, sizeof(*thr)); + if (!thr_info) + return 1; + + thr_hashrates = (double *) calloc(opt_n_threads, sizeof(double)); + if (!thr_hashrates) + return 1; + + /* init workio thread info */ + work_thr_id = opt_n_threads; + thr = &thr_info[work_thr_id]; + thr->id = work_thr_id; + thr->q = tq_new(); + if (!thr->q) + return 1; + + /* start work I/O thread */ + if (pthread_create(&thr->pth, NULL, workio_thread, thr)) { + applog(LOG_ERR, "workio thread create failed"); + return 1; + } + + if (want_longpoll && !have_stratum) { + /* init longpoll thread info */ + longpoll_thr_id = opt_n_threads + 1; + thr = &thr_info[longpoll_thr_id]; + thr->id = longpoll_thr_id; + thr->q = tq_new(); + if (!thr->q) + return 1; + + /* start longpoll thread */ + if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) { + applog(LOG_ERR, "longpoll thread create failed"); + return 1; + } + } + if (want_stratum) { + /* init stratum thread info */ + stratum_thr_id = opt_n_threads + 2; + thr = &thr_info[stratum_thr_id]; + thr->id = stratum_thr_id; + thr->q = tq_new(); + if (!thr->q) + return 1; + + /* start stratum thread */ + if (unlikely(pthread_create(&thr->pth, NULL, stratum_thread, thr))) { + applog(LOG_ERR, "stratum thread create failed"); + return 1; + } + + if (have_stratum) + tq_push(thr_info[stratum_thr_id].q, strdup(rpc_url)); + } + + /* start mining threads */ + for (i = 0; i < opt_n_threads; i++) { + thr = &thr_info[i]; + + thr->id = i; + thr->q = tq_new(); + if (!thr->q) + return 1; + + if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) { + applog(LOG_ERR, "thread %d create failed", i); + return 1; + } + } + + applog(LOG_INFO, "%d miner threads started, " + "using '%s' algorithm.", + opt_n_threads, + algo_names[opt_algo]); + + /* main loop - simply wait for workio thread to exit */ + pthread_join(thr_info[work_thr_id].pth, NULL); + + applog(LOG_INFO, "workio thread dead, exiting."); + + return 0; +} diff --git a/deps-aarch64/deps-aarch64.sh b/deps-aarch64/deps-aarch64.sh new file mode 100644 index 0000000..42a3bc8 --- /dev/null +++ b/deps-aarch64/deps-aarch64.sh @@ -0,0 +1,24 @@ 
+# WARNING +# Try on Virtual Machine (Ubuntu 16.04) +# https://lxadm.com/Static_compilation_of_cpuminer + +# DEPENDS + +## OPENSSL +wget https://www.openssl.org/source/openssl-1.1.0g.tar.gz +tar -xvzf openssl-1.1.0g.tar.gz +cd openssl-1.1.0g/ +./config no-shared +make -j$(nproc) +sudo make install +cd .. + +## CURL +wget https://github.com/curl/curl/releases/download/curl-7_57_0/curl-7.57.0.tar.gz +tar -xvzf curl-7.57.0.tar.gz +cd curl-7.57.0/ +./buildconf | grep "buildconf: OK" +./configure --disable-shared | grep "Static=yes" +make -j$(nproc) +sudo make install +cd ../.. diff --git a/deps-armv7l/deps-armv7l.sh b/deps-armv7l/deps-armv7l.sh new file mode 100644 index 0000000..42a3bc8 --- /dev/null +++ b/deps-armv7l/deps-armv7l.sh @@ -0,0 +1,24 @@ +# WARNING +# Try on Virtual Machine (Ubuntu 16.04) +# https://lxadm.com/Static_compilation_of_cpuminer + +# DEPENDS + +## OPENSSL +wget https://www.openssl.org/source/openssl-1.1.0g.tar.gz +tar -xvzf openssl-1.1.0g.tar.gz +cd openssl-1.1.0g/ +./config no-shared +make -j$(nproc) +sudo make install +cd .. + +## CURL +wget https://github.com/curl/curl/releases/download/curl-7_57_0/curl-7.57.0.tar.gz +tar -xvzf curl-7.57.0.tar.gz +cd curl-7.57.0/ +./buildconf | grep "buildconf: OK" +./configure --disable-shared | grep "Static=yes" +make -j$(nproc) +sudo make install +cd ../.. diff --git a/deps-linux32/deps-linux32.sh b/deps-linux32/deps-linux32.sh new file mode 100644 index 0000000..42a3bc8 --- /dev/null +++ b/deps-linux32/deps-linux32.sh @@ -0,0 +1,24 @@ +# WARNING +# Try on Virtual Machine (Ubuntu 16.04) +# https://lxadm.com/Static_compilation_of_cpuminer + +# DEPENDS + +## OPENSSL +wget https://www.openssl.org/source/openssl-1.1.0g.tar.gz +tar -xvzf openssl-1.1.0g.tar.gz +cd openssl-1.1.0g/ +./config no-shared +make -j$(nproc) +sudo make install +cd .. + +## CURL +wget https://github.com/curl/curl/releases/download/curl-7_57_0/curl-7.57.0.tar.gz +tar -xvzf curl-7.57.0.tar.gz +cd curl-7.57.0/ +./buildconf | grep "buildconf: OK" +./configure --disable-shared | grep "Static=yes" +make -j$(nproc) +sudo make install +cd ../.. diff --git a/deps-linux64/deps-linux64.sh b/deps-linux64/deps-linux64.sh new file mode 100644 index 0000000..42a3bc8 --- /dev/null +++ b/deps-linux64/deps-linux64.sh @@ -0,0 +1,24 @@ +# WARNING +# Try on Virtual Machine (Ubuntu 16.04) +# https://lxadm.com/Static_compilation_of_cpuminer + +# DEPENDS + +## OPENSSL +wget https://www.openssl.org/source/openssl-1.1.0g.tar.gz +tar -xvzf openssl-1.1.0g.tar.gz +cd openssl-1.1.0g/ +./config no-shared +make -j$(nproc) +sudo make install +cd .. + +## CURL +wget https://github.com/curl/curl/releases/download/curl-7_57_0/curl-7.57.0.tar.gz +tar -xvzf curl-7.57.0.tar.gz +cd curl-7.57.0/ +./buildconf | grep "buildconf: OK" +./configure --disable-shared | grep "Static=yes" +make -j$(nproc) +sudo make install +cd ../.. 
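+
+## CHECK THE RESULT (editorial sketch -- not part of the upstream recipe)
+# Assumption: both builds above used their default /usr/local prefix; adjust
+# the paths below if a different --prefix was passed to ./config or ./configure.
+ls -l /usr/local/lib/libcrypto.a /usr/local/lib/libssl.a /usr/local/lib/libcurl.a
+# curl-config reports the extra libraries a static libcurl link needs,
+# which is useful when linking the miner statically afterwards.
+curl-config --static-libs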
diff --git a/deps-osx/deps-osx.sh b/deps-osx/deps-osx.sh new file mode 100644 index 0000000..8cd274a --- /dev/null +++ b/deps-osx/deps-osx.sh @@ -0,0 +1,3 @@ +# https://gist.github.com/quagliero/90f493f123c7b1ddba5428ba0146329a + +brew install automake openssl zlib curl jansson make diff --git a/deps-win32/build_win_x86_deps.sh b/deps-win32/build_win_x86_deps.sh new file mode 100644 index 0000000..53b0396 --- /dev/null +++ b/deps-win32/build_win_x86_deps.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e +PREFIX=${PWD}/i686-w64-mingw32 + +CURL_PACKAGE=curl-7.54.1 +CURL_PACKAGE_FILE=${CURL_PACKAGE}.tar.gz +CURL_PACKAGE_FILE_SHA256=cd404b808b253512dafec4fed0fb2cc98370d818a7991826c3021984fc27f9d0 +CURL_CHECKSUM_FILE=${CURL_PACKAGE_FILE}.sha256 + +wget https://curl.haxx.se/download/$CURL_PACKAGE_FILE -O $CURL_PACKAGE_FILE +echo "${CURL_PACKAGE_FILE_SHA256} ${CURL_PACKAGE_FILE}" > $CURL_CHECKSUM_FILE +sha256sum -c $CURL_CHECKSUM_FILE +rm $CURL_CHECKSUM_FILE + +rm -rf pthread-win32 +git clone https://github.com/GerHobbelt/pthread-win32.git + +tar zxvf $CURL_PACKAGE_FILE + +cd $CURL_PACKAGE +./configure --host=i686-w64-mingw32 --disable-shared --enable-static --with-winssl --prefix=$PREFIX +make install + +cd ../pthread-win32/ +cp config.h pthreads_win32_config.h +make -f GNUmakefile CROSS="i686-w64-mingw32-" clean GC-static +cp libpthreadGC2.a ${PREFIX}/lib/libpthread.a +cp pthread.h semaphore.h sched.h ${PREFIX}/include diff --git a/deps-win32/curl-7.54.1.tar.gz b/deps-win32/curl-7.54.1.tar.gz new file mode 100644 index 0000000..c72cc1d Binary files /dev/null and b/deps-win32/curl-7.54.1.tar.gz differ diff --git a/deps-win32/curl-7.54.1/CHANGES b/deps-win32/curl-7.54.1/CHANGES new file mode 100644 index 0000000..4df8ae8 --- /dev/null +++ b/deps-win32/curl-7.54.1/CHANGES @@ -0,0 +1,6295 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + + Changelog + +Version 7.54.1 (14 Jun 2017) + +Daniel Stenberg (14 Jun 2017) +- release: 7.54.1 + +Dan Fandrich (13 Jun 2017) +- mk-lib1521.pl: updated to match the test changes in 916ec30a + +Daniel Stenberg (13 Jun 2017) +- [Stuart Henderson brought this change] + + libressl: OCSP and intermediate certs workaround no longer needed + + lib/vtls/openssl.c has a workaround for a bug with OCSP responses signed + by intermediate certs, this was fixed in LibreSSL in + https://github.com/libressl-portable/openbsd/commit/912c64f68f7ac4f225b7d1fdc8fbd43168912ba0 + + Bug: https://curl.haxx.se/mail/lib-2017-06/0038.html + +- url: fix buffer overwrite with file protocol (CVE-2017-9502) + + Bug: https://github.com/curl/curl/issues/1540 + Advisory: https://curl.haxx.se/docs/adv_20170614.html + + Assisted-by: Ray Satiro + Reported-by: Marcel Raad + +- urlglob: fix division by zero + + The multiply() function that is used to avoid integer overflows, was + itself reason for a possible division by zero error when passed a + specially formatted glob. + + Reported-by: GwanYeong Kim + +- configure: update the copyright year in the output + +- [ygrek brought this change] + + BINDINGS: update SP-Forth and OCaml urls + +Michael Kaufmann (11 Jun 2017) +- FindWin32CACert: Use a temporary buffer on the stack + + Don't malloc() the temporary buffer, and use the correct type: + SearchPath() works with TCHAR, but SearchPathA() works with char. + Set the buffer size to MAX_PATH, because the terminating null byte + is already included in MAX_PATH. 
+ + Reviewed-by: Daniel Stenberg + Reviewed-by: Marcel Raad + + Closes #1548 + +Dan Fandrich (11 Jun 2017) +- test1521: fixed OOM handling + +Daniel Stenberg (9 Jun 2017) +- RELEASE-PROCEDURE: updated future release dates + +- [Paul Harris brought this change] + + gitignore: ignore all vim swap files + + Closes #1561 + +- lib1521: fix compiler warnings on the use of bad 'long' values + + Reported-by: Marcel Raad + Bug: https://github.com/curl/curl/commit/cccac4fb2b20d6ed87da7978408c3ecacc464fe4#commitcomment-22453387 + +- setopt: check CURLOPT_ADDRESS_SCOPE option range + + ... and return error instead of triggering an assert() when being way + out of range. + +Jay Satiro (8 Jun 2017) +- [TheAssassin brought this change] + + cmake: Fix inconsistency regarding mbed TLS include directory + + Previously, one had to set MBEDTLS_INCLUDE_DIR to make CMake find the + headers, but the system complained that mbed TLS wasn't found due to + MBEDTLS_INCLUDE_DIRS (note the trailing s) was not set. This commit + attempts to fix that. + + Closes https://github.com/curl/curl/pull/1541 + +Daniel Stenberg (8 Jun 2017) +- [Ryuichi KAWAMATA brought this change] + + examples/multi-uv.c: fix deprecated symbol + + Closes #1557 + +- asyn-ares: s/Curl_expire_latest/Curl_expire + +- expire: remove Curl_expire_latest() + + With the introduction of expire IDs and the fact that existing timers + can be removed now and thus never expire, the concept with adding a + "latest" timer is not working anymore as it risks to not expire at all. + + So, to be certain the timers actually are in line and will expire, the + plain Curl_expire() needs to be used. The _latest() function was added + as a sort of shortcut in the past that's quite simply not necessary + anymore. + + Follow-up to 31b39c40cf90 + + Reported-by: Paul Harris + + Closes #1555 + +- [Chris Carlmar brought this change] + + configure: fix link with librtmp when specifying path + + Bug: https://curl.haxx.se/mail/lib-2017-06/0017.html + +- file: make speedcheck use current time for checks + + ... as it would previously just get the "now" timestamp before the + transfer starts and then not update it again. + + Closes #1550 + +- metalink: remove unused printf() argument + +- travis: let some builds *not* use --enable-debug + + typecheck-gcc and other things require optimized builds + + Closes #1544 + +- README.md: show the coverall coverage on github + +- lib1521: fix compiler warnings + +- test1521: make the code < 80 columns wide + +- test1121: use stricter types to work with typcheck-gcc + +- typecheck-gcc: allow CURLOPT_STDERR to be NULL too + +- test1521: test *all* curl_easy_setopt options + + mk-lib1521.pl generates a test program (lib1521.c) that calls + curl_easy_setopt() for every known option with a few typical values to + make sure they work (ignoring the return codes). + + Some small changes were necessary to avoid asserts and NULL accesses + when doing this. + + The perl script needs to be manually rerun when we add new options. + + Closes #1543 + +Dan Fandrich (5 Jun 2017) +- test1538: added "verbose logs" keyword + + These error messages are not displayed with --disable-verbose + +Daniel Stenberg (5 Jun 2017) +- test1262: verify ftp download with -z for "if older than this" + +Marcel Raad (5 Jun 2017) +- curl_ntlm_core: use Curl_raw_toupper instead of toupper + + This was the only remaining use of toupper in the entire source code. 
+ + Suggested-by: Daniel Stenberg + +Daniel Stenberg (4 Jun 2017) +- RELEASE-NOTES: synced with 65ba92650 + +Marcel Raad (4 Jun 2017) +- curl_ntlm_core: pass unsigned char to toupper + + Otherwise, clang on Cygwin64 warns: + curl_ntlm_core.c:525:35: error: array subscript is of type 'char' + [-Werror,-Wchar-subscripts] + dest[2 * i] = (unsigned char)(toupper(src[i])); + ^~~~~~~~~~~~~~~ + /usr/include/ctype.h:152:25: note: expanded from macro 'toupper' + (void) __CTYPE_PTR[__x]; (toupper) (__x);}) + ^~~~ + +Jay Satiro (3 Jun 2017) +- [Mahmoud Samir Fayed brought this change] + + BINDINGS: add Ring binding + + Closes https://github.com/curl/curl/pull/1539 + +Daniel Stenberg (4 Jun 2017) +- CONTRIBUTE.md: mention tests done on pull requests + +- travis: add coverage, distcheck and cmake builds + + Closes #1534 + +Marcel Raad (3 Jun 2017) +- libtest: fix int-in-bool-context warnings + + GCC 7 complained: + ‘*’ in boolean context, suggest ‘&&’ instead [-Wint-in-bool-context] + +- libtest: fix implicit-fallthrough warnings with GCC 7 + +- x509asn1: fix implicit-fallthrough warning with GCC 7 + +- curl_sasl: fix unused-variable warning + + This fixes the following warning with CURL_DISABLE_CRYPTO_AUTH, + as seen in the autobuilds: + + curl_sasl.c:417:9: warning: unused variable 'serverdata' + [-Wunused-variable] + +Daniel Stenberg (3 Jun 2017) +- updatemanpages.pl: error out on too old git version + +Marcel Raad (3 Jun 2017) +- cyassl: define build macros before including ssl.h + + cyassl/ssl.h needs the macros from cyassl/options.h, so define them + before including cyassl/ssl.h the first time, which happens in + urldata.h. + This broke the build on Ubuntu Xenial, which comes with WolfSSL 3.4.8 + and therefore redefines the symbols from cyassl/options.h instead of + including the header. + + Closes https://github.com/curl/curl/pull/1536 + +Daniel Stenberg (3 Jun 2017) +- tool_util: remove unused tvdiff_secs and remove tool_ prefix + + Closes #1532 + +- dedotdot: fixed output for ".." and "." only input + + Found when updating test 1395, which I did to increase test coverage of + this source file... + + Closes #1535 + +Marcel Raad (2 Jun 2017) +- mbedtls: make TU-local variable static + + mbedtls_x509_crt_profile_fr is only used locally. + This fixes a missing-variable-declarations warning with clang. + +- MD(4|5): silence cast-align clang warning + + Unaligned access is on purpose here and the warning is harmless on + affected architectures. GCC knows that, while clang warns on all + architectures. + +Daniel Stenberg (2 Jun 2017) +- test1538: fix typo + +- test1538: verify the libcurl strerror API calls + +- curl_endian: remove unused functions + + Closes #1529 + +- test1537: dedicated tests of the URL (un)escape API calls + + Closes #1530 + +- coverage: run event tests too + + ... the torture ones are commented out only because they are slooooow. + +- build: provide easy code coverage measuring + + Closes #1528 + +- typecheck-gcc.h: check CURLINFO_CERTINFO + + ... and update the certinfo.c example accordingly. + + Fixes https://github.com/curl/curl/issues/846 + +- typecheck-gcc.h: check CURLINFO_TLS_SSL_PTR and CURLINFO_TLS_SESSION + + ... so that they get the required "struct curl_tlssessioninfo **" + arguments. + +- typecheck-gcc.h: separate getinfo slist checks from other pointers + + Fixes #1524 + +Marcel Raad (1 Jun 2017) +- curl-compilers.m4: escape square brackets in regex + + Otherwise, they are removed in the final configure file. 
+ Also changed sed to "$SED" like in most other calls in this file. + +- curl-compilers.m4: fix compiler_num for clang + + "clang -dumpversion" always returns "4.2.1", the GCC version that clang + was initially compatible to. Use "clang -v" instead, which returns the + actual clang version. + + Fixes https://github.com/curl/curl/issues/1522 + Closes https://github.com/curl/curl/pull/1523 + +Daniel Stenberg (31 May 2017) +- examples/externalsocket.c: s/closesocket/closecb + + ... since closesocket is a function in WinSock. + + Reported-by: Marcel Raad + Bug: https://github.com/curl/curl/commit/55fcb8485914700132fd1854c9509b66c955efbe#co + mmitcomment-22347818 + +Marcel Raad (31 May 2017) +- lib583: fix compiler warning + + Use CURLMcode for variable 'res' and cast to int where necessary + instead of the other way around. Other tests do the same. + + This fixes the following clang warning: + lib583.c:68:15: warning: cast from function call of type 'CURLMcode' to + non-matching type 'int' [-Wbad-function-cast] + +Daniel Stenberg (31 May 2017) +- CURLOPT_SSH_KEY*.3: typos + + Reported-by: Gisle Vanem + +- CURLOPT_STREAM_DEPENDS.3: typo + +- CURLOPT_FNMATCH_FUNCTION.3: also modified example to avoid fcpp issues + +- CURLOPT_FNMATCH_DATA.3: modified example to avoid fcpp issues + +- opts: more than 100 more examples for man pages... + +- libtest/lib574.c: use correct callback proto + +- examples/sampleconv.c: indent changes, made callbacks static + +- example/externalsocket.c: make it use CLOSESOCKETFUNCTION too + +Marcel Raad (31 May 2017) +- curl-compilers.m4: enable -Wshift-sign-overflow for clang + + clang 2.9+ supports -Wshift-sign-overflow, which warns about undefined + behavior on signed left shifts when shifting by too many places. + + Ref: https://github.com/curl/curl/issues/1516 + Closes https://github.com/curl/curl/pull/1517 + +Daniel Stenberg (31 May 2017) +- CURLOPT_PROXY.3: fix test 1140 breakage + +Jay Satiro (31 May 2017) +- build-wolfssl: Sync config with wolfSSL 3.11 + + wolfSSL configure script relevant changes from 3.10 to 3.11: + + - Async threading support added; disabled by default without async + crypto, which continues to be disabled by default. + + wolfSSL configure script relevant changes from 3.11 to 3.11.1 (beta): + + - TLS 1.3 beta support added; disabled by default. + + For experimenting I put in a comment block the defines needed to enable + TLS 1.3 support (ie the equivalent of --enable-tls13). + +Daniel Stenberg (30 May 2017) +- opts: more examples added to man pages + +- docs: clarify NO_PROXY further + + Fixes #1208 + +- CURLOPT_PROXY.3: describe the environment variables more + +- transfer: init the infilesize from the postfields... + + ... with a strlen() if no size was set, and do this in the pretransfer + function so that the info is set early. Otherwise, the default strlen() + done on the POSTFIELDS data never sets state.infilesize. + + Reported-by: Vincas Razma + Bug: #1294 + +Jay Satiro (29 May 2017) +- test557: fix ubsan runtime error due to int left shift + + - Test curl_msnprintf negative int width arg using INT_MIN instead of + 1 << 31 which is undefined behavior. 
+ + Closes https://github.com/curl/curl/issues/1516 + +- mbedtls: fix variable shadow warning + + vtls/mbedtls.c:804:69: warning: declaration of 'entropy' shadows a global declaration [-Wshadow] + CURLcode Curl_mbedtls_random(struct Curl_easy *data, unsigned char *entropy, + ^~~~~~~ + +Daniel Stenberg (29 May 2017) +- RELEASE-NOTES: synced with 3aaac8c2f + +Dan Fandrich (28 May 2017) +- tests: removed some redundant empty sections + +- runtests.pl: removed feature + + This hasn't been used in over a decade. can still be used to + run commands before the main test. + +Daniel Stenberg (27 May 2017) +- opts: more examples added in option man pages + +Dan Fandrich (27 May 2017) +- runtests.pl: removed unused arguments to valgrindparse + +Daniel Stenberg (25 May 2017) +- TODO: 6.4 is done, send telnet data in chunks + +- [Phil Crump brought this change] + + docs/CURLOPT_SSLVERSION.3: Correct define name in example + + Closes #1509 + +- ssh: fix 'left' may be used uninitialized + + follow-up to f31760e63b4e + + Reported-by: Michael Kaufmann + Bug: https://github.com/curl/curl/pull/1495#issuecomment-303982793 + +Michael Kaufmann (24 May 2017) +- time: fix type conversions and compiler warnings + + Fix bugs and compiler warnings on systems with 32-bit long and + 64-bit time_t. + + Reviewed-by: Daniel Stenberg + + Closes #1499 + +Marcel Raad (24 May 2017) +- examples: fix Wimplicit-fallthrough warnings + + This is contained in -Wextra with GCC 7. + +Daniel Stenberg (24 May 2017) +- [Anatol Belski brought this change] + + winbuild: fix the nghttp2 build + + Closes #1321 + +GitHub (24 May 2017) +- [Sergei Nikulov brought this change] + + LDAP: documentation update per #878 changes (#1506) + +Daniel Stenberg (23 May 2017) +- redirect: store the "would redirect to" URL when max redirs is reached + + Test 1261 added to verify. + + Reported-by: Lloyd Fournier + + Fixes #1489 + Closes #1497 + +GitHub (24 May 2017) +- [Sergei Nikulov brought this change] + + LDAP: fixed checksrc issue + +- [Sergei Nikulov brought this change] + + LDAP: using ldap_bind_s on Windows with methods (#878) + + * LDAP: using ldap_bind_s on Windows with methods(BASIC/DIGEST/NTLM/AUTONEG) + + * ldap: updated per build options handling + + * ldap: fixed logic for auth selection + +Daniel Stenberg (23 May 2017) +- [Akhil Kedia brought this change] + + cmake: fix build on Ubuntu 14.04 + + Fixed a syntax error with setting cache variables (The type and + docstring were missing), resulting in build errors. Quoted the + CURL_CA_PATH and CURL_CA_BUNDLE otherwise the path was written without + quotes in C code, resulting in build errors. + + Closes #1503 + + Signed-off-by: Akhil + +- url: fix declaration of 'pipe' shadows a global declaration + + follow-up to 4cdb1be8246c + +Kamil Dudka (22 May 2017) +- memdebug: fix compilation failure + + .... caused by a typo in the last commit (fixing issue #1504): + + memdebug.c: In function ‘curl_fclose’: + memdebug.c:444:3: error: implicit declaration of function + ‘DEBUGDEBUGASSERT’ [-Werror=implicit-function-declaration] + +Daniel Stenberg (22 May 2017) +- assert: avoid, use DEBUGASSERT instead! + + ... as it does extra checks to actually work. 
+ + Reported-by: jonrumsey at github + Fixes #1504 + +- [Simon Warta brought this change] + + cmake: remove unused variables: GNUTLS_ENABLED, NSS_ENABLED + +- [Simon Warta brought this change] + + cmake: remove CURL_CA_BUNDLE from cmake TODO + +- [Simon Warta brought this change] + + cmake: auto detection of CURL_CA_BUNDLE/CURL_CA_PATH + + Closes #1461 + +- [Simon Warta brought this change] + + cmake: add CURL_CA_BUNDLE/CURL_CA_FALLBACK/CURL_CA_PATH options + +- [Simon Warta brought this change] + + cmake: Add CURL_CA_FALLBACK to curl_config.h.cmake + + This is for symmetry with the autoconf generated curl_config.h.in + +- RELEASE-NOTES: synced with 052a14e3c + +Michael Kaufmann (20 May 2017) +- tests: stabilize test 1034 + + Pass the invalid domain name on stdin. On some systems, the test + framework cannot pass invalid UTF-8 sequences on the command line. + + Closes #1488 + +Daniel Stenberg (20 May 2017) +- ssh: ignore timeouts during disconnect + + ... as otherwise it risks not cleaning up the libssh2 handle properly + which leads to memory leak! + + Assisted-by: Joel Depooter + + Closes #1495 + Closes #1479 + + Bug: https://curl.haxx.se/mail/lib-2017-04/0024.html + +- ghiper.c/hiperfifo.c: add comment about missing timer functionality + + It takes someone to read up on the APIs of these libraries to figure out + how to do this correctly. + + Reported-by: Michael Kaufmann + + Closes #1253 + +- asiohiper.cpp / evhiperfifo.c: deal with negative timerfunction input + + That means delete the timer. + + Reported-by: Michael Kaufmann + Ref: #1253 + +- cmdline-opts/write-out.d: s/-L/--location + + Since the man page generator wants the long option name version to + generate the proper output. + +- [Bernhard M. Wiedemann brought this change] + + mkhelp.pl: do not add current time into curl binary + + ... as part of hugehelpgz rodata to make build reproducible. + + See https://reproducible-builds.org/ for why this is good + + Closes #1490 + +- oauth2-bearer.d: mention the argument + +Nick Zitzmann (16 May 2017) +- darwinssl: Fix exception when processing a client-side certificate file + if no error was raised by the API but the SecIdentityRef was null + + Fixes #1450 + +Daniel Stenberg (16 May 2017) +- curl_sasl: fix build error with CURL_DISABLE_CRYPTO_AUTH + USE_NTLM + + Reported-by: wyattoday at github + Fixes #1487 + +- docs/cmdline-opts/config.d: edit for language + +- RELEASE-NOTES: synced with eb16305e6 + +- [moparisthebest brought this change] + + SecureTransport/DarwinSSL: Implement public key pinning + + Closes #1400 + +- man pages: fix example syntax errors + + follow-up to 5ddad099b42b50 + +- docs/libcurl/opts: added more examples in man pages + +- CURLOPT_HTTPPROXYTUNNEL: clarify, add example + +- curl: show the libcurl release date in --version output + + ... and support and additional "security patched" date for those who + enhance older versions that way. Pass on the define CURL_PATCHSTAMP with + a date for that. + + Building with non-release headers shows the date as [unreleased]. + + Also: this changes the date format generated in the curlver.h file to be + "YYYY-MM-DD" (no name of the day or month, no time, no time zone) to + make it easier on the eye and easier to parse. Example (new) date + string: 2017-05-09 + + Suggested-by: Brian Childs + + Closes #1474 + +Dan Fandrich (13 May 2017) +- url.c: add a compile-time check that CURL_MAX_WRITE_SIZE is large enough + + Some code (e.g. Curl_fillreadbuffer) assumes that this buffer is not + exceedingly tiny and will break if it is. 
This same check is already + done at run time in the CURLOPT_BUFFERSIZE option. + +- lib510: don't write past the end of the buffer if it's too small + +- tests: added missing keywords "chunked Transfer-Encoding" + +Daniel Stenberg (13 May 2017) +- THANKS: add a few missing names + + ... I found them in the commit logs from the early years + +Dan Fandrich (13 May 2017) +- tests: made a couple of prechecks consistent with others + + Also removed a TODO suggesting caching the precheck results. Tests + showed this would save about 0.1 sec on the total test run time on a + relatively modern system, an unnoticeable gain at the cost of longer and + more complicated code. There would also be a danger that a cached test + result would be inappropriately returned, such as when other test + dependencies (like environment variables) are different or when the + precheck causes side effects (like filesystem changes). + +Daniel Stenberg (12 May 2017) +- FAQ: add 7.4 to toc + + ... and delete trailing whitespace + + Fixes #1484 + +- multi: remove leftover debug infof() calls from e9fd794a6 + +- pipeline: fix mistakenly trying to pipeline POSTs + + The function IsPipeliningPossible() would return TRUE if either + pipelining OR HTTP/2 were possible on a connection, which would lead to + it returning TRUE even for POSTs on HTTP/1 connections. + + It now returns a bitmask so that the caller can differentiate which kind + the connection allows. + + Fixes #1481 + Closes #1483 + Reported-by: stootill at github + +Jay Satiro (12 May 2017) +- [Ron Eldor brought this change] + + mbedtls: Support server renegotiation request + + Tested with servers: IIS 7.5; OpenSSL 1.0.2. + + Closes https://github.com/curl/curl/pull/1475 + +Marcel Raad (11 May 2017) +- cookie_interface: fix -Wcomma warning + + clang 5.0 complains: + possible misuse of comma operator here [-Wcomma] + +- formdata: fix -Wcomma warning + + clang 5.0 complains: + possible misuse of comma operator here [-Wcomma] + + Change the comma to a semicolon to fix that. + +Daniel Stenberg (10 May 2017) +- multi: use a fixed array of timers instead of malloc + + ... since the total amount is low this is faster, easier and reduces + memory overhead. + + Also, Curl_expire_done() can now mark an expire timeout as done so that + it never times out. + + Closes #1472 + +- multi: assign IDs to all timers and make each timer singleton + + A) reduces the timeout lists drastically + + B) prevents a lot of superfluous loops for timers that expires "in vain" + when it has actually already been extended to fire later on + +- [Richard Hsu brought this change] + + tests: remove superfluous test 1399 + + @MarcelRaad noted that `test1399` causes infinite loop on MinGW. + Looking into this, seems like it is related to how Windows handles + CRLF. See https://github.com/curl/curl/commit/9e093f by @mback2k. + Removing `test1399` as it's identical to `test1326` then with such a + fix. + + Test 1399 was broughy by commit 862b02f8947039e + + Closes #1478 + +Dan Fandrich (9 May 2017) +- tests: make test file names more unique + + Include the test number in the names of files written out by tests to + reduce the chance of accidental duplication and to make it more clear + which test is associated with which file. + +- tests: removed redundant --trace-ascii arguments + + This is already added by the test suite; it's not clear why all these + tests had it, unless it's cargo-culting. 
+ +Marcel Raad (9 May 2017) +- tool: fix remaining -Wcast-qual warnings + + Avoid casting away low-level const. + +Daniel Stenberg (9 May 2017) +- formboundary: convert assert into run-time check + + ... to really make sure the boundary fits in the target buffer. + + Fixes unused parameter 'buflen' warning. + + Reported-by: Michael Kaufmann + Bug: https://github.com/curl/curl/pull/1468#issuecomment-300078754 + +Dan Fandrich (9 May 2017) +- tests: list the primary server first in the server section + +Daniel Stenberg (8 May 2017) +- curl: generate the --help output + + ... using the docs/cmdline-opts/gen.pl script, so that we get all the + command line option documentation from the same source. + + The generation of the list has to be done manually and pasted into the + source code. + + Closes #1465 + +- tests: updated for modified fake random + +- [Jay Satiro brought this change] + + rand: treat fake entropy the same regardless of endianness + + When the random seed is purposely made predictable for testing purposes + by using the CURL_ENTROPY environment variable, process that data in an + endian agnostic way so the the initial random seed is the same + regardless of endianness. + + - Change Curl_rand to write to a char array instead of int array. + + - Add Curl_rand_hex to write random hex characters to a buffer. + + Fixes #1315 + Closes #1468 + + Co-authored-by: Daniel Stenberg + Reported-by: Michael Kaufmann + +Dan Fandrich (8 May 2017) +- tests: give each stunnel.conf file a unique name + + Otherwise, subsequent uses of stunnel overwrite the configuration file + of previous invocations so they can no longer be inspected. + +Marcel Raad (8 May 2017) +- tool_msgs: remove wrong cast + + Commit 481e0de00a9003b9c5220b120e3fc302d9b0932d changed the variable + type from int to size_t, so don't cast the result of strlen to int + anymore. + +- tftpd: fix signed/unsigned mismatch warnings + + alarm's argument is unsigned. + +- libtest: fix MinGW-w64 warnings + + long is 32 bits while size_t is 64 bits on MinGW-w64, so + typecheck-gcc.h complains when using size_t for a long option. + Also, curl_socket_t is unsigned long long rather than int. + +Daniel Stenberg (8 May 2017) +- curl.1: depend the build on the Makefile.inc too + + ... to also make it update when we remove files, like we did for + --environment in commit a8e388dd1095. + +- RELEASE-NOTES: synced with e3f84efc32d6b01a + +- runtests: fix "use of undefined value" warning in -R handling + +Marcel Raad (8 May 2017) +- test537: use correct variable type + + Avoids narrowing conversion warnings because rlim_t is usually + unsigned long. + + Closes https://github.com/curl/curl/pull/1469 + +- sendrecv: fix MinGW-w64 warning + + The first argument to select is an int, while curl_socket_t is + unsigned long long when using WinSock. It's ignored anyway [1]. + + [1] https://msdn.microsoft.com/en-us/library/windows/desktop/ms740141.aspx + +- tool_parsecfg: fix -Wcast-qual warning + + Don't convert string literal to char * before assigning it to + const char *. + +- asyn-thread: fix unused macro warnings + + Don't do anything in this file if CURLRES_THREADED is not defined. + +- tftp: silence bad-function-cast warning + + The cases this warns about are handled elsewhere, so just use an + intermediate variable to silence the warning. + +Daniel Stenberg (7 May 2017) +- [canavan at github brought this change] + + buildconf: fix hang on IRIX + + Apparently, /usr/bin/m4 ignores the --version parameter and waits for + input from stdin. 
+ + Fixes #1471 + +- opts: fix bad example formatting \n => \\n + + ...to render properly nroff. + +- opts: examples added to 8 more libcurl option man pages + +- curl: remove tool_writeenv.[ch] + + ... and USE_ENVIRONMENT and --environment. It was once added for RISC OS + support and its platform specific behavior has been annoying ever + since. Added in commit c3c8bbd3b2688da8e, mostly unchanged since + then. Most probably not actually used for years. + + Closes #1463 + +Dan Fandrich (6 May 2017) +- runtests.pl: simplify the datacheck read section + + Also, document that numbered datacheck sections are possible. + +Marcel Raad (5 May 2017) +- tests: fix -Wcast-qual warnings + + Avoid casting string literals to non-const char *. + +Daniel Stenberg (5 May 2017) +- docs/opts: 24 more man pages now have examples + +- docs/opts: 23 more man pages now have examples + +- tests/server: run checksrc by default in debug-builds + +- curl_slist_append.3: clarify a NULL input creates a new list + +Marcel Raad (5 May 2017) +- unit1305: fix compiler warning + + calloc and ai_addrlen expect different (usually unsigned) types. + +Daniel Stenberg (5 May 2017) +- runtests: use -R for random order + + Suggested-by: Dan Fandrich + +- runtests: add -o to run test cases in scrambled order + + ... instead of numerical order. + + Closes #1466 + +Dan Fandrich (4 May 2017) +- sockfilt.c: shortened too long line + +Marcel Raad (4 May 2017) +- tests/server: make string literals const + + assign string literals to const char * instead of char * in order to + avoid a lot of these warnings: + cast from 'const char *' to 'char *' drops const qualifier + [-Wcast-qual] + +Dan Fandrich (4 May 2017) +- schannel: return a more specific error code for SEC_E_UNTRUSTED_ROOT + +- test557: set a known good numeric locale + + Windows does not allow setting the locale with environment variables (as + the test attempted to do), so the test failed when run with a user + locale that has a comma as radixchar. Changed the test to call + setlocale() explicitly to ensure that a known working locale is set even + on Windows. + +Daniel Stenberg (4 May 2017) +- curl: fix warning "comma at end of enumerator list" + +- test559: verify use of minimum CURLOPT_BUFFERSIZE + +Marcel Raad (4 May 2017) +- curl_setup_once: use SEND_QUAL_ARG2 for swrite + + SEND_QUAL_ARG2 had to be set, but was never used. Use it in swrite to + avoid warnings about casting away low-level const. + + Closes https://github.com/curl/curl/pull/1464 + +Daniel Stenberg (4 May 2017) +- CURLINFO_REDIRECT_URL.3: add example + +- CURLINFO_EFFECTIVE_URL.3: add example + +Marcel Raad (3 May 2017) +- lib: fix compiler warnings + + Fix the following warnings when building the tests by using the correct + types: + cast from 'const char *' to 'void *' drops const qualifier + [-Wcast-qual] + implicit conversion changes signedness [-Wsign-conversion] + +- typecheck-gcc: add support for CURLINFO_SOCKET + + Closes https://github.com/curl/curl/pull/1452 + +- typecheck-gcc: add missing string options + + Closes https://github.com/curl/curl/pull/1452 + +Daniel Stenberg (3 May 2017) +- abstract-unix-socket.d: shorten the help text to fit within 79 cols + +- RELEASE-NOTES: synced with 862b02f89 + +- [Richard Hsu brought this change] + + Telnet: Write full buffer instead of byte-by-byte + + Previous TODO wanting to write in chunks. We should support writing more + at once since some TELNET servers may respond immediately upon first + byte written such as WHOIS servers. 
+ + Closes #1389 + +- curl: non-boolean command line args reject --no- prefixes + + ... and instead properly respond with an error message to the user + instead of silently ignoring. + + Fixes #1453 + Closes #1458 + +Marcel Raad (2 May 2017) +- testpart: remove _MPRINTF_REPLACE + + Support for _MPRINTF_REPLACE in mprintf.h was removed in + 55452ebdff47f98bf3cc383f1dfc3623fcaefefd, replaced with curl_printf.h. + +Dan Fandrich (2 May 2017) +- gtls: fixed a lingering BUFSIZE reference + +Daniel Stenberg (2 May 2017) +- ssh: fix compiler warning from e40e9d7f0de + +- url: let CURLOPT_BUFFERSIZE realloc to smaller sizes too + + Closes #1449 + +- BUFSIZE: rename to READBUFFER_*, make separate MASTERBUF_SIZE + +- openssl: use local stack for temp storage + +- sendf: remove use of BUFSIZE from debug data conversions + + The buffer can have other sizes. + +- buffer: use data->set.buffer_size instead of BUFSIZE + + ... to properly use the dynamically set buffer size! + +- krb5: use private buffer for temp string, not receive buffer + +- upload: UPLOAD_BUFSIZE is now for the upload buffer + +- unit1606: do not print/access buffer + + It was a wrong assumption that it could do that! + +- http-proxy: use a dedicated CONNECT response buffer + + To make it suitably independent of the receive buffer and its flexible + size. + +- transfer: fix minor buffer_size mistake + +- failf: use private buffer, don't clobber receive buffer + +- pingpong: use the set buffer size + +- http2: use the correct set buffer size + +- http: don't clobber the receive buffer for timecond + +- buffer_size: make sure it always has the correct size + + Removes the need for CURL_BUFSIZE + +- file: use private buffer for C-L output + + ... instead of clobbering the download buffer. + +- CURLOPT_BUFFERSIZE: 1024 bytes is now the minimum size + + The buffer is needed to receive FTP, HTTP CONNECT responses etc so + already at this size things risk breaking and smaller is certainly not + wise. + +- ftp: use private buffer for temp storage, not receive buffer + +- http: use private user:password output buffer + + Don't clobber the receive buffer. + +Marcel Raad (1 May 2017) +- anyauthput: remove unused code + + The definition of TRUE was introduced in + 4a728747e6f8845e500910e397dfc99aaf4a7984 and is not used anymore since + e664cd5826d43930fcc5b5dbaedbec94af33184b. + The usage of intptr_t was removed in + 32e38b8f42477cf5ce3c3fef2fcc9db82f7fb7be. + +Jay Satiro (1 May 2017) +- tool: Fix missing prototype warnings for CURL_DOES_CONVERSIONS + + - Include tool_convert.h where needed. + + Bug: https://github.com/curl/curl/issues/1460 + Reported-by: Gisle Vanem + +- curl_setup: Ensure no more than one IDN lib is enabled + + Prior to this change it was possible for libcurl to be built with both + Windows' native IDN lib (normaliz) and libidn2 enabled. It appears that + doesn't offer any benefit --and could cause a bug-- since libcurl's IDN + handling is written to use either one but not both. + + Bug: https://github.com/curl/curl/issues/1441#issuecomment-297689856 + Reported-by: Gisle Vanem + +Marcel Raad (1 May 2017) +- getpart: use correct variable type + + This fixes the following clang warning: + getpart.c:201:17: warning: cast from function call of type 'CURLcode' + to non-matching type 'int' [-Wbad-function-cast] + +- tests: declare TU-local variables static + + This fixes missing-variable-declarations warnings when building with + clang. 
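+
+  A minimal usage sketch for the CURLOPT_BUFFERSIZE changes above; the URL
+  and the 16 KB value are arbitrary illustrations, not taken from these
+  commits:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        CURLcode res;
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        /* receive buffer size; must be at least 1024 bytes as described above */
+        curl_easy_setopt(curl, CURLOPT_BUFFERSIZE, 16384L);
+        res = curl_easy_perform(curl);
+        if(res != CURLE_OK)
+          fprintf(stderr, "transfer failed: %s\n", curl_easy_strerror(res));
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }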
+ +- tool_cb_prg: fix double-promotion warning + + clang complains: + tool_cb_prg.c:86:22: error: implicit conversion increases + floating-point precision: 'float' to 'double' + [-Werror,-Wdouble-promotion] + + Fix this by using a double instead of a float constant. + +Dan Fandrich (1 May 2017) +- examples: fixed too long line and too long string warnings + +Marcel Raad (30 Apr 2017) +- examples: declare TU-local variables static + + This fixes missing-variable-declarations warnings when building with + clang. + +- http2: declare TU-local variables static + + This fixes the following clang warnings: + + http2.c:184:27: error: no previous extern declaration for non-static + variable 'Curl_handler_http2' [-Werror,-Wmissing-variable-declarations] + http2.c:204:27: error: no previous extern declaration for non-static + variable 'Curl_handler_http2_ssl' + [-Werror,-Wmissing-variable-declarations] + +Dan Fandrich (30 Apr 2017) +- unit1604: fixed indentation + +- unit1604: fixed compilation under Windows, broken in the previous commit + +- tests: fixed OOM handling of unit tests to abort test + + It's dangerous to continue to run the test when a memory alloc fails. + +Marcel Raad (29 Apr 2017) +- curl_rtmp: fix missing-variable-declarations warnings + + clang complains: + + curl_rtmp.c:61:27: error: no previous extern declaration for non-static variable 'Curl_handler_rtmp' [-Werror,-Wmissing-variable-declarations] + curl_rtmp.c:81:27: error: no previous extern declaration for non-static variable 'Curl_handler_rtmpt' [-Werror,-Wmissing-variable-declarations] + curl_rtmp.c:101:27: error: no previous extern declaration for non-static variable 'Curl_handler_rtmpe' [-Werror,-Wmissing-variable-declarations] + curl_rtmp.c:121:27: error: no previous extern declaration for non-static variable 'Curl_handler_rtmpte' [-Werror,-Wmissing-variable-declarations] + curl_rtmp.c:141:27: error: no previous extern declaration for non-static variable 'Curl_handler_rtmps' [-Werror,-Wmissing-variable-declarations] + curl_rtmp.c:161:27: error: no previous extern declaration for non-static variable 'Curl_handler_rtmpts' [-Werror,-Wmissing-variable-declarations] + + Fix this by including the header file. + +Dan Fandrich (29 Apr 2017) +- url: fixed a memory leak on OOM while setting CURLOPT_BUFFERSIZE + +- tests: added --remote-time tests for remaining protocols that support it + +- runtests.pl: support multiline commands + +- tool_operate: use utimes instead of obsolescent utime when available + +- test1443: test --remote-time + +- http-proxy: removed unused argument in CURL_DISABLE_PROXY case + + Missed in commit 55c3c02e + +Daniel Stenberg (27 Apr 2017) +- cookie_interface.c: changed the other domain to example.com too + +- cookie_interface.c: fix cookie domain so the example works + +Dan Fandrich (26 Apr 2017) +- Makefile: fix make dist + + Commit 80a87e8a broke 'make dist' as it can't handle installing from + absolute target names. Rearranged the dependencies so the absolute name + is used for building but the relative name is use for distributing. 
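+
+  A rough sketch of the behaviour exercised by the --remote-time tests
+  above: ask for the remote file time with CURLOPT_FILETIME, read it back
+  with CURLINFO_FILETIME and apply it with utimes(). The URL and the local
+  file name are made-up placeholders:
+
+    #include <stdio.h>
+    #include <sys/time.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        long filetime = -1;
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/file.txt");
+        curl_easy_setopt(curl, CURLOPT_NOBODY, 1L);   /* headers only */
+        curl_easy_setopt(curl, CURLOPT_FILETIME, 1L); /* request the remote time */
+        if(curl_easy_perform(curl) == CURLE_OK &&
+           curl_easy_getinfo(curl, CURLINFO_FILETIME, &filetime) == CURLE_OK &&
+           filetime >= 0) {
+          struct timeval times[2];
+          times[0].tv_sec = times[1].tv_sec = filetime;
+          times[0].tv_usec = times[1].tv_usec = 0;
+          utimes("file.txt", times); /* placeholder for the downloaded copy */
+        }
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }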
+ +Marcel Raad (26 Apr 2017) +- lib: remove unused code + + This fixes the following clang warnings: + macro is not used [-Wunused-macros] + will never be executed [-Wunreachable-code] + + Closes https://github.com/curl/curl/pull/1448 + +Daniel Stenberg (26 Apr 2017) +- http-proxy: remove unused argument from Curl_proxyCONNECT() + +- [Martin Kepplinger brought this change] + + url: declare get_protocol_family() static + + get_protocol_family() is not defined static even though there is a + static local forward declaration. Let's simply make the definition match + it's declaration. + + Bug: https://curl.haxx.se/mail/lib-2017-04/0127.html + +- examples: ftpuploadfrommem.c + + Uploads data to an FTP site, directly from memory. + + Closes #1451 + +Kamil Dudka (25 Apr 2017) +- nss: load libnssckbi.so if no other trust is specified + + The module contains a more comprehensive set of trust information than + supported by nss-pem, because libnssckbi.so also includes information + about distrusted certificates. + + Reviewed-by: Kai Engert + Closes #1414 + +- nss: factorize out nss_{un,}load_module to separate fncs + + No change of behavior is intended by this commit. + +- nss: do not leak PKCS #11 slot while loading a key + + It could prevent nss-pem from being unloaded later on. + + Bug: https://bugzilla.redhat.com/1444860 + +Marcel Raad (25 Apr 2017) +- typecheck-gcc: fix _curl_is_slist_info + + Info values starting with CURLINFO_SOCKET expect a curl_socket_t, not a + curl_slist argument. + + This fixes the following GCC warning when building the examples with + --enable-optimize: + + ../../include/curl/typecheck-gcc.h:126:42: warning: call to + ‘_curl_easy_getinfo_err_curl_slist’ declared with attribute warning: + curl_easy_getinfo expects a pointer to 'struct curl_slist *' for this + info [enabled by default] + sendrecv.c:90:11: note: in expansion of macro ‘curl_easy_getinfo’ + res = curl_easy_getinfo(curl, CURLINFO_ACTIVESOCKET, &sockfd); + + Closes https://github.com/curl/curl/pull/1447 + +Daniel Stenberg (25 Apr 2017) +- curl: set a 100K buffer size by default + + Test command 'time curl http://localhost/80GB -so /dev/null' on a Debian + Linux. + + Before (middle performing run out 9): + + real 0m28.078s + user 0m11.240s + sys 0m12.876s + + After (middle performing run out 9) + + real 0m26.356s (93.9%) + user 0m5.324s (47.4%) + sys 0m8.368s (65.0%) + + Also, doing SFTP over a 200 millsecond latency link is now about 6 times + faster. + + Closes #1446 + +- transfer: remove 'uploadbuf' pointer and cleanup readwrite_upload() + + The data->req.uploadbuf struct member served no good purpose, instead we + use ->state.uploadbuffer directly. It makes it clearer in the code which + buffer that's being used. + + Removed the 'SingleRequest *' argument from the readwrite_upload() proto + as it can be derived from the Curl_easy struct. Also made the code in + the readwrite_upload() function use the 'k->' shortcut to all references + to struct fields in 'data->req', which previously was made with a mix of + both. + +Jay Satiro (25 Apr 2017) +- configure: stop prepending to LDFLAGS, CPPFLAGS + + - Change prepends to appends because user's LDFLAGS and CPPFLAGS should + always come first so they're searched before ours. + + Bug: https://github.com/curl/curl/issues/1420 + Reported-by: Helmut K. C. 
Tessarek + +Marcel Raad (25 Apr 2017) +- if2ip: fix -Wcast-align warning + + Follow-up to 119037325de02579f5c58256ca2ed2a0aa592c86, which fixed the + warning in the HAVE_GETIFADDRS block, but not in the + HAVE_IOCTL_SIOCGIFADDR block. + +Dan Fandrich (24 Apr 2017) +- Makefile: avoid use of GNU-specific form of $< + + $< is only allowed in implicit rules in some non-GNU makes (e.g. BSD, + AIX) so avoid use elsewhere by referencing the dependent curl.1 file + directly instead. This is somewhat tricky because the file is supplied + in the packaged tar ball (but not in git) but must still be able to be + rebuilt when its dependencies change. The right thing must happen in + both tar ball and git source trees, as well as in both in-tree and + out-of-tree builds. + +Kamil Dudka (24 Apr 2017) +- nss: adapt to the new Curl_llist API + + This commit fixes compilation failure caused by + cbae73e1dd95946597ea74ccb580c30f78e3fa73. + +Marcel Raad (24 Apr 2017) +- curl-compilers.m4: accept -Og and -Ofast GCC flags + + -Og, introduced in GCC 4.8, optimizes for debugging experience. + -Ofast, introduced in GCC 4.7, builds on -O3 and enables further + optimizations breaking strict standards compliance. + When specified in CFLAGS, these were always overridden by -O0 or -O2. + Fix this by adding them to flags_opt_all. + + Ref: https://gcc.gnu.org/onlinedocs/gcc-4.8.0/gcc/Optimize-Options.html + Ref: https://github.com/curl/curl/pull/1404#issuecomment-296401570 + Closes https://github.com/curl/curl/pull/1440 + +Daniel Stenberg (24 Apr 2017) +- RELEASE-NOTES: synced with c68fed875 + +- configure: fix the -ldl check for openssl, add -lpthread check + + The check for if -ldl is needed to build with (a statically built) + openssl was broken. This repairs the check, and adds a check for + -lpthread as well since OpenSSL 1.1.0+ does in fact require -lpthread so + only adding -ldl for a static openssl build is no longer enough. + + Reported-by: Jay Satiro + Ref: #1426 + Closes #1427 + +- llist: fix a comment after cbae73e1dd9 + + Pointed-it-by: Kevin Ji + URL: https://github.com/curl/curl/commit/cbae73e1dd95946597ea74ccb580c30f78e3fa73#commitcomment-21872622 + +Jay Satiro (22 Apr 2017) +- schannel: Don't treat encrypted partial record as pending data + + - Track when the cached encrypted data contains only a partial record + that can't be decrypted without more data (SEC_E_INCOMPLETE_MESSAGE). + + - Change Curl_schannel_data_pending to return false in such a case. + + Other SSL libraries have pending data functions that behave similarly. + + Ref: https://github.com/curl/curl/pull/1387 + + Closes https://github.com/curl/curl/pull/1392 + +Daniel Stenberg (22 Apr 2017) +- [Alan Jenkins brought this change] + + multi: clarify condition in curl_multi_wait + + `if(nfds || extra_nfds) {` is followed by `malloc(nfds * ...)`. + + If `extra_fs` could be non-zero when `nfds` was zero, then we have + `malloc(0)` which is allowed to return `NULL`. But, malloc returning + NULL can be confusing. In this code, the next line would treat the NULL + as an allocation failure. + + It turns out, if `nfds` is zero then `extra_nfds` must also be zero. + The final value of `nfds` includes `extra_nfds`. So the test for + `extra_nfds` is redundant. It can only confuse the reader. 
+ + Closes #1439 + +Marcel Raad (22 Apr 2017) +- lib: fix maybe-uninitialized warnings + + With -Og, GCC complains: + + easy.c:628:7: error: ‘mcode’ may be used uninitialized in this function [-Werror=maybe-uninitialized] + + ../lib/strcase.h:35:29: error: ‘tok_buf’ may be used uninitialized in this function [-Werror=maybe-uninitialized] + vauth/digest.c:208:9: note: ‘tok_buf’ was declared here + + ../lib/strcase.h:35:29: error: ‘tok_buf’ may be used uninitialized in this function [-Werror=maybe-uninitialized] + vauth/digest.c:566:15: note: ‘tok_buf’ was declared here + + Fix this by initializing the variables. + +Dan Fandrich (22 Apr 2017) +- gnutls: removed some code when --disable-verbose is configured + + This reduces the binary size and fixes a compile warning. + +Daniel Stenberg (22 Apr 2017) +- llist: no longer uses malloc + + The 'list element' struct now has to be within the data that is being + added to the list. Removes 16.6% (tiny) mallocs from a simple HTTP + transfer. (96 => 80) + + Also removed return codes since the llist functions can't fail now. + + Test 1300 updated accordingly. + + Closes #1435 + +Marcel Raad (21 Apr 2017) +- typecheck-gcc: handle function pointers properly + + All the callbacks passed to curl_easy_setopt are defined as function + pointers. The possibility to pass both functions and function pointers + was handled for the callbacks that typecheck-gcc.h defined as + compatible, but not for the public callback types themselves. + + This makes all compatible callback types defined in typecheck-gcc.h + function pointers too and checks all functions uniformly with + _curl_callback_compatible, which handles both functions and function + pointers. + + A symptom of the problem was a warning in tool_operate.c with + --disable-libcurl-option and without --enable-debug as that file + passes the callback functions to curl_easy_setopt directly. + + Fixes https://github.com/curl/curl/issues/1403 + Closes https://github.com/curl/curl/pull/1404 + +Dan Fandrich (21 Apr 2017) +- mbedtls: enable NTLM (& SMB) even if MD4 support is unavailable + + In that case, use libcurl's internal MD4 routine. This fixes tests 1013 + and 1014 which were failing due to configure assuming NTLM and SMB were + always available whenever mbed TLS was in use (which is now true). + +Daniel Stenberg (21 Apr 2017) +- tests: remove the html and PDF versions from the tarball + +- openssl: fix memory leak in servercert + + ... when failing to get the server certificate. + +- Revert "src/Makefile.am: avoid explicit $<" + + This reverts commit 5b4cbcf11d5100ff793a8e9edbaa6fe1fc7495f5. + + Since it broke out-of-tree builds from tarballs. See discussion in #1432 + +- bump: start working on next release + +- src/Makefile.am: avoid explicit $< + + ... since apparently "BSD make" doesn't support it. + + Reported-by: Thomas Klausner + Fixes #1432 + +Version 7.54.0 (19 Apr 2017) + +Daniel Stenberg (19 Apr 2017) +- THANKS: add contributors from 7.54.0 release notes + +- RELEASE-NOTES: curl 7.54.0 + +Marcel Raad (18 Apr 2017) +- nss: fix MinGW compiler warnings + + This fixes 3 warnings issued by MinGW: + 1. PR_ImportTCPSocket actually has a paramter of type PROsfd instead of + PRInt32, which is 64 bits on Windows. Fixed this by including the + corresponding header file instead of redeclaring the function, which is + supported even though it is in the private include folder. [1] + 2. In 64-bit mode, size_t is 64 bits while CK_ULONG is 32 bits, so an explicit + narrowing cast is needed. + 3. 
Curl_timeleft returns time_t instead of long since commit
+  21aa32d30dbf319f2d336e0cb68d3a3235869fbb.
+
+  [1] https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_ImportTCPSocket
+
+  Closes https://github.com/curl/curl/pull/1393
+
+Daniel Stenberg (18 Apr 2017)
+- [Jay Satiro brought this change]
+
+  TLS: Fix switching off SSL session id when client cert is used
+
+  Move the sessionid flag to ssl_primary_config so that ssl and proxy_ssl
+  will each have their own sessionid flag.
+
+  Regression since HTTPS-Proxy support was added in cb4e2be. Prior to that
+  this issue had been fixed in 247d890, CVE-2016-5419.
+
+  Bug: https://github.com/curl/curl/issues/1341
+  Reported-by: lijian996@users.noreply.github.com
+
+  The new incarnation of this bug is called CVE-2017-7468 and is documented
+  here: https://curl.haxx.se/docs/adv_20170419.html
+
+- [David Benjamin brought this change]
+
+  openssl: don't try to print nonexistent peer private keys
+
+  X.509 certificates carry public keys, not private keys. Fields
+  corresponding to the private half of the key will always be NULL.
+
+  Closes #1425
+
+- [David Benjamin brought this change]
+
+  openssl: fix thread-safety bugs in error-handling
+
+  ERR_error_string with NULL parameter is not thread-safe. The library
+  writes the string into some static buffer. Two threads doing this at
+  once may clobber each other and run into problems. Switch to
+  ERR_error_string_n which avoids this problem and is explicitly
+  bounds-checked.
+
+  Also clean up some remnants of OpenSSL 0.9.5 around here. A number of
+  comments (fixed buffer size, explaining that ERR_error_string_n was
+  added in a particular version) date to when ossl_strerror tried to
+  support pre-ERR_error_string_n OpenSSLs.
+
+  Closes #1424
+
+- [David Benjamin brought this change]
+
+  openssl: make SSL_ERROR_to_str more future-proof
+
+  Rather than making assumptions about the values, use a switch-case.
+
+  Closes #1424
+
+- [Daniel Gustafsson brought this change]
+
+  code: fix typos and style in comments
+
+  A few random typos, and minor whitespace cleanups, found in comments
+  while reading code.
+
+  Closes #1423
+
+Marcel Raad (17 Apr 2017)
+- extern-scan.pl: strip trailing CR
+
+  This makes test 1135 pass with CRLF checkouts.
+
+  Ref: https://github.com/curl/curl/pull/1344#issuecomment-289243166
+  Closes https://github.com/curl/curl/pull/1422
+
+- configure.ac: ignore CR after version numbers
+
+  Ignore everything after the version numbers in LIBCURL_VERSION and
+  LIBCURL_VERSION_NUM to get rid of the extra CR character.
+  This makes tests 1022 and 1023 pass on Linux with a CRLF checkout.
+
+  Ref: https://github.com/curl/curl/pull/1344#issuecomment-289243166
+  Closes https://github.com/curl/curl/pull/1422
+
+- .gitattributes: force shell scripts to LF
+
+  Bash on Linux errors out on CR characters.
+  This makes tests 1221 and 1222 pass on Linux with a CRLF checkout.
+
+  Ref: https://github.com/curl/curl/pull/1344#issuecomment-289243166
+  Closes https://github.com/curl/curl/pull/1422
+
+- unit1303: fix compiler warning
+
+  MinGW-w64 complains:
+  warning: conversion to 'long int' from 'time_t {aka long long int}' may
+  alter its value [-Wconversion]
+  Fix this by using the correct type.
+
+Daniel Stenberg (16 Apr 2017)
+- RELEASE-NOTES: synced with 1451271e0
+
+- [Larry Stefani brought this change]
+
+  http2: fix handle leak in error path
+
+  Add missing newhandle free call in push_promise().
+ + Closes #1416 + +- [Larry Stefani brought this change] + + mbedtls: fix memory leak in error path + + Add missing our_ssl_sessionid free call in mbed_connect_step3(). + + Closes #1417 + +Marcel Raad (15 Apr 2017) +- curl-compilers.m4: turn implicit function declarations into errors + + This adds -Werror-implicit-function-declaration for GCC 2.95+ so that + these errors are visible at the point where they occur instead of only + at link time. + Implicit function declarations are illegal in C99 and C++ anyway, and + the same warning has been turned into an error for ICC in commit + 3072c5b8a127057aa922b7c51051bbb4a630b091. + + Ref: https://gcc.gnu.org/onlinedocs/gcc-2.95.2/gcc_2.html#SEC8 + Ref: https://curl.haxx.se/mail/lib-2017-04/0001.html + Closes https://github.com/curl/curl/pull/1419 + +- test1541: also test for CURL_PULL_WS2TCPIP_H + + Ref: https://github.com/curl/curl/issues/1408 + Closes https://github.com/curl/curl/pull/1412 + +- tests/server/util: prefer over + + Follow-up to aa573c3c55cda72ec5ef677d87f6f46a53385f0c + + Ref: https://github.com/curl/curl/pull/1406 + +Daniel Stenberg (11 Apr 2017) +- Curl_expire_latest: ignore already expired timers + + If the existing timer is still in there but has expired, the new timer + should be added. + + Reported-by: Rainer Canavan + Bug: https://curl.haxx.se/mail/lib-2017-04/0030.html + Closes #1407 + +- system.h: fix mingw section + + Reported-by: Marcel Raad + Fixes #1408 + Closes #1409 + +Marcel Raad (11 Apr 2017) +- polarssl: unbreak build with versions < 1.3.8 + + ssl_session_init was only introduced in version 1.3.8, the penultimate + version. The function only contains a memset, so replace it with that. + + Suggested-by: Jay Satiro + Fixes https://github.com/curl/curl/issues/1401 + +- poll: prefer over + + The POSIX standard location is . Using results in + warning spam when using the musl standard library. + + Closes https://github.com/curl/curl/pull/1406 + +Daniel Stenberg (10 Apr 2017) +- [Alexis La Goutte brought this change] + + openssl: fix this statement may fall through [-Wimplicit-fallthrough=] + + Closes #1402 + +Kamil Dudka (10 Apr 2017) +- nss: load CA certificates even with --insecure + + ... because they may include an intermediate certificate for a client + certificate and the intermediate certificate needs to be presented to + the server, no matter if we verify the peer or not. + + Reported-by: thraidh + Closes #851 + +Daniel Stenberg (10 Apr 2017) +- RELEASE-NOTES: synced with f9d1e9a27f7e1 + +Dan Fandrich (10 Apr 2017) +- libcurl-thread.3: fixed a bad macro that caused test 1140 to fail + +Daniel Stenberg (9 Apr 2017) +- libcurl-thread.3: also mention threaded-resolver + + Reported-by: Alex Bligh + Bug: https://curl.haxx.se/mail/lib-2017-04/0044.html + +- .github/stale.yml: enable the stale bot + + Issues and PRs with no activity for 180 days will get marked as stale, + and if no further activity happens within 14 more days, the issue gets + closed. + + This follows our established policy of not letting stalled bugs "get in + the way": https://curl.haxx.se/docs/bugs.html#Closing_off_stalled_bugs + + Closes #1398 + +Jay Satiro (8 Apr 2017) +- CURLINFO_SCHEME.3: fix variable type + + - Change documented param type to char ** from incorrect long *. + +Marcel Raad (8 Apr 2017) +- INSTALL.md: fix secure transport configure arguments + + --without-ssl is needed instead of --with-winssl. + +- vtls: fix unreferenced variable warnings + + ... by moving the variables into the correct #ifdef block. 
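+
+  To illustrate the corrected CURLINFO_SCHEME type mentioned above, a
+  minimal sketch; the URL is arbitrary and the returned string is owned by
+  the easy handle:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        if(curl_easy_perform(curl) == CURLE_OK) {
+          char *scheme = NULL; /* a char *, not a long, receives the value */
+          if(curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme) == CURLE_OK &&
+             scheme)
+            printf("scheme: %s\n", scheme);
+        }
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }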
+ +Daniel Stenberg (7 Apr 2017) +- BUGS: "Bugs in old versions" + +- system.h: add section for tcc + + Closes #1397 + +Marcel Raad (7 Apr 2017) +- schannel: fix compiler warnings + + When UNICODE is not defined, the Curl_convert_UTF8_to_tchar macro maps + directly to its argument. As it is declared as a pointer to const and + InitializeSecurityContext expects a pointer to non-const, both MSVC and MinGW + issue a warning about implicitly casting away the const. Fix this by declaring + the variables as pointers to non-const. + + Closes https://github.com/curl/curl/pull/1394 + +- [Isaac Boukris brought this change] + + sspi: print out InitializeSecurityContext() error message + + Reported-by: Carsten (talksinmath) + + Fixes #1384 + Closes #1395 + +- gtls: fix compiler warning + + Curl_timeleft returns time_t instead of long since commit + 21aa32d30dbf319f2d336e0cb68d3a3235869fbb. + +Daniel Stenberg (6 Apr 2017) +- test1606: verify speedcheck + +- low_speed_limit: improved function for longer time periods + + Previously, periods of fast speed between periods of slow speed would + not count and could still erroneously trigger a timeout. + + Reported-by: Paul Harris + Fixes #1345 + Closes #1390 + +- system.h: set sizeof long to 4 on "default 32 bit" systems + + Triggered a test failure on test 1541 for the build known as + "Linux 4.4 i686 tcc 0.9.26 glibc 2.20" + +Marcel Raad (6 Apr 2017) +- nss: fix build after e60fe20fdf94e829ba5fce33f7a9d6c281149f7d + + Curl_llist_alloc is now Curl_llist_init. + + Closes https://github.com/curl/curl/pull/1391 + +Daniel Stenberg (6 Apr 2017) +- INSTALL.cmake: more problems + + and mention specific issues where they are discussed + +- test1541: ignore the curl_off_t variable type name comparison + + ... the sizes and the formatting strings are what's really important and + avoids problems with int64_t vs "long long". + + Bug: https://curl.haxx.se/mail/lib-2017-04/0019.html + +- Revert "configure: prefer 'long long' to int64_t for curl_off_t" + + This reverts commit 81284374bf3c670d2050f8562edeb69f060b07cc. + + Due to mingw32 brekage. + +Marcel Raad (5 Apr 2017) +- tool_operate: fix MinGW compiler warning + + MinGW complains: + tool_operate.c:197:15: error: comparison is always true due to limited range + of data type [-Werror=type-limits] + + Fix this by only doing the comparison if 'long' is large enough to hold the + constant it is compared with. + + Closes https://github.com/curl/curl/pull/1378 + +- tool_operate: move filetime code to its own function + + Ref: https://github.com/curl/curl/pull/1378 + +Daniel Stenberg (5 Apr 2017) +- configure: prefer 'long long' to int64_t for curl_off_t + + Since it is a native type and it makes it less complicated to find a + matching one in system.h + + Bug: https://curl.haxx.se/mail/lib-2017-04/0010.html + Reported-by: Dan Fandrich + + Closes #1388 + +- [Dániel Bakai brought this change] + + tests: added test for Curl_splaygetbest to unit1309 + + This checks the new behavior of Curl_splaygetbest, so that the smallest + node not larger than the key is removed, and FIFO behavior is kept even + when there are multiple nodes with the same key. + + Closes #1358 + +- [Dániel Bakai brought this change] + + multi: fix queueing of pending easy handles + + Multi handles repeatedly invert the queue of pending easy handles when + used with CURLMOPT_MAX_TOTAL_CONNECTIONS. This is caused by a multistep + process involving Curl_splaygetbest and violates the FIFO property of + the multi handle. 
+  This patch fixes this issue by redefining the "best" node in the
+  context of timeouts as the "smallest not larger than now", and
+  implementing the necessary data structure modifications to do this
+  effectively, namely:
+  - splay nodes with the same key are now stored in a doubly-linked
+    circular list instead of a non-circular one to enable O(1)
+    insertion to the tail of the list
+  - Curl_splayinsert inserts nodes with the same key to the tail of
+    the same list
+  - in case of multiple nodes with the same key, the one on the head of
+    the list gets selected
+
+Marcel Raad (4 Apr 2017)
+- tool: fix Windows Unicode build
+
+  ... by explicitly calling the ANSI versions of Windows API functions where
+  required.
+
+Daniel Stenberg (4 Apr 2017)
+- [Martin Kepplinger brought this change]
+
+  curl_sasl: declare mechtable static
+
+  struct mechtable is only used locally here. It can be declared static.
+
+Jay Satiro (4 Apr 2017)
+- [Antti Hätälä brought this change]
+
+  url: don't free postponed data on connection reuse
+
+  - Don't free postponed data on a connection that will be reused since
+    doing so can cause data loss when pipelining.
+
+    Only Windows builds are affected by this.
+
+  Closes https://github.com/curl/curl/issues/1380
+
+Daniel Stenberg (4 Apr 2017)
+- RELEASE-NOTES: synced with 4f2e348f9b42c69c480
+
+- hash: move key into hash struct to reduce mallocs
+
+  This removes one tiny malloc for each hash struct allocated. In a simple
+  case like "curl localhost", this saves three mallocs.
+
+  Closes #1376
+
+- llist: replace Curl_llist_alloc with Curl_llist_init
+
+  No longer allocate the curl_llist head struct for lists separately.
+
+  Removes 17 (15%) tiny allocations in a normal "curl localhost" invocation.
+
+  Closes #1381
+
+Jay Satiro (4 Apr 2017)
+- easy: silence compiler warning
+
+  Safe to silence warning adding time delta of poll, which can trigger on
+  Windows since sizeof time_t > sizeof long.
+
+  warning C4244: '+=' : conversion from 'time_t' to 'long', possible loss
+  of data
+
+Daniel Stenberg (4 Apr 2017)
+- [Richlv brought this change]
+
+  docs: minor typo in write-out.d
+
+  Closes #1382
+
+- include: curl/system.h is a run-time version of curlbuild.h
+
+  system.h is aimed to replace curlbuild.h at a later point in time when
+  we feel confident system.h works sufficiently well.
+
+  curl/system.h is currently used in parallel with curl/curlbuild.h
+
+  curl/system.h determines data sizes, data types and include file
+  status based on available preprocessor defines instead of getting
+  generated at build-time. This is done in order to avoid relying on a
+  build-time generated file that makes it complicated to do 32 and 64 bit
+  builds from the same installed set of headers.
+
+  Test 1541 verifies that system.h comes to the same conclusion that
+  curlbuild.h offers.
+
+  Closes #1373
+
+- multi: make curl_multi_wait avoid malloc in the typical case
+
+  When only a few additional file descriptors are used, avoid the malloc.
+
+  Closes #1377
+
+Marcel Raad (3 Apr 2017)
+- tests/server/util: remove in6addr_any for recent MinGW
+
+  In ancient MinGW versions, in6addr_any was declared as extern, but not
+  defined. Because of that, 22a0c57746ae12506b1ba0f0fafffd26c1907d6a added
+  definitions for in6addr_any when compiling with MinGW. The bug was fixed in
+  w32api version 3.6 from 2006, so this workaround is not needed anymore for
+  recent versions.
+ + This fixes the following MinGW-w64 warnings because the MinGW-w64 version of + IN6ADDR_ANY_INIT has the two additional braces inside the macro: + util.c:59:14: warning: braces around scalar initializer + util.c:59:40: warning: excess elements in scalar initializer + + Ref: https://sourceforge.net/p/mingw/mingw-org-wsl/ci/e4803e0da25c57ae1ad0fa75ae2b7182ff7fa339/tree/w32api/ChangeLog + Closes https://github.com/curl/curl/pull/1379 + +Daniel Stenberg (3 Apr 2017) +- docs: added examples for CURLINFO_FILETIME.3 and CURLOPT_FILETIME.3 + +Jay Satiro (31 Mar 2017) +- fail-early.d: fix typos + +- docs: Explain --fail-early does not imply --fail + + Closes https://github.com/curl/curl/pull/1375 + +Daniel Stenberg (1 Apr 2017) +- telnet: (win32) fix read callback return variable + + telnet.c(1427,21): warning: comparison of constant 268435456 with + expression of type 'CURLcode' is always false + + telnet.c(1433,21): warning: comparison of constant 268435457 with + expression of type 'CURLcode' is always false + + Reviewed-by: Jay Satiro + Reported-by: Gisle Vanem + Bug: https://github.com/curl/curl/issues/1225#issuecomment-290340890 + + Closes #1374 + +- CTestConfig.cmake: removed, unused + +- libcurl.def: removed, unused + +- docs/index.html: removed, was not shipped anyway + +- dist: add missing files to the tarball + +Peter Wu (30 Mar 2017) +- cmake: fix build with cmake 2.8.12.2 + + For some reason, CMake 2.8.12.2 did not expand the list argument in a + single DEPENDS argument. Remove the quotes, so it gets expanded into + multiple arguments for add_custom_command and add_custom_target. + + Fixes https://github.com/curl/curl/issues/1370 + Closes #1372 + +Marcel Raad (30 Mar 2017) +- ssh: fix narrowing conversion warning + + 'left' is used as time_t but declared as long. + MinGW complains: + error: conversion to 'long int' from 'time_t {aka long long int}' may alter + its value [-Werror=conversion] + Changed the declaration to time_t. + +- http2: silence unused parameter warnings + + In release mode, MinGW complains: + error: unused parameter 'lib_error_code' [-Werror=unused-parameter] + +Daniel Stenberg (30 Mar 2017) +- [Hanno Böck brought this change] + + curl: fix callback functions to match prototype + + The function tool_debug_cb doesn't match curl_debug_callback in curl.h + (unsigned vs. signed char* for 3rd param). + + Bug: https://curl.haxx.se/mail/lib-2017-03/0120.html + +- [Alexis La Goutte brought this change] + + gcc7: fix ‘*’ in boolean context, suggest ‘&&’ instead [-Wint-in-bool-context] + + Closes #1371 + +Marcel Raad (30 Mar 2017) +- schannel: fix unused variable warning + + If CURL_DISABLE_VERBOSE_STRINGS is defined, hostname is not used in + schannel_connect_step3. + +- connect: fix unreferenced parameter warning + + When CURL_DISABLE_VERBOSE_STRINGS is defined, the reason parameter in + Curl_conncontrol is not used as the infof macro expands to nothing. + +- select: use correct SIZEOF_ constant + + At least under Windows, there is no SIZEOF_LONG, so it evaluates to 0 even + though sizeof(int) == sizeof(long). This should probably have been + CURL_SIZEOF_LONG, but the type of timeout_ms changed from long to time_t + anyway. + This triggered MSVC warning C4668 about implicitly replacing undefined + macros with '0'. 
+ + Closes https://github.com/curl/curl/pull/1362 + +Daniel Stenberg (30 Mar 2017) +- cmake: add cmake file in docs/libcurl/opts/ to dist + +- cmake: add more missing files to the dist + +- docs/Makefile.am: include CMakeLists.txt in the dist tarball + +Marcel Raad (29 Mar 2017) +- NTLM: check for features with #ifdef instead of #if + + Feature defines are normally checked with #ifdef instead of #if in the rest of + the codebase. Additionally, some compilers warn when a macro is implicitly + evaluated to 0 because it is not defined, which was the case here. + + Ref: https://github.com/curl/curl/pull/1362#discussion_r108605101 + Closes https://github.com/curl/curl/pull/1367 + +Daniel Stenberg (29 Mar 2017) +- [Hanno Böck brought this change] + + curl: fix callback argument inconsistency + + As you can see the callback definition uses a char* for the first + argument, while the function uses a void*. + + URL: https://curl.haxx.se/mail/lib-2017-03/0116.html + +- RELEASE-NOTES: synced with 556c51a2df + +- [madblobfish brought this change] + + KNOWN_BUGS: typo + + Closes #1364 + +- [Maksim Stsepanenka brought this change] + + make: use the variable MAKE for recursive calls + + Closes #1366 + +- conncache: make hashkey avoid malloc + + ... to make it much faster. Idea developed with primepie on IRC. + + Closes #1365 + +Kamil Dudka (28 Mar 2017) +- http: do not treat FTPS over CONNECT as HTTPS + + If we use FTPS over CONNECT, the TLS handshake for the FTPS control + connection needs to be initiated in the SENDPROTOCONNECT state, not + the WAITPROXYCONNECT state. Otherwise, if the TLS handshake completed + without blocking, the information about the completed TLS handshake + would be saved to a wrong flag. Consequently, the TLS handshake would + be initiated in the SENDPROTOCONNECT state once again on the same + connection, resulting in a failure of the TLS handshake. I was able to + observe the failure with the NSS backend if curl ran through valgrind. + + Note that this commit partially reverts curl-7_21_6-52-ge34131d. + +Daniel Stenberg (28 Mar 2017) +- pause: handle mixed types of data when paused + + When receiving chunked encoded data with trailers, and the write + callback returns PAUSE, there might be both body and header to store to + resend on unpause. Previously libcurl returned error for that case. + + Added test case 1540 to verify. + + Reported-by: Stephen Toub + Fixes #1354 + Closes #1357 + +Jay Satiro (28 Mar 2017) +- [Isaac Boukris brought this change] + + http: Fix proxy connection reuse with basic-auth + + When using basic-auth, connections and proxy connections + can be re-used with different Authorization headers since + it does not authenticate the connection (like NTLM does). + + For instance, the below command should re-use the proxy + connection, but it currently doesn't: + curl -v -U alice:a -x http://localhost:8181 http://localhost/ + --next -U bob:b -x http://localhost:8181 http://localhost/ + + This is a regression since refactoring of ConnectionExists() + as part of: cb4e2be7c6d42ca0780f8e0a747cecf9ba45f151 + + Fix the above by removing the username and password compare + when re-using proxy connection at proxy_info_matches(). + + However, this fix brings back another bug would make curl + to re-print the old proxy-authorization header of previous + proxy basic-auth connection because it wasn't cleared. 
+ + For instance, in the below command the second request should + fail if the proxy requires authentication, but would succeed + after the above fix (and before aforementioned commit): + curl -v -U alice:a -x http://localhost:8181 http://localhost/ + --next -x http://localhost:8181 http://localhost/ + + Fix this by clearing conn->allocptr.proxyuserpwd after use + unconditionally, same as we do for conn->allocptr.userpwd. + + Also fix test 540 to not expect digest auth header to be + resent when connection is reused. + + Signed-off-by: Isaac Boukris + + Closes https://github.com/curl/curl/pull/1350 + +- openssl: exclude DSA code when OPENSSL_NO_DSA is defined + + - Fix compile errors that occur in openssl.c when OpenSSL lib was + built without DSA support. + + Bug: https://github.com/curl/curl/issues/1361 + Reported-by: neheb@users.noreply.github.com + +- examples/fopen: checksrc compliance + +Marcel Raad (28 Mar 2017) +- schannel: fix variable shadowing warning + + No need to redeclare the variable. + +- multi: fix MinGW-w64 compiler warnings + + error: conversion to 'long int' from 'time_t {aka long long int}' may alter + its value [-Werror=conversion] + +- .gitattributes: turn off CRLF for *.am + + If Makefile.am uses CRLF, buildconf in a Windows checkout fails with: + ".ibtoolize: error: AC_CONFIG_MACRO_DIRS([m4]) conflicts with + ACLOCAL_AMFLAGS=-I m4" + +Daniel Stenberg (26 Mar 2017) +- [klemens brought this change] + + spelling fixes + + Closes #1356 + +- curl: check for end of input in writeout backslash handling + + Reported-by: Brian Carpenter + + Added test 1442 to verify + +Marcel Raad (24 Mar 2017) +- tests/README: make "Run" section foolproof + + curl must be built before building the tests. + + Closes https://github.com/curl/curl/pull/1352 + +Daniel Stenberg (23 Mar 2017) +- openssl: fix comparison between signed and unsigned integer expressions + +Marcel Raad (23 Mar 2017) +- [Edward Kimmel brought this change] + + asiohiper: make sure socket is open in event_cb + + Send curl_socket_t to event_cb and make sure it hasn't been closed yet. + + Closes https://github.com/curl/curl/pull/1318 + +Dan Fandrich (23 Mar 2017) +- openssl: made the error table static const + +Jay Satiro (23 Mar 2017) +- openssl: fall back on SSL_ERROR_* string when no error detail + + - If SSL_get_error is called but no extended error detail is available + then show that SSL_ERROR_* as a string. + + Prior to this change there was some inconsistency in that case: the + SSL_ERROR_* code may or may not have been shown, or may have been shown + as unknown even if it was known. + + Ref: https://github.com/curl/curl/issues/1300 + + Closes https://github.com/curl/curl/pull/1348 + +Dan Fandrich (23 Mar 2017) +- mkhelp: disable compression if the perl gzip module is unavailable + + This is nowadays included with the base perl distribution, but wasn't + prior to about perl 5.14 + +Daniel Stenberg (23 Mar 2017) +- [Anders Roxell brought this change] + + tests/README: mention nroff for --manual tests + + Signed-off-by: Anders Roxell + + Closes #1342 + +- CURLINFO_PRIMARY_IP.3: add example + +- travis: run tests-nonflaky instead of tests-full + +- make: introduce 'test-nonflaky' target + + Running this in the root build dir will invoke the test suite to only + run tests not marked as 'flaky'. 
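+
+  In the spirit of the CURLINFO_PRIMARY_IP.3 example noted above, a minimal
+  sketch; the URL is an arbitrary placeholder and the returned string
+  belongs to the easy handle:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        if(curl_easy_perform(curl) == CURLE_OK) {
+          char *ip = NULL;
+          if(curl_easy_getinfo(curl, CURLINFO_PRIMARY_IP, &ip) == CURLE_OK && ip)
+            printf("connected to %s\n", ip);
+        }
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }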
+ +- test2033: flaky + +Jay Satiro (21 Mar 2017) +- [Ales Mlakar brought this change] + + mbedtls: add support for CURLOPT_SSL_CTX_FUNCTION + + Ref: https://curl.haxx.se/mail/lib-2017-02/0097.html + + Closes https://github.com/curl/curl/pull/1272 + +Peter Wu (21 Mar 2017) +- cmake: add support for building HTML and PDF docs + + Note that for some reason there is this warning (that also exists with + autotools, added since curl-7_15_1-94-ga718cb05f): + + docs/libcurl/curl_multi_socket_all.3:1: can't open `man3/curl_multi_socket.3': No such file or directory + + Additionally, adjust the roffit --mandir option to support creating + links when doing out-of-tree builds. + + Ref: https://github.com/curl/curl/pull/1288 + +- cmake: build manual pages (including curl.1) + + Also make Perl mandatory to allow building the docs. + + While CMakeLists.txt could probably read the list of manual pages from + Makefile.am, actually putting those in CMakeLists.txt is cleaner so that + is what is done here. + + Fixes #1230 + Ref: https://github.com/curl/curl/pull/1288 + +- docs: split file lists into Makefile.inc + + For easier sharing with CMake. The contents were reformatted to use + two-space indent and expanded tabs (matching lib/Makefile.common). + + Ref: https://github.com/curl/curl/pull/1288 + +Daniel Stenberg (21 Mar 2017) +- examples: comment typos in http2 examples + +- RELEASE-NOTES: typo + +- RELEASE-NOTES: synced with 6e0f26c8a8c28df + +- multi: fix streamclose() crash in debug mode + + The code would refer to the wrong data pointer. Only debug builds do + this - for verbosity. + + Reported-by: zelinchen@users.noreply.github.com + Fixes #1329 + +- CONTRIBUTE: mention referring to github issues in commit msgs + +Dan Fandrich (20 Mar 2017) +- runtests.pl: fixed display of the Gopher IPv6 port number + +- tests: fixed the documented test server port numbers + +- test714/5: added HTTP as a required feature + + These tests use an HTTP proxy so require that curl be built with HTTP + support. + +- tests: strip more options from non-HTTP --libcurl tests + + The CURLOPT_USERAGENT and CURLOPT_MAXREDIRS options are only set if HTTP + support is available, so ignore them in tests where HTTP is not + guaranteed. + +Jay Satiro (18 Mar 2017) +- [Palo Markovic brought this change] + + darwinssl: fix typo in variable name + + Broken a week ago in 6448f98. + + Closes https://github.com/curl/curl/pull/1337 + +- tool_operate: Fix showing HTTPS-Proxy options on CURLE_SSL_CACERT + + - Show the HTTPS-proxy options on CURLE_SSL_CACERT if libcurl was built + with HTTPS-proxy support. + + Prior to this change those options were shown only if an HTTPS-proxy was + specified by --proxy, but that did not take into account environment + variables such as http_proxy, https_proxy, etc. Follow-up to e1187c4. + + Bug: https://github.com/curl/curl/issues/1331 + Reported-by: Nehal J Wani + +- CURLINFO_LOCAL_PORT.3: fix typo + +Daniel Stenberg (16 Mar 2017) +- CURLINFO_LOCAL_PORT.3: added example + +- SSLCERTS.md: mention HTTPS proxies and their separate options + +- BINDINGS: a Delphi binding + +- KNOWN_BUGS: remove libidn related issue + + ... as we no longer use libidn + +Dan Fandrich (14 Mar 2017) +- build: removed redundant DEPENDENCIES from makefiles + +Daniel Stenberg (13 Mar 2017) +- [Sylvestre Ledru brought this change] + + Improve code readbility + + ... by removing the else branch after a return, break or continue. 
+ + Closes #1310 + +Jay Satiro (13 Mar 2017) +- [Anatol Belski brought this change] + + winbuild: add basic support for OpenSSL 1.1.x + + - Auto-detect OpenSSL 1.1 libs + + Closes https://github.com/curl/curl/pull/1322 + +Daniel Stenberg (13 Mar 2017) +- RELEASE-NOTES: synced with c25e0761d0fc49c4 + +- make: regenerate docs/curl.1 by runinng make in docs + + ... previously, docs/ was only a dist subdir, now also a build subdir. + + Reported-by: Dan Fandrich + Bug: https://curl.haxx.se/mail/lib-2017-03/0017.html + +Dan Fandrich (12 Mar 2017) +- test1440/1: depend on well-defined file: behaviour + + Depend on the known behaviour of URLs for nonexistent files rather than + the undefined behaviour of URLs for directories (which fails on Windows). + The test isn't about file: URLs at all, so the URL used doesn't really + matter. + +- tests: clear the SSL_CERT_FILE variable on --libcurl tests + + Otherwise, the contents will end up in the output and fail the + verification. + +- test1287: added verbose logs keyword + +- tool_writeout: fixed a buffer read overrun on --write-out + + If a % ended the statement, the string's trailing NUL would be skipped + and memory past the end of the buffer would be accessed and potentially + displayed as part of the --write-out output. Added tests 1440 and 1441 + to check for this kind of condition. + + Reported-by: Brian Carpenter + +Jay Satiro (12 Mar 2017) +- [Desmond O. Chang brought this change] + + url: add option CURLOPT_SUPPRESS_CONNECT_HEADERS + + - Add new option CURLOPT_SUPPRESS_CONNECT_HEADERS to allow suppressing + proxy CONNECT response headers from the user callback functions + CURLOPT_HEADERFUNCTION and CURLOPT_WRITEFUNCTION. + + - Add new tool option --suppress-connect-headers to expose + CURLOPT_SUPPRESS_CONNECT_HEADERS and allow suppressing proxy CONNECT + response headers from --dump-header and --include. + + Assisted-by: Jay Satiro + Assisted-by: CarloCannas@users.noreply.github.com + Closes https://github.com/curl/curl/pull/783 + +- http_proxy: Ignore TE and CL in CONNECT 2xx responses + + A client MUST ignore any Content-Length or Transfer-Encoding header + fields received in a successful response to CONNECT. + "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 + + Prior to this change such a case would cause an error. + + In some ways this bug appears to be a regression since c50b878. Prior to + that libcurl may have appeared to function correctly in such cases by + acting on those headers instead of causing an error. But that behavior + was also incorrect. + + Bug: https://github.com/curl/curl/issues/1317 + Reported-by: mkzero@users.noreply.github.com + +- [Thomas Glanzmann brought this change] + + mbedtls: fix typo in variable name + + Broken a few days ago in 6448f98. + + Bug: https://curl.haxx.se/mail/lib-2017-03/0015.html + +Michael Kaufmann (11 Mar 2017) +- tests: fix the authretry tests + + Do not call curl_easy_reset() between the requests, because the + auth state must be preserved for these tests. + + Follow-up to 0afbcfd + +- proxy: skip SSL initialization for closed connections + + This prevents a "Descriptor is not a socket" error for WinSSL. 
+ + Reported-by: Antony74@users.noreply.github.com + Reviewed-by: Jay Satiro + + Fixes https://github.com/curl/curl/issues/1239 + +- curl_easy_reset: Also reset the authentication state + + Follow-up to 5278462 + See https://github.com/curl/curl/issues/1095 + +- [Isaac Boukris brought this change] + + authneg: clear auth.multi flag at http_done + + This flag is meant for the current request based on authentication + state, once the request is done we can clear the flag. + + Also change auth.multi to auth.multipass for better readability. + + Fixes https://github.com/curl/curl/issues/1095 + Closes https://github.com/curl/curl/pull/1326 + + Signed-off-by: Isaac Boukris + Reported-by: Michael Kaufmann + +Dan Fandrich (11 Mar 2017) +- url: don't compile detect_proxy if HTTP support is disabled + +- cmdline-opts: fixed a few typos + +Daniel Stenberg (10 Mar 2017) +- README.md: add coverity and travis badges + +- ISSUE_TEMPLATE: for bugs, ask questions on the mailing list + + and try to add the top comment within an HTML comment in the hope + that it might get hidden if the text is kept + +- openssl: add two /* FALLTHROUGH */ to satisfy coverity + + CID 1402159 and 1402158 + +- tests: disabled 1903 now + + Test 1903 is doing HTTP pipelining, and that is a timing and ordering + sensitive operation and this fails far too often on the Travis CI + leading to people more or less ignoring test failures there. Not good. + + The end of pipelning is probably coming sooner rather than later + anyway... + +Dan Fandrich (9 Mar 2017) +- tls-max.d: added to the makefile + +- build: fixed making man page in out-of-tree tarball builds + + The man page taken from the release package is found in a different + location than if it's built from source. It must be referenced as $< in + the rule to get its correct location in the VPATH. + +- mkhelp: simplified the gzip code + + This eliminates the need for an external gzip program, which wasn't + working with Busybox's gzip, anyway. It now compresses using perl's + IO::Compress::Gzip + +- polarssl: fixed compile errors introduced in 6448f98c + +Daniel Stenberg (8 Mar 2017) +- bump: next release will be known as 7.54.0 + + ...due to the newly added CURL_SSLVERSION_MAX_* functionality + +- openssl: unbreak the build after 6448f98c1857de + + Verified with OpenSSL 1.1.0e and OpenSSL master (1.1.1) + +Kamil Dudka (8 Mar 2017) +- [Jozef Kralik brought this change] + + vtls: add options to specify range of enabled TLS versions + + This commit introduces the CURL_SSLVERSION_MAX_* constants as well as + the --tls-max option of the curl tool. + + Closes https://github.com/curl/curl/pull/1166 + +Daniel Stenberg (8 Mar 2017) +- RELEASE-NOTES: synced with 6888a670aa01 + +- MANPAGE: clarify the dash situation in meta data + +- insecure.d: clarify that this is for server connections + + Assisted-by: Ray Satiro + Bug: https://curl.haxx.se/mail/lib-2017-03/0002.html + +Dan Fandrich (8 Mar 2017) +- test1260: added http as a required feature + +Daniel Stenberg (7 Mar 2017) +- [Steve Brokenshire brought this change] + + maketgz: Run updatemanpages.pl to update man pages + + maketgz now runs scripts/updatemanpages.pl to update the man pages .TH + section to use the current date and curl/libcurl version. 
+ + (TODO Section 3.1) + + Closes #1058 + +- [Steve Brokenshire brought this change] + + gitignore: Ignore man page dist files + + Ignore man page dist files generated by scripts/updatemanpages.pl + +- [Steve Brokenshire brought this change] + + Makefile.am: Remove distribution man pages when running 'make clean' + +- [Steve Brokenshire brought this change] + + Makefile.am: Added scripts/updatemanpages.pl to EXTRA_DIST + +- [Steve Brokenshire brought this change] + + updatemanpages.pl: Update man pages to use current date and versions + + Added script to update man pages to use the current date and + curl/libcurl versions. + + updatemanpages.pl has three arrays: list of directories to look in, + list of extensions to process, list of files to exclude from + processing. + + Check man page in git repoistory using the date from the existing man + page before updating to avoid updating the man page if no change is + made. + + If data is received from the git command then update the man page with + the current date and version otherwise leave alone. + + Applied patch from badger to make the date argument optional, change the + git command used, added date argument to processfile subroutine and + print to STDERR if no date is found in a man page. + + Added code to process the changed man page into a new man page with + .dist added to the filename to keep the original source files unchanged. + Updated POD documentation to reflect that the date argument optional. + + Code style is in line with CODE_STYLE.md. + + Directories: docs/ docs/libcurl/ docs/libcurl/opts/ tests/ + Extensions: .1 .3 + Excluded files: mk-ca-bundle.1 template.3 + + (TODO Section 3.1) + +- [Tatsuhiro Tsujikawa brought this change] + + http2: Fix assertion error on redirect with CL=0 + + This fixes assertion error which occurs when redirect is done with 0 + length body via HTTP/2, and the easy handle is reused, but new + connection is established due to hostname change: + + curl: http2.c:1572: ssize_t http2_recv(struct connectdata *, + int, char *, size_t, CURLcode *): + Assertion `httpc->drain_total >= data->state.drain' failed. + + To fix this bug, ensure that http2_handle_stream is called. + + Fixes #1286 + Closes #1302 + +- ares: Curl_resolver_wait_resolv: clear *entry first in function + +- ares: better error return on timeouts + + Assisted-by: Ray Satiro + + Bug: https://curl.haxx.se/mail/lib-2017-03/0009.html + +Jay Satiro (6 Mar 2017) +- KNOWN_BUGS: Add DarwinSSL won't import PKCS#12 without a password + + Bug: https://github.com/curl/curl/issues/1308 + Reported-by: Justin Clift + +Dan Fandrich (6 Mar 2017) +- test1260: removed errant XML tag + +Daniel Stenberg (6 Mar 2017) +- URL: return error on malformed URLs with junk after port number + + ... because it causes confusion with users. Example URLs: + + "http://[127.0.0.1]:11211:80" which a lot of languages' URL parsers will + parse and claim uses port number 80, while libcurl would use port number + 11211. + + "http://user@example.com:80@localhost" which by the WHATWG URL spec will + be treated to contain user name 'user@example.com' but according to + RFC3986 is user name 'user' for the host 'example.com' and then port 80 + is followed by "@localhost" + + Both these formats are now rejected, and verified so in test 1260. 
+ + Reported-by: Orange Tsai + +- BINDINGS: update the Lua-cURL URL + +- [Sylvestre Ledru brought this change] + + BINDINGS: add Scilab binding + + Closes #1312 + +- BINDINGS: add go-curl and perl6-net-curl + + Reported-by: Peter Pentchev + +- BINDINGS: add misssing C++ bindings + + Reported-by: Giuseppe Persico + +- ares: return error at once if timed out before name resolve starts + + Pointed-out-by: Ray Satiro + Bug: https://curl.haxx.se/mail/lib-2017-03/0004.html + +Peter Wu (5 Mar 2017) +- [Michael Maltese brought this change] + + CMake: Set at most one SSL library + + Ref: https://github.com/curl/curl/pull/1228 + +- [Michael Maltese brought this change] + + CMake: Add mbedTLS support + + Ref: https://github.com/curl/curl/pull/1228 + +- [Michael Maltese brought this change] + + CMake: Add DarwinSSL support + + Assisted-by: Simon Warta + Ref: https://github.com/curl/curl/pull/1228 + +- [Michael Maltese brought this change] + + CMake: Reorganize SSL support, separate WinSSL and SSPI + + This is closer to how configure.ac does it + + Ref: https://github.com/curl/curl/pull/1228 + +Jay Satiro (4 Mar 2017) +- CURLOPT_SSL_CTX_FUNCTION.3: Fix EXAMPLE formatting errors + + .. also document that CURLE_NOT_BUILT_IN is a RETURN VALUE. + + Ref: https://github.com/curl/curl/pull/1290 + +Daniel Stenberg (4 Mar 2017) +- [Andrew Krieger brought this change] + + fix potential use of uninitialized variables + + MSVC with LTCG detects this at warning level 4. + + Closes #1304 + +Dan Fandrich (4 Mar 2017) +- [Sylvestre Ledru brought this change] + + fix some typos in the doc (#1306) + +- tests: fixed a typo in some comments + +Jay Satiro (3 Mar 2017) +- url: split off proxy init and parsing from create_conn + + Move the proxy parse/init into helper create_conn_helper_init_proxy to + mitigate the chances some non-proxy code will be mistakenly added to it. + + Ref: https://github.com/curl/curl/issues/1274#issuecomment-281556510 + Ref: https://github.com/curl/curl/pull/1293 + + Closes https://github.com/curl/curl/pull/1298 + +- [Alexis La Goutte brought this change] + + build: fix gcc7 implicit fallthrough warnings + + Mark intended fallthroughs with /* FALLTHROUGH */ so that gcc will know + it's expected and won't warn on [-Wimplicit-fallthrough=]. + + Closes https://github.com/curl/curl/pull/1297 + +- [Greg Rowe brought this change] + + configure: fix --with-zlib when a path is specified + + Prior to this change if you attempted to configure curl using + --wtih-zlib and specified a path the path would be ignored if you also + had pkg-config installed on your system. This situation can easily + arise when you are cross compiling. This change moves the test for + detecting zlib settings via pkg-config only if OPT_ZLIB is not set. + + Closes https://github.com/curl/curl/pull/1292 + +- [c4rlo brought this change] + + no-keepalive.d: fix typo + + Closes https://github.com/curl/curl/pull/1301 + +- checksrc.bat: Ignore curl_config.h.in, curl_config.h + +- configure: fix for --enable-pthreads + + Better handle options conflicts that can occur if --enable-pthreads. + + Bug: https://github.com/curl/curl/pull/1295 + Reported-by: Marc-Antoine Perennou + +- [JDepooter brought this change] + + darwinssl: Warn that disabling host verify also disables SNI + + In DarwinSSL the SSLSetPeerDomainName function is used to enable both + sending SNI and verifying the host. When host verification is disabled + the function cannot be called, therefore SNI is disabled as well. 
+ + Closes https://github.com/curl/curl/pull/1240 + +Marcel Raad (28 Feb 2017) +- warnless: suppress compiler warning + + If size_t is 32 bits, MSVC warns: + warning C4310: cast truncates constant value + The warning is harmless as CURL_MASK_SCOFFT gets + truncated to the maximum value of size_t. + +Dan Fandrich (27 Feb 2017) +- tests: enable HTTP/2 tests to run with non-default port numbers + +Marcel Raad (27 Feb 2017) +- digest_sspi: fix compilation warning + + MSVC complains: + warning C4701: potentially uninitialized local variable 'output_token_len' used + +Jay Satiro (26 Feb 2017) +- cyassl: get library version string at runtime + + wolfSSL >= 3.6.0 supports getting its library version string at runtime. + +Dan Fandrich (26 Feb 2017) +- test1139: allow for the possibility that the man page is not rebuilt + + This is likely to be the case when building from a tar ball release + package which includes a prebuilt man page. In that case, test the + packaged man page instead. This only makes a difference when building + out-of-tree (in-tree, the location in both cases is identical). + +Jay Satiro (25 Feb 2017) +- [Isaac Boukris brought this change] + + url: fix unix-socket support for proxy-disabled builds + + Prior to this change if curl was built with Unix Socket support + (--enable-unix-sockets) and without Proxy support (--disable-proxy) then + unix socket options would erroneously be ignored. + + Regression introduced in: + 0b8d682f81ee9acb763dd4c9ad805fe08d1227c0 + + Bug: https://github.com/curl/curl/issues/1274 + Reported-by: mccormickt12@users.noreply.github.com + + Closes https://github.com/curl/curl/pull/1289 + +Dan Fandrich (26 Feb 2017) +- gopher: fixed detection of an error condition from Curl_urldecode + +- ftp: fixed a NULL pointer dereference on OOM + +Jay Satiro (25 Feb 2017) +- [Peter Wu brought this change] + + docs: de-duplicate file lists in the Makefiles + + Make use of macro substitution of suffix patterns to remove duplication + of manual names. This approach is portable according to + http://pubs.opengroup.org/onlinepubs/009695399/utilities/make.html + + Closes https://github.com/curl/curl/pull/1287 + +Dan Fandrich (25 Feb 2017) +- ftp: removed an erroneous free in an OOM path + +- proxy: fixed a memory leak on OOM + +- tests: use consistent environment variables for setting charset + + The character set in POSIX is set by the locale defined by (in + decreasing order of precedence) the LC_ALL, LC_CTYPE and LANG + environment variables (CHARSET was used by libidn but not libidn2). + LC_ALL is cleared to ensure that LC_CTYPE takes effect, but LC_ALL is + not used to set the locale to ensure that other parts of the locale + aren't overridden. Since there doesn't seem to be a cross-platform way + of specifying a UTF-8 locale, and not all systems may support UTF-8, a + is used to skip the test if UTF-8 can't be verified to be + available. Test 1035 was also converted to UTF-8 for consistency, as + the actual character set used there is irrelevant to the test. + + This patch uses a different UTF-8 locale than the last attempt, namely + en_US.UTF-8. This one has been verified on 7 different Linux and BSD + distributions and is more complete and usable than the locale UTF-8 (on + at least some systems). + +- test557: explicitly use the C locale so the numeric output is as expected + +Jay Satiro (25 Feb 2017) +- [Simon Warta brought this change] + + cmake: Replace invalid UTF-8 byte sequence + + - Change the encoding of the regex temp placeholder token to UTF-8. 
+ + Prior to this change the file contained special chars in a different + encoding than ASCII or UTF-8 making text editors and Python complain + when reading the file. + + Closes https://github.com/curl/curl/pull/1271 + Closes https://github.com/curl/curl/pull/1275 + +Daniel Stenberg (24 Feb 2017) +- bump: work on the next release + +Version 7.53.1 (24 Feb 2017) + +Daniel Stenberg (24 Feb 2017) +- release: 7.53.1 + +- Revert "tests: use consistent environment variables for setting charset" + + This reverts commit ecd1d020abdae3c3ce3643ddab3106501e62e7c0. + + That commit caused test failures on my Debian Linux machine for all + changed test cases. We need to reconsider how that should get done. + +Dan Fandrich (23 Feb 2017) +- tests: use consistent environment variables for setting charset + + Character set in POSIX is set by the locale defined (in decreasing order + of precedence) by the LC_ALL, LC_CTYPE and LANG environment variables (I + believe CHARSET is only historic). LC_ALL is cleared to ensure that + LC_CTYPE takes effect, but LC_ALL is not used to set the locale to + ensure that other parts of the locale aren't overriden, if set. Since + there doesn't seem to be a cross-platform way of specifying a UTF-8 + locale, and not all systems may support UTF-8, a is used + (where relevant) to skip the test if UTF-8 isn't in use. Test 1035 was + also converted to UTF-8 for consistency, as the actual character set + used there is irrelevant to the test. + +Jay Satiro (23 Feb 2017) +- url: Default the CA proxy bundle location to CURL_CA_BUNDLE + + If the compile-time CURL_CA_BUNDLE location is defined use it as the + default value for the proxy CA bundle location, which is the same as + what we already do for the regular CA bundle location. + + Ref: https://github.com/curl/curl/pull/1257 + +Daniel Stenberg (23 Feb 2017) +- [Sergii Pylypenko brought this change] + + rand: added missing #ifdef HAVE_FCNTL_H around fcntl.h header + + Closes #1285 + +- TODO: "OPTIONS *" + + Closes #1280 + +- RELEASE-NOTES: synced with 443e5b03a7d441 + +- THANKS-filter: shachaf + +- [İsmail Dönmez brought this change] + + tests: Set CHARSET & LANG to UTF-8 in 1035, 2046 and 2047 + + Closes #1283 + Fixes #1277 + +- bump: 7.53.1 coming up + + synced with df665f4df0f7a352 + +- formdata: check for EOF when reading from stdin + + Reported-by: shachaf@users.noreply.github.com + + Fixes #1281 + +Jay Satiro (22 Feb 2017) +- docs: gitignore curl.1 + + curl.1 is generated by the cmdline-opts script since 4c49b83. + +Daniel Stenberg (22 Feb 2017) +- TODO: HTTP Digest using SHA-256 + +- TODO: brotli is deployed widely now + +Jay Satiro (21 Feb 2017) +- [Viktor Szakats brought this change] + + urldata: include curl_sspi.h when Windows SSPI is enabled + + f77dabe broke builds in Windows using Windows SSPI but not Windows SSL. + + Bug: https://github.com/curl/curl/issues/1276 + Reported-by: jveazey@users.noreply.github.com + +- url: Improve CURLOPT_PROXY_CAPATH error handling + + - Change CURLOPT_PROXY_CAPATH to return CURLE_NOT_BUILT_IN if the option + is not supported, which is the same as what we already do for + CURLOPT_CAPATH. + + - Change the curl tool to handle CURLOPT_PROXY_CAPATH error + CURLE_NOT_BUILT_IN as a warning instead of as an error, which is the + same as what we already do for CURLOPT_CAPATH. + + - Fix CAPATH docs to show that CURLE_NOT_BUILT_IN is returned when the + respective CAPATH option is not supported by the SSL library. 
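+
+  Applications can mirror the same tolerance; a hedged sketch (the
+  helper name and warning text are placeholders) that treats
+  CURLE_NOT_BUILT_IN from CURLOPT_PROXY_CAPATH as a warning rather than
+  a fatal error:
+
+  ```c
+  #include <stdio.h>
+  #include <curl/curl.h>
+
+  static void set_proxy_capath(CURL *curl, const char *capath)
+  {
+    CURLcode rc = curl_easy_setopt(curl, CURLOPT_PROXY_CAPATH, capath);
+    if(rc == CURLE_NOT_BUILT_IN)
+      /* the SSL backend lacks CAPATH support: warn, don't abort */
+      fprintf(stderr, "warning: ignoring proxy CA path %s\n", capath);
+    else if(rc != CURLE_OK)
+      fprintf(stderr, "error: %s\n", curl_easy_strerror(rc));
+  }
+  ```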
+ + Ref: https://github.com/curl/curl/pull/1257 + +- cyassl: fix typo + +Version 7.53.0 (22 Feb 2017) + +Daniel Stenberg (22 Feb 2017) +- release: 7.53.0 + +- cookie: fix declaration of 'dup' shadows a global declaration + +- TLS: make SSL_VERIFYSTATUS work again + + The CURLOPT_SSL_VERIFYSTATUS option was not properly handled by libcurl + and thus even if the status couldn't be verified, the connection would + be allowed and the user would not be told about the failed verification. + + Regression since cb4e2be7c6d42ca + + CVE-2017-2629 + Bug: https://curl.haxx.se/docs/adv_20170222.html + + Reported-by: Marcus Hoffmann + +Jay Satiro (21 Feb 2017) +- digest_sspi: Handle 'stale=TRUE' directive in HTTP digest + + - If the server has provided another challenge use it as the replacement + input token if stale=TRUE. Otherwise previous credentials have failed + so return CURLE_LOGIN_DENIED. + + Prior to this change the stale directive was ignored and if another + challenge was received it would cause error CURLE_BAD_CONTENT_ENCODING. + + Ref: https://tools.ietf.org/html/rfc2617#page-10 + + Bug: https://github.com/curl/curl/issues/928 + Reported-by: tarek112@users.noreply.github.com + +Daniel Stenberg (20 Feb 2017) +- smb: use getpid replacement for windows UWP builds + + Source: https://github.com/Microsoft/vcpkg/blob/7676b8780db1e1e591c4fc7eba4f96f73c428cb4/ports/curl/0002_fix_uwp.patch + +- TODO: CURLOPT_RESOLVE for any port number + + Closes #1264 + +- RELEASE-NOTES: synced with af30f1152d43dcdb + +- [Jean Gressmann brought this change] + + sftp: improved checks for create dir failures + + Since negative values are errors and not only -1. This makes SFTP upload + with --create-dirs work (again). + + Closes #1269 + +Jay Satiro (20 Feb 2017) +- [Max Khon brought this change] + + digest_sspi: Fix nonce-count generation in HTTP digest + + - on the first invocation: keep security context returned by + InitializeSecurityContext() + + - on subsequent invocations: use MakeSignature() instead of + InitializeSecurityContext() to generate HTTP digest response + + Bug: https://github.com/curl/curl/issues/870 + Reported-by: Andreas Roth + + Closes https://github.com/curl/curl/pull/1251 + +- examples/multi-uv: checksrc compliance + +Michael Kaufmann (19 Feb 2017) +- string formatting: fix 4 printf-style format strings + +Dan Fandrich (18 Feb 2017) +- tests: removed the obsolete name parameter + +Michael Kaufmann (18 Feb 2017) +- speed caps: update the timeouts if the speed is too low/high + + Follow-up to 4b86113 + + Fixes https://github.com/curl/curl/issues/793 + Fixes https://github.com/curl/curl/issues/942 + +- docs: fix timeout handling in multi-uv example + +- proxy: fix hostname resolution and IDN conversion + + Properly resolve, convert and log the proxy host names. + Support the "--connect-to" feature for SOCKS proxies and for passive FTP + data transfers. + + Follow-up to cb4e2be + + Reported-by: Jay Satiro + Fixes https://github.com/curl/curl/issues/1248 + +Jay Satiro (17 Feb 2017) +- [Isaac Boukris brought this change] + + http: fix missing 'Content-Length: 0' while negotiating auth + + - While negotiating auth during PUT/POST if a user-specified + Content-Length header is set send 'Content-Length: 0'. + + This is what we do already in HTTPREQ_POST_FORM and what we did in the + HTTPREQ_POST case (regression since afd288b). + + Prior to this change no Content-Length header would be sent in such a + case. 
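+
+  A sketch of the kind of request that exercises this path: a POST with
+  a user-specified Content-Length header plus authentication that needs
+  a negotiation round-trip. The URL, header value and credentials are
+  placeholders:
+
+  ```c
+  #include <curl/curl.h>
+
+  int main(void)
+  {
+    CURL *curl = curl_easy_init();
+    if(curl) {
+      struct curl_slist *hdrs = NULL;
+      /* user-specified Content-Length, as described above */
+      hdrs = curl_slist_append(hdrs, "Content-Length: 5");
+      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/upload");
+      curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);
+      curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "hello");
+      curl_easy_setopt(curl, CURLOPT_HTTPAUTH, (long)CURLAUTH_ANY);
+      curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret");
+      /* during the initial 401/407 round-trip libcurl now sends
+         "Content-Length: 0" instead of omitting the header */
+      curl_easy_perform(curl);
+      curl_slist_free_all(hdrs);
+      curl_easy_cleanup(curl);
+    }
+    return 0;
+  }
+  ```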
+ + Bug: https://curl.haxx.se/mail/lib-2017-02/0006.html + Reported-by: Dominik Hölzl + + Closes https://github.com/curl/curl/pull/1242 + +Daniel Stenberg (16 Feb 2017) +- [Simon Warta brought this change] + + winbuild: add note on auto-detection of MACHINE in Makefile.vc + + Closes #1265 + +- RELEASE-PROCEDURE: update the upcoming release calendar + +- TODO: consider file name from the redirected URL with -O ? + + It isn't easily solved, but with some thinking someone could probably + come up with a working approach? + + Closes #1241 + +Jay Satiro (15 Feb 2017) +- tool_urlglob: Allow a glob range with the same start and stop + + For example allow ranges like [1-1] and [a-a] etc. + + Regression since 5ca96cb. + + Bug: https://github.com/curl/curl/issues/1238 + Reported-by: R. Dennis Steed + +Daniel Stenberg (15 Feb 2017) +- axtls: adapt to API changes + + Builds with axTLS 2.1.2. This then also breaks compatibility with axTLS + < 2.1.0 (the older API) + + ... and fix the session_id mixup brought in 04b4ee549 + + Fixes #1220 + +- RELEASE-NOTES: synced with 690935390c29c + +- [Nick Draffen brought this change] + + curl: fix typo in time condition warning message + + The warning message had a typo. The argument long form is --time-cond + not --timecond + + Closes #1263 + +- smb: code indent + +Jay Satiro (14 Feb 2017) +- configure: Allow disabling pthreads, fall back on Win32 threads + + When the threaded resolver option is specified for configure the default + thread library is pthreads. This change makes it possible to + --disable-pthreads and then configure can fall back on Win32 threads for + native Windows builds. + + Closes https://github.com/curl/curl/pull/1260 + +Daniel Stenberg (13 Feb 2017) +- http2: fix memory-leak when denying push streams + + Reported-by: zelinchen@users.noreply.github.com + Fixes #1229 + +Jay Satiro (11 Feb 2017) +- tool_operate: Show HTTPS-Proxy options on CURLE_SSL_CACERT + + When CURLE_SSL_CACERT occurs the tool shows a lengthy error message to + the user explaining possible solutions such as --cacert and --insecure. + + This change appends to that message similar options --proxy-cacert and + --proxy-insecure when there's a specified HTTPS proxy. + + Closes https://github.com/curl/curl/issues/1258 + +Daniel Stenberg (10 Feb 2017) +- cmdline-opts/page-footer: ftp.sunet.se is no longer an FTP mirror + +- URL: only accept ";options" in SMTP/POP3/IMAP URL schemes + + Fixes #1252 + +Jay Satiro (9 Feb 2017) +- cmdline-opts/socks*: Mention --preproxy in --socks* opts + + - Document in --socks* opts they're still mutually exclusive of --proxy. + + Partial revert of 423a93c; I had misinterpreted the SOCKS proxy + + HTTP/HTTPS proxy combination. + + - Document in --socks* opts that --preproxy can be used to specify a + SOCKS proxy at the same time --proxy is used with an HTTP/HTTPS proxy. + +Daniel Stenberg (9 Feb 2017) +- CURLOPT_SSL_VERIFYPEER.3: also the https proxy version + +Kamil Dudka (9 Feb 2017) +- nss: make FTPS work with --proxytunnel + + If the NSS code was in the middle of a non-blocking handshake and it + was asked to finish the handshake in blocking mode, it unexpectedly + continued in the non-blocking mode, which caused a FTPS connection + over CONNECT to fail with "(81) Socket not ready for send/recv". + + Bug: https://bugzilla.redhat.com/1420327 + +Daniel Stenberg (9 Feb 2017) +- examples/multithread.c: link to our multi-thread docs + + ... instead of the OpenSSL mutex page. 
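+
+  A condensed sketch in the spirit of that example: one easy handle per
+  thread and curl_global_init() called once before any thread starts.
+  The URLs are placeholders:
+
+  ```c
+  #include <pthread.h>
+  #include <curl/curl.h>
+
+  /* each thread gets its own easy handle; handles must never be
+     shared between threads */
+  static void *pull_url(void *url)
+  {
+    CURL *curl = curl_easy_init();
+    if(curl) {
+      curl_easy_setopt(curl, CURLOPT_URL, (char *)url);
+      curl_easy_perform(curl);
+      curl_easy_cleanup(curl);
+    }
+    return NULL;
+  }
+
+  int main(void)
+  {
+    pthread_t tid[2];
+    const char *urls[2] = { "https://example.com/", "https://example.org/" };
+    int i;
+
+    curl_global_init(CURL_GLOBAL_ALL); /* once, before the threads */
+    for(i = 0; i < 2; i++)
+      pthread_create(&tid[i], NULL, pull_url, (void *)urls[i]);
+    for(i = 0; i < 2; i++)
+      pthread_join(tid[i], NULL);
+    curl_global_cleanup();
+    return 0;
+  }
+  ```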
+ +- http_proxy: avoid freeing static memory + + Follow up to 7fe81ec298e0: make sure 'host' is either NULL or malloced. + +- [Cameron MacMinn brought this change] + + http_proxy: Fix tiny memory leak upon edge case connecting to proxy + + Fixes #1255 + +Michael Kaufmann (8 Feb 2017) +- polarssl, mbedtls: Fix detection of pending data + + Reported-by: Dan Fandrich + Bug: https://curl.haxx.se/mail/lib-2017-02/0032.html + +Dan Fandrich (7 Feb 2017) +- test1139: Added the --manual keyword since the manual is required + +Daniel Stenberg (7 Feb 2017) +- RELEASE-NOTES: synced with 102454459dd688c + +- THANKS-filter: polish some recent contributors + +- http2: reset push header counter fixes crash + + When removing an easy handler from a multi before it completed its + transfer, and it had pushed streams, it would segfault due to the pushed + counted not being cleared. + + Fixed-by: zelinchen@users.noreply.github.com + Fixes #1249 + +- [Markus Westerlind brought this change] + + transfer: only retry nobody-requests for HTTP + + Using sftp to delete a file with CURLOPT_NOBODY set with a reused + connection would fail as curl expected to get some data. Thus it would + retry the command again which fails as the file has already been + deleted. + + Fixes #1243 + +Jay Satiro (7 Feb 2017) +- [Daniel Gustafsson brought this change] + + telnet: Fix typos + + Ref: https://github.com/curl/curl/pull/1245 + +- [Daniel Gustafsson brought this change] + + test552: Fix typos + + Closes https://github.com/curl/curl/pull/1245 + +- [Daniel Gustafsson brought this change] + + darwinssl: Avoid parsing certificates when not in verbose mode + + The information extracted from the server certificates in step 3 is only + used when in verbose mode, and there is no error handling or validation + performed as that has already been done. Only run the certificate + information extraction when in verbose mode and libcurl was built with + verbose strings. + + Closes https://github.com/curl/curl/pull/1246 + +- [JDepooter brought this change] + + schannel: Remove incorrect SNI disabled message + + - Remove the SNI disabled when host verification disabled message + since that is incorrect. + + - Show a message for legacy versions of Windows <= XP that connections + may fail since those versions of WinSSL lack SNI, algorithms, etc. + + Bug: https://github.com/curl/curl/pull/1240 + +Daniel Stenberg (7 Feb 2017) +- CHANGES: spell fix, use correct path to script + +- CHANGES.0: removed + + This is the previously manually edited changelog, not touched since Aug + 2015. Still present in git for those who wants it. + +Dan Fandrich (6 Feb 2017) +- cmdline-opts: Fixed build and test in out of source tree builds + +Viktor Szakats (6 Feb 2017) +- use *.sourceforge.io and misc URL updates + + Ref: https://sourceforge.net/blog/introducing-https-for-project-websites/ + Closes: https://github.com/curl/curl/pull/1247 + +Jay Satiro (6 Feb 2017) +- docs: Add more HTTPS proxy documentation + + - Document HTTPS proxy type. + + - Document --write-out %{proxy_ssl_verify_result}. + + - Document SOCKS proxy + HTTP/HTTPS proxy combination. + + HTTPS proxy support was added in 7.52.0 for OpenSSL, GnuTLS and NSS. + + Ref: https://github.com/curl/curl/commit/cb4e2be + +- OS400: Fix symbols + + - s/CURLOPT_SOCKS_PROXY/CURLOPT_PRE_PROXY + Follow-up to 7907a2b and 845522c. + + - Fix incorrect id for CURLOPT_PROXY_PINNEDPUBLICKEY. + + - Add id for CURLOPT_ABSTRACT_UNIX_SOCKET. 
+ + Bug: https://github.com/curl/curl/issues/1237 + Reported-by: jonrumsey@users.noreply.github.com + +- [Sean Burford brought this change] + + cmake: Support curl --xattr when built with cmake + + - Test for and set HAVE_FSETXATTR when support for extended file + attributes is present. + + Closes https://github.com/curl/curl/pull/1176 + +- [Adam Langley brought this change] + + openssl: Don't use certificate after transferring ownership + + SSL_CTX_add_extra_chain_cert takes ownership of the given certificate + while, despite the similar name, SSL_CTX_add_client_CA does not. Thus + it's best to call SSL_CTX_add_client_CA before + SSL_CTX_add_extra_chain_cert, while the code still has ownership of the + argument. + + Closes https://github.com/curl/curl/pull/1236 + +Daniel Stenberg (29 Jan 2017) +- [Antoine Aubert brought this change] + + mbedtls: implement CTR-DRBG and HAVEGE random generators + + closes #1227 + +- docs: we no longer ship HTML versions of man pages + + ... refer to the web site for the web versions. + +- [railsnewbie257 brought this change] + + docs: proofread README.netware README.win32 + + Closes #1231 + +- RELEASE-NOTES; synced with ab08d82648 + +Michael Kaufmann (28 Jan 2017) +- mbedtls: disable TLS session tickets + + SSL session reuse with TLS session tickets is not supported yet. + Use SSL session IDs instead. + + See https://github.com/curl/curl/issues/1109 + +- gnutls: disable TLS session tickets + + SSL session reuse with TLS session tickets is not supported yet. + Use SSL session IDs instead. + + Fixes https://github.com/curl/curl/issues/1109 + +- polarssl: fix hangs + + This bugfix is similar to commit c111178bd4. + +Daniel Stenberg (27 Jan 2017) +- cookies: do not assume a valid domain has a dot + + This repairs cookies for localhost. + + Non-PSL builds will now only accept "localhost" without dots, while PSL + builds okeys everything not listed as PSL. + + Added test 1258 to verify. + + This was a regression brought in a76825a5efa6b4 + +- TODO: remove "Support TLS v1.3" + + Support is trickling in already. + +- [railsnewbie257 brought this change] + + INTERNALS.md: language improvements + + Closes #1226 + +- telnet: fix windows compiler warnings + + Thumbs-up-by: Jay Satiro + + Closes #1225 + +- VC: remove the makefile.vc6 build infra + + The winbuild/ build files is now the single MSVC makefile build choice. + + Closes #1215 + +- [Jay Satiro brought this change] + + cmdline-opts/gen.pl: Open input files in CRLF mode + + On Windows it's possible to have input files with CRLF line endings and + a perl that defaults to LF line endings (eg msysgit). Currently that + results in generator output of mixed line endings of CR, LF and CRLF. + + This change fixes that issue in the most succinct way by opening the + files in :crlf text mode even when the perl being used does not default + to that mode. (On operating systems that don't have a separate text mode + it's essentially a no-op.) The output continues to be in the perl's + native line ending. + +- docs/curl.1: generate from the cmdline-opts script + +- vtls: source indentation fix + +- contri*.sh: cut off parentheses from names too + +- RELEASE-NOTES: synced with 01ab7c30bba6f + +- vtls: fix PolarSSL non-blocking handling + + A regression brought in cb4e2be + + Reported-by: Michael Kaufmann + Bug: https://github.com/curl/curl/issues/1174#issuecomment-274018791 + +- [Antoine Aubert brought this change] + + vtls: fix mbedtls multi non blocking handshake. 
+ + When using multi, mbedtls handshake is in non blocking mode. vtls must + set wait for read/write flags for the socket. + + Closes #1223 + +- [Richy Kim brought this change] + + CURLOPT_BUFFERSIZE: support enlarging receive buffer + + Replace use of fixed macro BUFSIZE to define the size of the receive + buffer. Reappropriate CURLOPT_BUFFERSIZE to include enlarging receive + buffer size. Upon setting, resize buffer if larger than the current + default size up to a MAX_BUFSIZE (512KB). This can benefit protocols + like SFTP. + + Closes #1222 + +- sws: use SOCKERRNO, not errno + + Reported-by: Gisle Vanem + +Michael Kaufmann (19 Jan 2017) +- KNOWN_BUGS: HTTP/2 server push enabled when no pushes can be accepted + + This has been implemented with commit 9ad034e. + +Viktor Szakats (19 Jan 2017) +- *.rc: escape non-ASCII/non-UTF-8 character for clarity + + Closes https://github.com/curl/curl/pull/1217 + +Kamil Dudka (19 Jan 2017) +- docs: non-blocking SSL handshake is now supported with NSS + + Implemented since curl-7_36_0-130-g8868a22 + + Reported-by: Fahim Chandurwala + +Michael Kaufmann (18 Jan 2017) +- CURLOPT_CONNECT_TO: Fix compile warnings + + Fix compile warnings that appeared only when curl has been configured + with '--disable-verbose'. + +Daniel Stenberg (18 Jan 2017) +- usercertinmem.c: improve the short description + +- parseurl: move back buffer to function scope + + Regression since 1d4202ad, which moved the buffer into a more narrow + scope, but the data in that buffer was used outside of that more narrow + scope. + + Reported-by: Dan Fandrich + Bug: https://curl.haxx.se/mail/lib-2017-01/0093.html + +Jay Satiro (17 Jan 2017) +- openssl: Fix random generation + + - Fix logic error in Curl_ossl_random. + + Broken a few days ago in 807698d. + +Daniel Stenberg (17 Jan 2017) +- TODO: share OpenSSL contexts + + By supporting this, subsequent connects would load a lot less data from + disk. + + Closes #1110 + +- bump: next release will be 7.53.0 + +Kamil Dudka (15 Jan 2017) +- nss: use the correct lock in nss_find_slot_by_name() + +Alessandro Ghedini (15 Jan 2017) +- http2: disable server push if not requested + + Ref: https://github.com/curl/curl/pull/1160 + +Daniel Stenberg (14 Jan 2017) +- [railsnewbie257 brought this change] + + docs: improved language in README.md HISTORY.md CONTRIBUTE.md + + Closes #1211 + +Alessandro Ghedini (14 Jan 2017) +- http: print correct HTTP string in verbose output when using HTTP/2 + + Before: + ``` + % src/curl https://sigsegv.ninja/ -v --http2 + ... + > GET / HTTP/1.1 + > Host: sigsegv.ninja + > User-Agent: curl/7.52.2-DEV + > Accept: */* + > + ... + ``` + + After: + ``` + % src/curl https://sigsegv.ninja/ -v --http2 + ... 
+ > GET / HTTP/2 + > Host: sigsegv.ninja + > User-Agent: curl/7.52.2-DEV + > Accept: */* + > + ``` + +Daniel Stenberg (14 Jan 2017) +- TODO: send only part of --data + + Closes #1200 + +- TODO: implemened "--fail-fast to exit on first transfer fail" + + Even though it is called --fail-early + +- TODO: Chunked transfer multipart formpost + + Closes #1139 + +- TODO: Improve formpost API, not just add an easy argument + +- addrinfo: fix compiler warning on offsetof() use + + curl_addrinfo.c:519:20: error: conversion to ‘curl_socklen_t {aka + unsigned int}’ from ‘long unsigned int’ may alter its value + [-Werror=conversion] + + Follow-up to 1d786faee1046f + +- THANKS-filter: Jiri Malak + +- RELEASE-NOTES: synced with a7c73ae309c + +Peter Wu (13 Jan 2017) +- [Isaac Boukris brought this change] + + unix_socket: add support for abstract unix domain socket + + In addition to unix domain sockets, Linux also supports an + abstract namespace which is independent of the filesystem. + + In order to support it, add new CURLOPT_ABSTRACT_UNIX_SOCKET + option which uses the same storage as CURLOPT_UNIX_SOCKET_PATH + internally, along with a flag to specify abstract socket. + + On non-supporting platforms, the abstract address will be + interpreted as an empty string and fail gracefully. + + Also add new --abstract-unix-socket tool parameter. + + Signed-off-by: Isaac Boukris + Reported-by: Chungtsun Li (typeless) + Reviewed-by: Daniel Stenberg + Reviewed-by: Peter Wu + Closes #1197 + Fixes #1061 + +Daniel Stenberg (13 Jan 2017) +- write-out.d: 'time_total' is not always shown with ms precision + + We have higher resolution since 7.52.0 + +- next.d: --trace and --trace-ascii are also global + +- [Isaac Boukris brought this change] + + curl: reset the easy handle at --next + + So that only "global" options (verbose mostly) survive into the next + transfer, and the others have to be set again unless default is fine. + +- [Frank Gevaerts brought this change] + + docs: Add note about libcurl copying strings to CURLOPT_* manpages + + Closes #1169 + +- [Frank Gevaerts brought this change] + + CURLOPT_PREQUOTE.3: takes a struct curl_slist*, not a char* + +- IDN: Use TR46 non-transitional + + Assisted-by: Tim Rühsen + +- IDN: revert use of the transitional option + + It made the german ß get converted to ss, IDNA2003 style, and we can't + have that for the .de TLD - a primary reason for our switch to IDNA2008. + + Test 165 verifies. + +- [Tim Rühsen brought this change] + + IDN: Fix compile time detection of linidn2 TR46 + + Follow-up to f30cbcac1 + + Closes #1207 + +- [ERAMOTO Masaya brought this change] + + url: --noproxy option overrides NO_PROXY environment variable + + Under condition using http_proxy env var, noproxy list was the + combination of --noproxy option and NO_PROXY env var previously. Since + this commit, --noproxy option overrides NO_PROXY environment variable + even if use http_proxy env var. + + Closes #1140 + +- [ERAMOTO Masaya brought this change] + + url: Refactor detect_proxy() + + If defined CURL_DISABLE_HTTP, detect_proxy() returned NULL. If not + defined CURL_DISABLE_HTTP, detect_proxy() checked noproxy list. + + Thus refactor to set proxy to NULL instead of calling detect_proxy() if + define CURL_DISABLE_HTTP, and refactor to call detect_proxy() if not + define CURL_DISABLE_HTTP and the host is not in the noproxy list. + +- [ERAMOTO Masaya brought this change] + + url: Fix NO_PROXY env var to work properly with --proxy option. 
+ + The combination of --noproxy option and http_proxy env var works well + both for proxied hosts and non-proxied hosts. + + However, when combining NO_PROXY env var with --proxy option, + non-proxied hosts are not reachable while proxied host is OK. + + This patch allows us to access non-proxied hosts even if using NO_PROXY + env var with --proxy option. + +- [Tim Rühsen brought this change] + + IDN: Use TR46 'transitional' for toASCII translations + + References: http://unicode.org/faq/idn.html + http://unicode.org/reports/tr46 + + Closes #1206 + +- [railsnewbie257 brought this change] + + docs: FAQ MAIL-ETIQUETTE language fixes + + Closes #1194 + +- [Marcus Hoffmann brought this change] + + gnutls: check for alpn and ocsp in configure + + Check for presence of gnutls_alpn_* and gnutls_ocsp_* functions during + configure instead of relying on the version number. GnuTLS has options + to turn these features off and we ca just work with with such builds + like we work with older versions. + + Signed-off-by: Marcus Hoffmann + + Closes #1204 + +Jay Satiro (12 Jan 2017) +- url: Fix parsing for when 'file' is the default protocol + + Follow-up to 3463408. + + Prior to 3463408 file:// hostnames were silently stripped. + + Prior to this commit it did not work when a schemeless url was used with + file as the default protocol. + + Ref: https://curl.haxx.se/mail/lib-2016-11/0081.html + Closes https://github.com/curl/curl/pull/1124 + + Also fix for drive letters: + + - Support --proto-default file c:/foo/bar.txt + + - Support file://c:/foo/bar.txt + + - Fail when a file:// drive letter is detected and not MSDOS/Windows. + + Bug: https://github.com/curl/curl/issues/1187 + Reported-by: Anatol Belski + Assisted-by: Anatol Belski + +Daniel Stenberg (12 Jan 2017) +- rand: make it work without TLS backing + + Regression introduced in commit f682156a4fc6c4 + + Reported-by: John Kohl + Bug: https://curl.haxx.se/mail/lib-2017-01/0055.html + +Jay Satiro (12 Jan 2017) +- STARTTLS: Don't print response character in denied messages + + Both IMAP and POP3 response characters are used internally, but when + appended to the STARTTLS denial message likely could confuse the user. + + Closes https://github.com/curl/curl/pull/1203 + +- smtp: Fix STARTTLS denied error message + + - Format the numeric denial code as an integer instead of a character. + +Daniel Stenberg (11 Jan 2017) +- http2_send: avoid unsigned integer wrap around + + ... when checking for a too large request. + +Jay Satiro (9 Jan 2017) +- [Jiri Malak brought this change] + + cmake: Fix passing _WINSOCKAPI_ macro to compiler + + Define _WINSOCKAPI_ blank rather than to 1 in order to match the value + used by Microsoft's winsock header files. + + Closes https://github.com/curl/curl/pull/1195 + +Daniel Stenberg (9 Jan 2017) +- sws: retry send() on EWOULDBLOCK + + Fixes spurious test 1060 and 1061 failures on OpenBSD, Solaris and more. + + Bug: https://curl.haxx.se/mail/lib-2017-01/0009.html + Reported-by: Christian Weisgerber + +- RELEASE-NOTES: synced with a41e8592d6b3e58 + +- examples: make the C++ examples follow our code style too + + At least mostly, not counting // comments. + +- [Aulddays brought this change] + + asiohiper: improved socket handling + + libcurl requires CURLMOPT_SOCKETFUNCTION to KEEP watching socket events + and notify back. Modify event_cb() to continue watching events when + fired. 
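+
+  In a plain multi_socket-driven loop that rule looks roughly like
+  this; the bookkeeping struct and the commented-out curl_multi_assign()
+  call are assumptions for the sketch, not the asiohiper code:
+
+  ```c
+  #include <stdlib.h>
+  #include <curl/curl.h>
+
+  struct sockstate {
+    curl_socket_t fd;
+    int action;            /* last CURL_POLL_* libcurl asked for */
+  };
+
+  /* installed with curl_multi_setopt(multi, CURLMOPT_SOCKETFUNCTION,
+     socket_cb) */
+  static int socket_cb(CURL *easy, curl_socket_t s, int action,
+                       void *userp, void *socketp)
+  {
+    struct sockstate *st = (struct sockstate *)socketp;
+    (void)easy;
+    (void)userp;
+
+    if(action == CURL_POLL_REMOVE) {
+      free(st);            /* libcurl is done with this socket */
+      return 0;
+    }
+    if(!st) {
+      st = calloc(1, sizeof(*st));   /* first time we see this socket */
+      if(!st)
+        return 0;
+      st->fd = s;
+      /* curl_multi_assign(multi, s, st);  -- multi handle assumed */
+    }
+    /* keep the watcher armed for 'action' until CURL_POLL_REMOVE;
+       a fired event is not one-shot */
+    st->action = action;
+    return 0;
+  }
+  ```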
+ + Fixes #1191 + Closes #1192 + Fixed-by: Mingliang Zhu + +- [Jiří Malák brought this change] + + lib506: fix build for Open Watcom + + Rename symbol lock to locks to not clash with OW CRTL function name. + + Closes #1196 + +- ROADMAP: 2017 cleanup + + Removed items already fixed, clarified a few others. + +- COPYING: update the generic copyright year range + +- docs/silent: mention --show-error in --silent description + + Reported in #1190 + Reported-by: Dan Jacobson + +- docs/page-header: mention how to disable the progress meter + + curl.1 is regenerated + + Fixes #1190 + +Dan Fandrich (7 Jan 2017) +- wolfssl: display negotiated SSL version and cipher + +- wolfssl: support setting cipher list + +Patrick Monnerat (6 Jan 2017) +- CIPHERS.md: document GSKit ciphers + +Jay Satiro (5 Jan 2017) +- [peterpih brought this change] + + TheArtOfHttpScripting: grammar + +Nick Zitzmann (3 Jan 2017) +- darwinssl: --insecure overrides --cacert if both settings are in use + + Fixes #1184 + +Jay Satiro (2 Jan 2017) +- docs/libcurl: TCP_KEEPALIVE start and interval default to 60 + + Since the TCP keep-alive options were added in 705f0f7 the start and + interval default values have been 60, but that wasn't documented. + + Bug: https://curl.haxx.se/mail/lib-2017-01/0000.html + Reported-by: Praveen Pvs + +Daniel Stenberg (29 Dec 2016) +- curl.h: CURLE_FUNCTION_NOT_FOUND is no longer in use + + This error code was once introduced when some library was dynamically + loaded and a funciton within said library couldn't be found. + +- content_encoding: change return code on a failure + + Failure to decompress is now a write error instead of the weird + "function not found". + +- page-footer: error 36 is protocol agnostic! + +Jay Satiro (28 Dec 2016) +- tool_operate: Fix --remote-time incorrect times on Windows + + - Use Windows API SetFileTime to set the file time instead of utime. + + Avoid utime on Windows if possible because it may apply a daylight + saving time offset to our UTC file time. + + Bug: https://curl.haxx.se/mail/archive-2016-11/0033.html + Reported-by: Tim + + Closes https://github.com/curl/curl/pull/1121 + +Daniel Stenberg (29 Dec 2016) +- [Max Khon brought this change] + + digest_sspi: copy terminating NUL as well + + Curl_auth_decode_digest_http_message(): copy terminating NUL as later + Curl_override_sspi_http_realm() expects a NUL-terminated string. + + Fixes #1180 + +- curl_formadd.3: CURLFORM_CONTENTSLENGTH not needed when chunked + + Mentioned in #1013 + +- [Kyselgov E.N brought this change] + + cmake: use crypt32.lib when building with OpenSSL on windows + + Reviewed-by: Peter Wu + Closes #1149 + Fixes #1147 + +- [Chris Araman brought this change] + + darwinssl: fix CFArrayRef leak + + Reviewed-by: Nick Zitzmann + Closes #1173 + +- [Chris Araman brought this change] + + darwinssl: fix iOS build + + Reviewed-by: Nick Zitzmann + Fixes #1172 + +- curl: remove superfluous include file + + The is a leftover from the past when TCP socket options + were set in this file. This include causes build issues on AIX 4.3. + + Reported-by: Kim Minjoong + + Closes #1178 + +- RELEASE-NOTES: synced with a7b38c9dc98481e + +- vtls: s/SSLEAY/OPENSSL + + Fixed an old leftover use of the USE_SSLEAY define which would make a + socket get removed from the applications sockets to monitor when the + multi_socket API was used, leading to timeouts. + + Bug: #1174 + +- docs/ciphers: link to our own new page about ciphers + + ... as the former ones always go stale! 
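+
+  As a companion to the TCP_KEEPALIVE documentation fix earlier in this
+  batch: enabling the probes and overriding the now-documented 60
+  second defaults looks like this, with arbitrary example values and a
+  placeholder URL:
+
+  ```c
+  #include <curl/curl.h>
+
+  int main(void)
+  {
+    CURL *curl = curl_easy_init();
+    if(curl) {
+      curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+      /* enable TCP keep-alive probes for this transfer */
+      curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, 1L);
+      /* idle time before the first probe (default 60 seconds) */
+      curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, 120L);
+      /* interval between probes (default 60 seconds) */
+      curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, 30L);
+      curl_easy_perform(curl);
+      curl_easy_cleanup(curl);
+    }
+    return 0;
+  }
+  ```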
+ +- cmdline-opts/page-footer: add three more exit codes + + ... and regenerated curl.1 + +- formdata: use NULL, not 0, when returning pointers + +- ftp: failure to resolve proxy should return that error code + +- configure: accept --with-libidn2 instead + + ... which the help text already implied since we switched to libidn2 + from libidn in commit 9c91ec778104ae3b back in October 2016. + + Reported-by: Christian Weisgerber + Bug: https://curl.haxx.se/mail/lib-2016-12/0110.html + +- test1282: verify the ftp-gss check + +- ftp-gss: check for init before use + + To avoid dereferencing a NULL pointer. + + Reported-by: Daniel Romero + +Jay Satiro (24 Dec 2016) +- build-wolfssl: Sync config with wolfSSL 3.10 + + wolfSSL configure script relevant changes from 3.9 to 3.10: + + - DES3 no longer enabled by default + - Shamir no longer enabled by default + - Extended master secret enabled by default + - RSA and ECC timing protections enabled by default + + For backwards compatibility I enabled DES3 and ECC shamir config options + (ie no change from 3.9), and the other changes are included. + +- cyassl: use time_t instead of long for timeout + +Daniel Stenberg (23 Dec 2016) +- bump: toward next release + +- http: remove "Curl_http_done: called premature" message + + ... it only confuses people. + +- openssl-random: check return code when asking for random + + and fail appropriately if it returns error + +- gnutls-random: check return code for failed random + +Version 7.52.1 (22 Dec 2016) + +Daniel Stenberg (22 Dec 2016) +- RELEASE-NOTES: curl 7.52.1 + +- lib557.c: use a shorter MAXIMIZE representation + + Since several compilers had problems with the previous one + + Reported-by: Ray Satiro + Bug: https://curl.haxx.se/mail/lib-2016-12/0098.html + +- runtests: remove the valgrind parser + + Old legacy parsing that 1) hid problems for us and 2) probably isn't + needed anymore. + +- [Kamil Dudka brought this change] + + randit: store the value in the buffer + +- tests/Makefile: run checksrc on debug builds + + ... just like we already do in src/ and lib/ + +- lib557: move the "enable LONGLINE" to allow more long lines + + This file is riddled with them... + +- bump: toward next release + +Marcel Raad (21 Dec 2016) +- lib: fix MSVC compiler warnings + + Visual C++ complained: + warning C4267: '=': conversion from 'size_t' to 'long', possible loss of data + warning C4701: potentially uninitialized local variable 'path' used + +Version 7.52.0 (20 Dec 2016) + +Daniel Stenberg (20 Dec 2016) +- THANKS: 13 new contributors from 7.52.0 + +- RELEASE-NOTES: 7.52.0 + +- ssh: inhibit coverity warning with (void) + + CID 1397391 (#1 of 1): Unchecked return value (CHECKED_RETURN) + +- Curl_recv_has_postponed_data: silence compiler warnings + + Follow-up to d00f2a8f2 + +Jay Satiro (19 Dec 2016) +- tests: checksrc compliance + +- http_proxy: Fix proxy CONNECT hang on pending data + + - Check for pending data before waiting on the socket. + + Bug: https://github.com/curl/curl/issues/1156 + Reported-by: Adam Langley + +Daniel Stenberg (19 Dec 2016) +- cmdline-opts/tlsv1.d: rephrased + +- [Dan McNulty brought this change] + + schannel: fix wildcard cert name validation on Win CE + + Fixes a few issues in manual wildcard cert name validation in + schannel support code for Win32 CE: + - when comparing the wildcard name to the hostname, the wildcard + character was removed from the cert name and the hostname + was checked to see if it ended with the modified cert name. 
+ This allowed cert names like *.com to match the connection + hostname. This violates recommendations from RFC 6125. + - when the wildcard name in the certificate is longer than the + connection hostname, a buffer overread of the connection + hostname buffer would occur during the comparison of the + certificate name and the connection hostname. + +- printf: fix floating point buffer overflow issues + + ... and add a bunch of floating point printf tests + +- config-amigaos.h: (embarrassed) made the line shorter + +- config-amigaos.h: fix bug report email reference + +- RELEASE-NOTES: synced with 4517158abfeba + +- CIPHERS.md: backtick the names to show underscores fine + +- form-string.d: fix format mistake + + and regenerated curl.1 + + Reported-by: Gisle Vanem + +Michael Kaufmann (18 Dec 2016) +- openssl: simplify expression in Curl_ossl_version + +- curl_easy_recv: Improve documentation and example program + + Follow-up to 82245ea: Fix the example program sendrecv.c (handle + CURLE_AGAIN, handle incomplete send). Improve the documentation + for curl_easy_recv() and curl_easy_send(). + + Reviewed-by: Frank Meier + Assisted-by: Jay Satiro + + See https://github.com/curl/curl/pull/1134 + +- [Isaac Boukris brought this change] + + Curl_getconnectinfo: avoid checking if the connection is closed + + It doesn't benefit us much as the connection could get closed at + any time, and also by checking we lose the ability to determine + if the socket was closed by reading zero bytes. + + Reported-by: Michael Kaufmann + + Closes https://github.com/curl/curl/pull/1134 + +Daniel Stenberg (18 Dec 2016) +- CIPHERS.md: attempt to document TLS cipher names + + As the official docs seems really hard to keep track of and link to over + time + +- curl.1: generated after 6cce4dbf830 + +- cmdline-opts/post30X.d: fix the RFC references + +- curl.1: regenerated + + Fixed trailing whitespace and numerous formatting glitches + +- cmdline-opts: formatting fixes + +- curl_easy_setopt.3: removed CURLOPT_SOCKS_PROXYTYPE + +- tool_getparam.c: make comments use the up-to-date option names + +- manpage-scan.pl: allow deprecated options to get removed from curl.1 + + --krb4, --ftp-ssl and --ftp-ssl-reqd no longer need to be documented in the + man page + +- cmdline-opts/gen.pl: trim off trailing spaces + +- cmdline-opts/proxy-tlsuser.d: remove trailing .d + +- curl_easy_setopt.3: CURLOPT_PRE_PROXY instead of CURLOPT_SOCKS_PROXY + +- symbols: removed two, added one + +- cmdline-opts: include the man page split up files in the dist + +- curl.1: generated with gen.pl + + This is the first time we replace the manually edited curt.1 with the + generated one created by gen.pl and the individual option documentation + pages. + + Do not edit this file, edit the individual pages and regenerate this + output. + + This file will be generated by the build system soon and then removed + from git. + +- cmdline-opts: added some missing info + +- CURLINFO_SSL_VERIFYRESULT.3: language + +- HTTPS-PROXY docs: update/polish + +- cmdline-opts/page-header: mention it is generated + + ... to avoid people from trying to edit the pending curl.1 version that + gets generated by gen.pl + +- preproxy: renamed what was added as SOCKS_PROXY + + CURLOPT_SOCKS_PROXY -> CURLOPT_PRE_PROXY + + Added the corresponding --preroxy command line option. Sets a SOCKS + proxy to connect to _before_ connecting to a HTTP(S) proxy. + +- curl: normal socks proxies still use CURLOPT_PROXY + + ... 
the newly introduced CURLOPT_SOCKS_PROXY is special and should be + asked for specially. (Needs new code.) + + Unified proxy type to a single variable in the config struct. + +- CURLOPT_SOCKS_PROXYTYPE: removed + + This was added as part of the SOCKS+HTTPS proxy merge but there's no + need to support this as we prefer to have the protocol specified as a + prefix instead. + +- curl_multi_socket.3: fix typo + +- checksrc: warn for assignments within if() expressions + + ... they're already frowned upon in our source code style guide, this + now enforces the rule harder. + +- checksrc: stricter no-space-before-paren enforcement + + In order to make the code style more uniform everywhere + +- ISSUE_TEMPLATE: try mentioning known bugs/todo in new issue template + +- RELEASE-NOTES: synced with 71a55534fa6 + +- [Adam Langley brought this change] + + openssl: don't use OpenSSL's ERR_PACK. + + ERR_PACK is an internal detail of OpenSSL. Also, when using it, a + function name must be specified which is overly specific: the test will + break whenever OpenSSL internally change things so that a different + function creates the error. + + Closes #1157 + +Dan Fandrich (5 Dec 2016) +- test2032: Mark test as flaky + +Jay Satiro (3 Dec 2016) +- [Jeremy Pearson brought this change] + + libcurl-multi.3: typo + + Closes https://github.com/curl/curl/pull/1153 + +Dan Fandrich (2 Dec 2016) +- test1281: added http as a required feature + +Daniel Stenberg (2 Dec 2016) +- curl: support zero-length argument strings in config files + + ... like 'user-agent = ""' + + Adjusted test 71 to verify. + +- http_proxy: simplify CONNECT response reading + + Since it now reads responses one byte a time, a loop could be removed + and it is no longer limited to get the whole response within 16K, it is + now instead only limited to 16K maximum header line lengths. + +- tests: fix CONNECT test cases to be more strict + + ... as they broke with the cleaned up CONNECT handling + +- CONNECT: read responses one byte at a time + + ... so that it doesn't read data that is actually coming from the + remote. 2xx responses have no body from the proxy, that data is from the + peer. + + Fixes #1132 + +- CONNECT: reject TE or CL in 2xx responses + + A server MUST NOT send any Transfer-Encoding or Content-Length header + fields in a 2xx (Successful) response to CONNECT. (RFC 7231 section + 4.3.6) + + Also fixes the three test cases that did this. + +- URL parser: reject non-numerical port numbers + + Test 1281 added to verify + +Dan Fandrich (30 Nov 2016) +- runtests: made Servers: output be more consistent by removing OFF + +- cyassl: fixed typo introduced in 4f8b1774 + +Michael Kaufmann (30 Nov 2016) +- CURLOPT_CONNECT_TO: Skip non-matching "connect-to" entries properly + + If a port number in a "connect-to" entry does not match, skip this + entry instead of connecting to port 0. + + If a port number in a "connect-to" entry matches, use this entry + and look no further. + + Reported-by: Jay Satiro + Assisted-by: Jay Satiro, Daniel Stenberg + + Closes #1148 + +Daniel Stenberg (29 Nov 2016) +- BUGS: describe bug handling process + +- RELEASE-NOTES: synced with 19613fb3 + +Jay Satiro (28 Nov 2016) +- http2: check nghttp2_session_set_local_window_size exists + + The function only exists since nghttp2 1.12.0. 
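+
+  One way to express that requirement is a compile-time guard on
+  nghttp2's version macro; this is only an illustration and may not
+  match how the curl build actually detects the function:
+
+  ```c
+  #include <nghttp2/nghttp2.h>
+
+  /* nghttp2_session_set_local_window_size() appeared in nghttp2
+     1.12.0, i.e. NGHTTP2_VERSION_NUM 0x010c00 */
+  static int widen_connection_window(nghttp2_session *h2, int32_t size)
+  {
+  #if NGHTTP2_VERSION_NUM >= 0x010c00
+    /* stream id 0 addresses the connection-level window */
+    return nghttp2_session_set_local_window_size(h2, NGHTTP2_FLAG_NONE,
+                                                 0, size);
+  #else
+    (void)h2;
+    (void)size;
+    return 0;  /* older nghttp2: skip the enlargement */
+  #endif
+  }
+  ```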
+ + Bug: https://github.com/curl/curl/commit/a4d8888#commitcomment-19985676 + Reported-by: Michael Kaufmann + +Daniel Stenberg (28 Nov 2016) +- [Anders Bakken brought this change] + + http2: Fix crashes when parent stream gets aborted + + Closes #1125 + +- cmdline-docs: more options converted and fixed + + Now all options are in the new system. + +- gen: include footer in mainpage output + +Jay Satiro (28 Nov 2016) +- lib1536: checksrc compliance + +Daniel Stenberg (28 Nov 2016) +- cmdline-opts: more command line options documented + + Moved over to the new format + +- curl: remove --proxy-ssl* options + + There's mostly likely no need to allow setting SSLv2/3 version for HTTPS + proxy. Those protocols are insecure by design and deprecated. + +- CURLOPT_PROXY_*.3: polished some proxy option man pages + +Patrick Monnerat (26 Nov 2016) +- os400: support CURLOPT_PROXY_PINNEDPUBLICKEY + + Also define it in ILE/RPG binding. + +Daniel Stenberg (26 Nov 2016) +- [Okhin Vasilij brought this change] + + curl_version_info: add CURL_VERSION_HTTPS_PROXY + + Closes #1142 + +- [Frank Gevaerts brought this change] + + tests: Add some testcases for recent new features. + + Add missing tests for CURLINFO_SCHEME, CURLINFO_PROTOCOL, %{scheme}, + and %{http_version} + + closes #1143 + +- [Frank Gevaerts brought this change] + + curl_easy_reset: clear info for CULRINFO_PROTOCOL and CURLINFO_SCHEME + +- CURLOPT_PROXY_CAINFO.3: clarify proxy use + +- CURLOPT_PROXY_CRLFILE.3: clarify https proxy and availability + +- curl_easy_setopt.3: add CURLOPT_PROXY_PINNEDPUBLICKEY + + Follow-up to 4f8b17743d7c55a + +- docs: include all opts man pages in dist + + Sorted the lists too. + + ... and include the new ones in the PDF and HTML generation targets + +- [Thomas Glanzmann brought this change] + + HTTPS Proxy: Implement CURLOPT_PROXY_PINNEDPUBLICKEY + +- [Thomas Glanzmann brought this change] + + url: proxy: Use 443 as default port for https proxies + +- TODO: removed "HTTPS proxy" + +- [Jan-E brought this change] + + winbuild: add config option ENABLE_NGHTTP2 + + Closes #1141 + +Jay Satiro (24 Nov 2016) +- tool_urlglob: Improve sanity check in glob_range + + Prior to this change we depended on errno if strtol could not perform a + conversion. POSIX says EINVAL *may* be set. Some implementations like + Microsoft's will not set it if there's no conversion. + + Ref: https://github.com/curl/curl/commit/ee4f7660#commitcomment-19658189 + +- tool_help: Change description for --retry-connrefused + + Ref: https://github.com/curl/curl/pull/1064#issuecomment-260052409 + +Patrick Monnerat (25 Nov 2016) +- os400: sync ILE/RPG binding + +Jay Satiro (24 Nov 2016) +- test1135: Fix curl_easy_duphandle prototype for code style + + Follow-up to dbadaeb which changed the style. + +- x509asn1: Restore the parameter check in Curl_getASN1Element + + - Restore the removed parts of the parameter check. + + Follow-up to 945f60e which altered the parameter check. + +Daniel Stenberg (25 Nov 2016) +- RELEASE-NOTES: update option counters + +- [Frank Gevaerts brought this change] + + add CURLINFO_SCHEME, CURLINFO_PROTOCOL, and %{scheme} + + Adds access to the effectively used protocol/scheme to both libcurl and + curl, both in string and numeric (CURLPROTO_*) form. + + Note that the string form will be uppercase, as it is just the internal + string. + + As these strings are declared internally as const, and all other strings + returned by curl_easy_getinfo() are de-facto const as well, string + handling in getinfo.c got const-ified. 
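+
+  A short usage sketch for the two new info values; the URL is a
+  placeholder and error checking is trimmed:
+
+  ```c
+  #include <stdio.h>
+  #include <curl/curl.h>
+
+  int main(void)
+  {
+    CURL *curl = curl_easy_init();
+    if(curl) {
+      curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+      if(curl_easy_perform(curl) == CURLE_OK) {
+        char *scheme = NULL;
+        long proto = 0;
+        /* uppercase internal string, e.g. "HTTPS" */
+        curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme);
+        /* numeric CURLPROTO_* value, e.g. CURLPROTO_HTTPS */
+        curl_easy_getinfo(curl, CURLINFO_PROTOCOL, &proto);
+        printf("scheme=%s protocol=%ld\n", scheme ? scheme : "?", proto);
+      }
+      curl_easy_cleanup(curl);
+    }
+    return 0;
+  }
+  ```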
+ + Closes #1137 + +- RELEASE-NOTES: synced with 63198a4750aeb + +- curl.1: the new --proxy options ship in 7.52.0 + +- checksrc: move open braces to comply with function declaration style + +- checksrc: detect wrongly placed open braces in func declarations + +- checksrc: white space edits to comply to stricter checksrc + +- checksrc: verify ASTERISKNOSPACE + + Detects (char*) and 'char*foo' uses. + +- checksrc: code style: use 'char *name' style + +- checksrc: add ASTERISKSPACE + + Verifies a 'char *name' style, with no space after the asterisk. + +- openssl: remove dead code + + Coverity CID 1394666 + +- [Okhin Vasilij brought this change] + + HTTPS-proxy: fixed mbedtls and polishing + +- darwinssl: adopted to the HTTPS proxy changes + + It builds and runs all test cases. No adaptations for actual HTTPS proxy + support has been made. + +- gtls: fix indent to silence compiler warning + + vtls/gtls.c: In function ‘Curl_gtls_data_pending’: + vtls/gtls.c:1429:3: error: this ‘if’ clause does not guard... [-Werror=misleading-indentation] + if(conn->proxy_ssl[connindex].session && + ^~ + vtls/gtls.c:1433:5: note: ...this statement, but the latter is misleadingly indented as if it is guarded by the ‘if’ + return res; + +- [Thomas Glanzmann brought this change] + + mbedtls: Fix compile errors + +- [Alex Rousskov brought this change] + + proxy: Support HTTPS proxy and SOCKS+HTTP(s) + + * HTTPS proxies: + + An HTTPS proxy receives all transactions over an SSL/TLS connection. + Once a secure connection with the proxy is established, the user agent + uses the proxy as usual, including sending CONNECT requests to instruct + the proxy to establish a [usually secure] TCP tunnel with an origin + server. HTTPS proxies protect nearly all aspects of user-proxy + communications as opposed to HTTP proxies that receive all requests + (including CONNECT requests) in vulnerable clear text. + + With HTTPS proxies, it is possible to have two concurrent _nested_ + SSL/TLS sessions: the "outer" one between the user agent and the proxy + and the "inner" one between the user agent and the origin server + (through the proxy). This change adds supports for such nested sessions + as well. + + A secure connection with a proxy requires its own set of the usual SSL + options (their actual descriptions differ and need polishing, see TODO): + + --proxy-cacert FILE CA certificate to verify peer against + --proxy-capath DIR CA directory to verify peer against + --proxy-cert CERT[:PASSWD] Client certificate file and password + --proxy-cert-type TYPE Certificate file type (DER/PEM/ENG) + --proxy-ciphers LIST SSL ciphers to use + --proxy-crlfile FILE Get a CRL list in PEM format from the file + --proxy-insecure Allow connections to proxies with bad certs + --proxy-key KEY Private key file name + --proxy-key-type TYPE Private key file type (DER/PEM/ENG) + --proxy-pass PASS Pass phrase for the private key + --proxy-ssl-allow-beast Allow security flaw to improve interop + --proxy-sslv2 Use SSLv2 + --proxy-sslv3 Use SSLv3 + --proxy-tlsv1 Use TLSv1 + --proxy-tlsuser USER TLS username + --proxy-tlspassword STRING TLS password + --proxy-tlsauthtype STRING TLS authentication type (default SRP) + + All --proxy-foo options are independent from their --foo counterparts, + except --proxy-crlfile which defaults to --crlfile and --proxy-capath + which defaults to --capath. + + Curl now also supports %{proxy_ssl_verify_result} --write-out variable, + similar to the existing %{ssl_verify_result} variable. 
+ + Supported backends: OpenSSL, GnuTLS, and NSS. + + * A SOCKS proxy + HTTP/HTTPS proxy combination: + + If both --socks* and --proxy options are given, Curl first connects to + the SOCKS proxy and then connects (through SOCKS) to the HTTP or HTTPS + proxy. + + TODO: Update documentation for the new APIs and --proxy-* options. + Look for "Added in 7.XXX" marks. + +Patrick Monnerat (24 Nov 2016) +- Declare endian read functions argument as a const pointer. + This is done for all functions of the form Curl_read[136][624]_[lb]e. + +- Limit ASN.1 structure sizes to 256K. Prevent some allocation size overflows. + See CRL-01-006. + +Jay Satiro (22 Nov 2016) +- url: Fix conn reuse for local ports and interfaces + + - Fix connection reuse for when the proposed new conn 'needle' has a + specified local port but does not have a specified device interface. + + Bug: https://curl.haxx.se/mail/lib-2016-11/0137.html + Reported-by: bjt3[at]hotmail.com + +Daniel Stenberg (21 Nov 2016) +- rand: pass in number of randoms as an unsigned argument + +Jay Satiro (20 Nov 2016) +- rand: Fix potentially uninitialized result warning + +Marcel Raad (19 Nov 2016) +- vtls: fix build warnings + + Fix warnings about conversions from long to time_t in openssl.c and + schannel.c. + + Follow-up to de4de4e3c7c + +Daniel Stenberg (18 Nov 2016) +- [Marcel Raad brought this change] + + lib: fix compiler warnings after de4de4e3c7c + + Visual C++ now complains about implicitly casting time_t (64-bit) to + long (32-bit). Fix this by changing some variables from long to time_t, + or explicitly casting to long where the public interface would be + affected. + + Closes #1131 + +Peter Wu (17 Nov 2016) +- [Isaac Boukris brought this change] + + Don't mix unix domain sockets with regular ones + + When reusing a connection, make sure the unix domain + socket option matches. + +Jay Satiro (17 Nov 2016) +- tests: Fix HTTP2-Settings header for huge window size + + Follow-up to a4d8888. Changing the window size in that commit resulted + in a different HTTP2-Settings upgrade header, causing test 1800 to fail. + +- http2: Use huge HTTP/2 windows + + - Improve performance by using a huge HTTP/2 window size. + + Bug: https://github.com/curl/curl/issues/1102 + Reported-by: afrind@users.noreply.github.com + Assisted-by: Tatsuhiro Tsujikawa + +Daniel Stenberg (16 Nov 2016) +- cmdline-docs: more conversion + +- gen: support 'protos' + + and warn on unrecognized lines + +- gen: support 'single' to make an individual page man page + +- cmdline-docs: more options converted over + +- gen: support 'redirect' + + ... and warn for too long --help lines + +- cmdline/gen: replace options in texts better + +Jay Satiro (16 Nov 2016) +- http2: Fix address sanitizer memcpy warning + + - In Curl_http2_switched don't call memcpy when src is NULL. + + Curl_http2_switched can be called like: + + Curl_http2_switched(conn, NULL, 0); + + .. and prior to this change memcpy was then called like: + + memcpy(dest, NULL, 0) + + .. 
causing address sanitizer to warn: + + http2.c:2057:3: runtime error: null pointer passed as argument 2, which + is declared to never be null + +- tool_help: Clarify --dump-header only writes received headers + +- curl.1: Clarify --dump-header only writes received headers + +Daniel Stenberg (15 Nov 2016) +- [Alex Chan brought this change] + + docs: Spelling fixes + +Kamil Dudka (15 Nov 2016) +- docs: the next release will be 7.52.0 + +Daniel Stenberg (15 Nov 2016) +- cmdline-opts: support generating the --help output + +- [David Schweikert brought this change] + + darwinssl: fix SSL client certificate not found on MacOS Sierra + + Reviewed-by: Nick Zitzmann + + Closes #1105 + +- curl: add --fail-early to help output + + Fixes test 1139 failures + + Follow-up to f82bbe01c8835 + +- glob: fix [a-c] globbing regression + + Brought in ee4f76606cf + + Added test case 1280 to verify + + Reported-by: Dave Reisner + + Bug: https://github.com/curl/curl/commit/ee4f76606cfa4ee068bf28edd37c8dae7e8db317#commitcomment-19823146 + +- curl: add --fail-early + + Exit with an error on the first transfer error instead of continuing to + do the rest of the URLs. + + Discussion: https://curl.haxx.se/mail/archive-2016-11/0038.html + +- Curl_rand: fixed and moved to rand.c + + Now Curl_rand() is made to fail if it cannot get the necessary random + level. + + Changed the proto of Curl_rand() slightly to provide a number of ints at + once. + + Moved out from vtls, since it isn't a TLS function and vtls provides + Curl_ssl_random() for this to use. + + Discussion: https://curl.haxx.se/mail/lib-2016-11/0119.html + +- cmdline-opts: first test version of a new man page generator kit + + See MANPAGE.md for the description of how this works. Each command line + option is now described in a separate .d file. + +- time_t fix: follow-up to de4de4e3c7c + + Blah, I accidentally wrote size_t instead of time_t for two variables. + + Reported-by: Dave Reisner + +- timeval: prefer time_t to hold seconds instead of long + + ... as long is still 32bit on modern 64bit windows machines, while + time_t is generally 64bit. + +Dan Fandrich (12 Nov 2016) +- tests: fixed variable might be clobbered warning + + This stops the compiler from potentially making invalid assumptions + about the immutability of sdp and sap across the longjmp boundary. + +Daniel Stenberg (12 Nov 2016) +- RELEASE-NOTES: synced with 346340808c + +- URL-parser: for file://[host]/ URLs, the [host] must be localhost + + Previously, the [host] part was just ignored which made libcurl accept + strange URLs misleading users. like "file://etc/passwd" which might've + looked like it refers to "/etc/passwd" but is just "/passwd" since the + "etc" is an ignored host name. + + Reported-by: Mike Crowe + Assisted-by: Kamil Dudka + +- test558: adapt to 0649433da + +- openssl: make sure to fail in the unlikely event that PRNG seeding fails + +- openssl: avoid unnecessary seeding if already done + + 1.1.0+ does more of this by itself so we can avoid extra processing this + way. + +- openssl: RAND_status always exists in OpenSSL >= 0.9.7 + + and remove RAND_screen from configure since nothing is using that + function + +- Curl_pgrsUpdate: use dedicated function for time passed + +- realloc: use Curl_saferealloc to avoid common mistakes + + Discussed: https://curl.haxx.se/mail/lib-2016-11/0087.html + +- [Daniel Hwang brought this change] + + curl: Add --retry-connrefused + + to consider ECONNREFUSED as a transient error. 
+ + Closes #1064 + +- openssl: raise the max_version to 1.3 if asked for + + Now I've managed to negotiate TLS 1.3 with https://enabled.tls13.com/ when + using boringssl. + +Jay Satiro (9 Nov 2016) +- vtls: Fail on unrecognized param for CURLOPT_SSLVERSION + + - Fix GnuTLS code for CURL_SSLVERSION_TLSv1_2 that broke when the + TLS 1.3 support was added in 6ad3add. + + - Homogenize across code for all backends the error message when TLS 1.3 + is not available to ": TLS 1.3 is not yet supported". + + - Return an error when a user-specified ssl version is unrecognized. + + --- + + Prior to this change our code for some of the backends used the + 'default' label in the switch statement (ie ver unrecognized) for + ssl.version and treated it the same as CURL_SSLVERSION_DEFAULT. + + Bug: https://curl.haxx.se/mail/lib-2016-11/0048.html + Reported-by: Kamil Dudka + +Daniel Stenberg (9 Nov 2016) +- [Isaac Boukris brought this change] + + SPNEGO: Fix memory leak when authentication fails + + If SPNEGO fails, cleanup the negotiate handle right away. + + Fixes #1115 + + Signed-off-by: Isaac Boukris + Reported-by: ashman-p + +- CODE_STYLE.md: link to INTERNALS.md correctly + +- bump: next version will be 7.52.0 + +- RELEASE-NOTES: synced with dfcdaaba371e9a3 + +- examples/fileupload.c: fclose the file as well + +- printf: fix ".*f" handling + + It would always use precision 1 instead of reading it from the argument + list as intended. + + Reported-by: Ray Satiro + + Bug: #1113 + +- curl_formadd.3: *_FILECONTENT and *_FILE need the file to be kept + + Reported-by: Frank Gevaerts + +Kamil Dudka (7 Nov 2016) +- nss: silence warning 'SSL_NEXT_PROTO_EARLY_VALUE not handled in switch' + + ... with nss-3.26.0 and newer + + Reported-by: Daniel Stenberg + +Daniel Stenberg (7 Nov 2016) +- openssl: initial TLS 1.3 adaptions + + BoringSSL supports TLSv1.3 already, but these changes don't seem to be anough + to get it working. + +- ssh: check md5 fingerprints case insensitively (regression) + + Revert the change from ce8d09483eea but use the new function + + Reported-by: Kamil Dudka + Bug: https://github.com/curl/curl/commit/ce8d09483eea2fcb1b50e323e1a8ed1f3613b2e3#commitcomment-19666146 + +Kamil Dudka (7 Nov 2016) +- curl: introduce the --tlsv1.3 option to force TLS 1.3 + + Fully implemented with the NSS backend only for now. + + Reviewed-by: Ray Satiro + +- vtls: support TLS 1.3 via CURL_SSLVERSION_TLSv1_3 + + Fully implemented with the NSS backend only for now. + + Reviewed-by: Ray Satiro + +- nss: map CURL_SSLVERSION_DEFAULT to NSS default + + ... but make sure we use at least TLSv1.0 according to libcurl API + + Reported-by: Cure53 + Reviewed-by: Ray Satiro + +Daniel Stenberg (7 Nov 2016) +- s/cURL/curl + + We're mostly saying just "curl" in lower case these days so here's a big + cleanup to adapt to this reality. A few instances are left as the + project could still formally be considered called cURL. + +Jay Satiro (7 Nov 2016) +- [Tatsuhiro Tsujikawa brought this change] + + http2: Don't send header fields prohibited by HTTP/2 spec + + Previously, we just ignored "Connection" header field. But HTTP/2 + specification actually prohibits few more header fields. This commit + ignores all of them so that we don't send these bad header fields. 
+ + Bug: https://curl.haxx.se/mail/archive-2016-10/0033.html + Reported-by: Ricki Hirner + + Closes https://github.com/curl/curl/pull/1092 + +Daniel Stenberg (7 Nov 2016) +- curl.1: explain the SMTP data expected for -T + + Fixes #1107 + + Reported-by: Adam Piggott + +Peter Wu (6 Nov 2016) +- cmake: disable poll for macOS + + Mirrors the autotools behavior introduced with curl-7_50_3-83-ga34c7ce. + + Fixes #1089 + +Jay Satiro (5 Nov 2016) +- easy: Initialize info variables on easy init and duphandle + + - Call Curl_initinfo on init and duphandle. + + Prior to this change the statistical and informational variables were + simply zeroed by calloc on easy init and duphandle. While zero is the + correct default value for almost all info variables, there is one where + it isn't (filetime initializes to -1). + + Bug: https://github.com/curl/curl/issues/1103 + Reported-by: Neal Poole + +Daniel Stenberg (5 Nov 2016) +- [Mauro Rappa brought this change] + + curl -w: added more decimal digits to timing counters + + Now showing microsecond resolution. + + Closes #1106 + +Jakub Zakrzewski (4 Nov 2016) +- dist: add CMakeLists.txt to the tarball + +Daniel Stenberg (4 Nov 2016) +- mbedtls: fix build with mbedtls versions < 2.4.0 + + Regression added in 62a8095e714 + + Reported-by: Tony Kelman + + Discussed in #1087 + +- configure: verify that compiler groks -Werror=partial-availability + + Reported-by: bemoody + + Fixes #1104 + +- docs: shorten and simplify the top comment in multi-uv.c + + and change URL to use https + +- [Andrei Sedoi brought this change] + + docs: handle CURL_POLL_INOUT in multi-uv example + +- [Andrei Sedoi brought this change] + + docs: multi-uv: don't use CURLMsg after cleanup + +- [Andrei Sedoi brought this change] + + docs: remove unused variables in multi-uv example + +- bump: start working on 7.51.1 + +- winbuild: remove strcase.obj from curl build + + Reported-by: Bruce Stephens + + Fixes #1098 + +Dan Fandrich (2 Nov 2016) +- msvc: removed a straggling reference to strequal.c + + Follow-up to 502acba2 + +Version 7.51.0 (2 Nov 2016) + +Daniel Stenberg (2 Nov 2016) +- THANKS: synced with 7.51.0 + +- RELEASE-NOTES: 7.51.0 + +- ftp_done: don't clobber the passed in error code + + Coverity CID 1374359 pointed out the unused result value. + +- ftp: remove dead code in ftp_done + + Coverity CID 1374358 + +Jay Satiro (1 Nov 2016) +- generate.bat: Include include/curl in libcurl VS projects + + .. because including those headers helps Visual Studio's Intellisense. + +- generate.bat: Remove strcase.[ch] from curl tool VS projects + + ..because they're no longer needed in the tool build. strcase is still + built by the libcurl project and exports curl_str(n)equal which is used + by the curl tool. + + Bug: https://github.com/curl/curl/commit/9363f1a#all_commit_comments + +Daniel Stenberg (2 Nov 2016) +- metalink: simplify the hex parsing function + + ... and now it avoids using the libcurl toupper() function + +Michael Kaufmann (1 Nov 2016) +- file: fix compiler warning + + follow-up to 46133aa5 + +Dan Fandrich (1 Nov 2016) +- strcase: fixed Metalink builds by redefining checkprefix() + + ...to use the public function curl_strnequal(). This isn't ideal because + it adds extra overhead to any internal calls to checkprefix. + + follow-up to 95bd2b3e + +Daniel Stenberg (1 Nov 2016) +- curl.1: typo + +- curl.1: expand on how multiple uses of -o looks + + Suggested-by: Dan Jacobson + Issue: https://github.com/curl/curl/issues/1097 + +- tests/util: get a private strncasecompare clone + + ... 
since the curlx_* code no longer provides one and we don't link + libcurl to these test servers. + +- strcase: make the tool use curl_str[n]equal instead + + As they are after all part of the public API. Saves space and reduces + complexity. Remove the strcase defines from the curlx_ family. + + Suggested-by: Dan Fandrich + Idea: https://curl.haxx.se/mail/lib-2016-10/0136.html + +Kamil Dudka (31 Oct 2016) +- gskit, nss: do not include strequal.h + + follow-up to 811a693b80 + +Dan Fandrich (31 Oct 2016) +- strcasecompare: include curl.h in strcase.c + + This should fix the "warning: 'curl_strequal' redeclared without + dllimport attribute: previous dllimport ignored" message and subsequent + link error on Windows because of the missing CURL_EXTERN on the + prototype. + +Daniel Stenberg (31 Oct 2016) +- strcase: fix the remaining rawstr users + +- msvc builds: s/rawstr/strcase + + Follow-up to 811a693b + +Dan Fandrich (31 Oct 2016) +- strcasecompare: replaced remaining rawstr.h with strcase.h + + This is a followup to commit 811a693b + +Marcel Raad (31 Oct 2016) +- digest_sspi: fix include + + Fix compile break from 811a693b80 + +Dan Fandrich (31 Oct 2016) +- libauthretry: use the external function curl_strequal + + The internal version strcasecompare isn't available outside libcurl + +Daniel Stenberg (31 Oct 2016) +- RELEASE-NOTES: synced with d14538d2501ef0da + +- configure: raise the default minimum version for macos to 10.8 + + follow-up to 4f8d0b6f02aa7043. Since the darwinssl code breaks + otherwise. If you build without darwinssl 10.5 works fine. + +- unit1301: keep testing curl_strequal + + as that is still part of the API, fix from 8fe4bd084412f30 + +- ldap: fix include + + Fix bug from 811a693b80 + +- url: remove unconditional idn2.h include + + Mistake brought by 9c91ec778104a + +- curl_strequal: part of public API/ABI, needs to be kept + + These two public functions have been mentioned as deprecated since a + very long time but since they are still part of the API and ABI we need + to keep them around. + +- strcase: s/strequal/strcasecompare + + some more follow-ups to 811a693b80 + +- ldap: fix strcase use + + follow-up to 811a693b80 + +- test165: adapted to the libidn2 use and IDNA2008 fix + +- cookie: replace use of fgets() with custom version + + ... that will ignore lines that are too long to fit in the buffer. + + CVE-2016-8615 + + Bug: https://curl.haxx.se/docs/adv_20161102A.html + Reported-by: Cure53 + +- strcasecompare: all case insensitive string compares ignore locale now + + We had some confusions on when each function was used. We should not act + differently on different locales anyway. + +- strcasecompare: is the new name for strequal() + + ... to make it less likely that we forget that the function actually + does case insentive compares. Also replaced several invokes of the + function with a plain strcmp when case sensitivity is not an issue (like + comparing with "-"). + +- ftp: check for previous patch must be case sensitive! + + ... otherwise example.com/PATH and example.com/path would be assumed to + be the same and they usually aren't! 
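+
+  For illustration only, a hedged sketch (not taken from the curl
+  sources) of the two public comparison functions the entries above
+  keep around for API/ABI compatibility. It assumes the usual
+  <curl/curl.h> declarations and that both return non-zero on a
+  case-insensitive match:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      /* locale-independent, case-insensitive comparisons */
+      printf("%d\n", curl_strequal("Content-Type", "content-type"));
+      printf("%d\n", curl_strnequal("HTTP://a", "http://b", 7));
+      return 0;
+    }
+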
+ +- SSH: check md5 fingerprint case sensitively + +- connectionexists: use case sensitive user/password comparisons + + CVE-2016-8616 + + Bug: https://curl.haxx.se/docs/adv_20161102B.html + Reported-by: Cure53 + +- base64: check for integer overflow on large input + + CVE-2016-8617 + + Bug: https://curl.haxx.se/docs/adv_20161102C.html + Reported-by: Cure53 + +- krb5: avoid realloc(0) + + If the requested size is zero, bail out with error instead of doing a + realloc() that would cause a double-free: realloc(0) acts as a free() + and then there's a second free in the cleanup path. + + CVE-2016-8619 + + Bug: https://curl.haxx.se/docs/adv_20161102E.html + Reported-by: Cure53 + +- aprintf: detect wrap-around when growing allocation + + On 32bit systems we could otherwise wrap around after 2GB and allocate 0 + bytes and crash. + + CVE-2016-8618 + + Bug: https://curl.haxx.se/docs/adv_20161102D.html + Reported-by: Cure53 + +- range: reject char globs with missing end like '[L-]' + + ... which previously would lead to out of boundary reads. + + Reported-by: Luật Nguyễn + +- glob_next_url: make sure to stay within the given output buffer + +- range: prevent negative end number in a glob range + + CVE-2016-8620 + + Bug: https://curl.haxx.se/docs/adv_20161102F.html + Reported-by: Luật Nguyễn + +- parsedate: handle cut off numbers better + + ... and don't read outside of the given buffer! + + CVE-2016-8621 + + bug: https://curl.haxx.se/docs/adv_20161102G.html + Reported-by: Luật Nguyễn + +- escape: avoid using curl_easy_unescape() internally + + Since the internal Curl_urldecode() function has a better API. + +- unescape: avoid integer overflow + + CVE-2016-8622 + + Bug: https://curl.haxx.se/docs/adv_20161102H.html + Reported-by: Cure53 + +- cookies: getlist() now holds deep copies of all cookies + + Previously it only held references to them, which was reckless as the + thread lock was released so the cookies could get modified by other + handles that share the same cookie jar over the share interface. + + CVE-2016-8623 + + Bug: https://curl.haxx.se/docs/adv_20161102I.html + Reported-by: Cure53 + +- TODO: remove IDNA2008 + +- idn: switch to libidn2 use and IDNA2008 support + + CVE-2016-8625 + + Bug: https://curl.haxx.se/docs/adv_20161102K.html + Reported-by: Christian Heimes + +- test1246: verify URL parsing with host name ending with '#' + +- urlparse: accept '#' as end of host name + + 'http://example.com#@127.0.0.1/x.txt' equals a request to example.com + for the '/' document with the rest of the URL being a fragment. + + CVE-2016-8624 + + Bug: https://curl.haxx.se/docs/adv_20161102J.html + Reported-by: Fernando Muñoz + +Jay Satiro (31 Oct 2016) +- INTERNALS: better markdown (follow-up) + + - Wrap more words with underscores in backticks. + + Follow-up to 13f4913. + +Daniel Stenberg (30 Oct 2016) +- INTERNALS: better markdown + + words with underscore need to be within `these` + + Bug: https://github.com/curl/curl-www/issues/19 + Reported-by : Jay Satiro + +Jay Satiro (30 Oct 2016) +- mk-ca-bundle.vbs: Fix UTF-8 output + + - Change initial message box to mention delay when downloading/parsing. + + Since there is no progress meter it was somewhat unexpected that after + choosing a filename nothing appears to happen, when actually the cert + data is in the process of being downloaded and parsed. + + - Warn if OpenSSL is not present. + + - Use a UTF-8 stream to make the ca-bundle data. + + - Save the UTF-8 ca-bundle stream as binary so that no BOM is added. 
+ + --- + + This is a follow-up to d2c6d15 which switched mk-ca-bundle.vbs output to + ANSI due to corrupt UTF-8 output, now fixed. + + This change completes making the default certificate bundle output of + mk-ca-bundle.vbs as close as possible to that of mk-ca-bundle.pl, which + should make it easier to review any difference between their output. + + Ref: https://github.com/curl/curl/pull/1012 + +Daniel Stenberg (28 Oct 2016) +- BINDINGS: converted to markdown + + To make it render better on the web site, at the price of it becoming + slightly less readable as text. + +Jay Satiro (27 Oct 2016) +- CURLMOPT_MAX_PIPELINE_LENGTH.3: Clarify it's not for HTTP/2 + + - Clarify that this option is only for HTTP/1.1 pipelining. + + Bug: https://github.com/curl/curl/issues/1059 + Reported-by: Jeroen Ooms + + Assisted-by: Daniel Stenberg + +Daniel Stenberg (27 Oct 2016) +- KNOWN_BUGS: HTTP/2 server push enabled when no pushes can be accepted + + Closes #927 + +- KNOWN_BUGS: c-ares deviates from stock resolver on http://1346569778 + + Closes #893 + +Michael Osipov (27 Oct 2016) +- configure.in: Fix test syntax + + Some versions of test allow == for equality, but others (such as the HP-UX + version) do not. Use a single = for correctness. + + Error output: + checking for monotonic clock_gettime... ./configure[20445]: ==: A test command parameter is not valid. + +Daniel Stenberg (27 Oct 2016) +- SECURITY: minor updates + + - we allow the security push up to 48 hours before the release + + - add a mention about possible pre-notifications + + - lower case the 'curl-security' title + +- [Andrei Sedoi brought this change] + + docs: fix req->data in multi-uv example + + Closes #1088 + +- mbedtls: stop using deprecated include file + + Reported-by: wyattoday + Fixes #1087 + +Kamil Dudka (25 Oct 2016) +- [Martin Frodl brought this change] + + nss: fix tight loop in non-blocking TLS handhsake over proxy + + ... in case the handshake completes before entering + CURLM_STATE_PROTOCONNECT + + Bug: https://bugzilla.redhat.com/1388162 + +Jay Satiro (25 Oct 2016) +- mk-ca-bundle: Update the vbscript version + + Bring the VBScript version more in line with the perl version: + + - Change timestamp to UTC. + + - Change URL retrieval to HTTPS-only by default. + + - Comment out the options that disabled SSL cert checking by default. + + - Assume OpenSSL is present, get SHA256. And add a flag to toggle it. + + - Fix cert issuer name output. + + The cert issuer output is now ansi, converted from UTF-8. Prior to this + it was corrupt UTF-8. It turns out though we can work with UTF-8 the + FSO object that writes ca-bundle can't write UTF-8, so there will have + to be some alternative if UTF-8 is needed (like an ADODB.Stream). + + - Disable the certificate text info feature. + + The certificate text info doesn't work properly with any recent OpenSSL. + +Daniel Stenberg (24 Oct 2016) +- TODO: indent code to make it render properly + +- TODO: Remove the generated include file + +- TODO: add "--retry should resume" + + See #1084 + +- mk-ca-bundle.1: document -k + + Brought in 1ad2bdcf110266c. Now does HTTPS by default and needs -k to + fall back to plain HTTP. + +- [Jay Satiro brought this change] + + mk-ca-bundle: Change URL retrieval to HTTPS-only by default + + - Change all predefined Mozilla URLs to HTTPS (Gregory Szorc). + + - New option -k to allow URLs other than HTTPS and enable HTTP fallback. + + Prior to this change the default URL retrieval mode was to fall back to + HTTP if HTTPS didn't work. 
+ + Reported-by: Gregory Szorc + + Closes #1012 + +- RELEASE-NOTES: synced with 50ee3aaf1a9b22d + +Dan Fandrich (23 Oct 2016) +- INSTALL.md: Updated minimum file sizes for 7.50.3 + +Daniel Stenberg (22 Oct 2016) +- multi: force connections to get closed in close_all_connections + + Several independent reports on infinite loops hanging in the + close_all_connections() function when closing a multi handle, can be + fixed by first marking the connection to get closed before calling + Curl_disconnect. + + This is more fixing-the-symptom rather than the underlying problem + though. + + Bug: https://curl.haxx.se/mail/lib-2016-10/0011.html + Bug: https://curl.haxx.se/mail/lib-2016-10/0059.html + + Reported-by: Dan Fandrich, Valentin David, Miloš Ljumović + +- [Anders Bakken brought this change] + + curl_multi_remove_handle: fix a double-free + + In short the easy handle needs to be disconnected from its connection at + this point since the connection still is serving other easy handles. + + In our app we can reliably reproduce a crash in our http2 stress test + that is fixed by this change. I can't easily reproduce the same test in + a small example. + + This is the gdb/asan output: + + ==11785==ERROR: AddressSanitizer: heap-use-after-free on address 0xe9f4fb80 at pc 0x09f41f19 bp 0xf27be688 sp 0xf27be67c + READ of size 4 at 0xe9f4fb80 thread T13 (RESOURCE_HTTP) + #0 0x9f41f18 in curl_multi_remove_handle /path/to/source/3rdparty/curl/lib/multi.c:666 + + 0xe9f4fb80 is located 0 bytes inside of 1128-byte region [0xe9f4fb80,0xe9f4ffe8) + freed by thread T13 (RESOURCE_HTTP) here: + #0 0xf7b1b5c2 in __interceptor_free /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_malloc_linux.cc:45 + #1 0x9f7862d in conn_free /path/to/source/3rdparty/curl/lib/url.c:2808 + #2 0x9f78c6a in Curl_disconnect /path/to/source/3rdparty/curl/lib/url.c:2876 + #3 0x9f41b09 in multi_done /path/to/source/3rdparty/curl/lib/multi.c:615 + #4 0x9f48017 in multi_runsingle /path/to/source/3rdparty/curl/lib/multi.c:1896 + #5 0x9f490f1 in curl_multi_perform /path/to/source/3rdparty/curl/lib/multi.c:2123 + #6 0x9c4443c in perform /path/to/source/src/net/resourcemanager/ResourceManagerCurlThread.cpp:854 + #7 0x9c445e0 in ... + #8 0x9c4cf1d in ... + #9 0xa2be6b5 in ... + #10 0xf7aa5780 in asan_thread_start /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_interceptors.cc:226 + #11 0xf4d3a16d in __clone (/lib/i386-linux-gnu/libc.so.6+0xe716d) + + previously allocated by thread T13 (RESOURCE_HTTP) here: + #0 0xf7b1ba27 in __interceptor_calloc /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_malloc_linux.cc:70 + #1 0x9f7dfa6 in allocate_conn /path/to/source/3rdparty/curl/lib/url.c:3904 + #2 0x9f88ca0 in create_conn /path/to/source/3rdparty/curl/lib/url.c:5797 + #3 0x9f8c928 in Curl_connect /path/to/source/3rdparty/curl/lib/url.c:6438 + #4 0x9f45a8c in multi_runsingle /path/to/source/3rdparty/curl/lib/multi.c:1411 + #5 0x9f490f1 in curl_multi_perform /path/to/source/3rdparty/curl/lib/multi.c:2123 + #6 0x9c4443c in perform /path/to/source/src/net/resourcemanager/ResourceManagerCurlThread.cpp:854 + #7 0x9c445e0 in ... + #8 0x9c4cf1d in ... + #9 0xa2be6b5 in ... 
+ #10 0xf7aa5780 in asan_thread_start /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_interceptors.cc:226 + #11 0xf4d3a16d in __clone (/lib/i386-linux-gnu/libc.so.6+0xe716d) + + SUMMARY: AddressSanitizer: heap-use-after-free /path/to/source/3rdparty/curl/lib/multi.c:666 in curl_multi_remove_handle + Shadow bytes around the buggy address: + 0x3d3e9f20: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9f30: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9f40: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9f50: fd fd fd fd fd fd fd fd fd fd fd fd fd fa fa fa + 0x3d3e9f60: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa + =>0x3d3e9f70:[fd]fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9f80: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9f90: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9fa0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9fb0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + 0x3d3e9fc0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd + Shadow byte legend (one shadow byte represents 8 application bytes): + Addressable: 00 + Partially addressable: 01 02 03 04 05 06 07 + Heap left redzone: fa + Heap right redzone: fb + Freed heap region: fd + Stack left redzone: f1 + Stack mid redzone: f2 + Stack right redzone: f3 + Stack partial redzone: f4 + Stack after return: f5 + Stack use after scope: f8 + Global redzone: f9 + Global init order: f6 + Poisoned by user: f7 + Container overflow: fc + Array cookie: ac + Intra object redzone: bb + ASan internal: fe + Left alloca redzone: ca + Right alloca redzone: cb + ==11785==ABORTING + + Thread 14 "RESOURCE_HTTP" received signal SIGABRT, Aborted. + [Switching to Thread 0xf27bfb40 (LWP 12324)] + 0xf7fd8be9 in __kernel_vsyscall () + (gdb) bt + #0 0xf7fd8be9 in __kernel_vsyscall () + #1 0xf4c7ee89 in __GI_raise (sig=6) at ../sysdeps/unix/sysv/linux/raise.c:54 + #2 0xf4c803e7 in __GI_abort () at abort.c:89 + #3 0xf7b2ef2e in __sanitizer::Abort () at /opt/toolchain/src/gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc:122 + #4 0xf7b262fa in __sanitizer::Die () at /opt/toolchain/src/gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_common.cc:145 + #5 0xf7b21ab3 in __asan::ScopedInErrorReport::~ScopedInErrorReport (this=0xf27be171, __in_chrg=) at /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_report.cc:689 + #6 0xf7b214a5 in __asan::ReportGenericError (pc=166993689, bp=4068206216, sp=4068206204, addr=3925146496, is_write=false, access_size=4, exp=0, fatal=true) at /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_report.cc:1074 + #7 0xf7b21fce in __asan::__asan_report_load4 (addr=3925146496) at /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_rtl.cc:129 + #8 0x09f41f19 in curl_multi_remove_handle (multi=0xf3406080, data=0xde582400) at /path/to/source3rdparty/curl/lib/multi.c:666 + #9 0x09f6b277 in Curl_close (data=0xde582400) at /path/to/source3rdparty/curl/lib/url.c:415 + #10 0x09f3354e in curl_easy_cleanup (data=0xde582400) at /path/to/source3rdparty/curl/lib/easy.c:860 + #11 0x09c6de3f in ... + #12 0x09c378c5 in ... + #13 0x09c48133 in ... + #14 0x09c4d092 in ... + #15 0x0a2be6b6 in ... 
+ #16 0xf7aa5781 in asan_thread_start (arg=0xf2d22938) at /opt/toolchain/src/gcc-6.2.0/libsanitizer/asan/asan_interceptors.cc:226 + #17 0xf5de52b5 in start_thread (arg=0xf27bfb40) at pthread_create.c:333 + #18 0xf4d3a16e in clone () at ../sysdeps/unix/sysv/linux/i386/clone.S:114 + + Fixes #1083 + +- testcurl.1: fix the URL to the autobuild summary + +- testcurl.1: update URLs + +- INSTALL: converted to markdown => INSTALL.md + + Also heavily edited for content. Removed lots of old cruft that we added + like 10+ years ago that is likely incorrect by now. + + Also removed INSTALL.devcpp for same reason. + +- [Martin Storsjo brought this change] + + configure: Check for other variants of the -m*os*-version-min flags + + In addition to -miphoneos-version-min, the same version can be set + using -mios-version-min. And for WatchOS and TvOS, there's + -mwatchos-version-min and -mtvos-version-min. + +- configure: set min version flags for builds on mac + + This helps building binaries that can work on multiple macOS versions. + + Help-by: Martin Storsjö + + Fixes #1069 + +- curl_multi_add_handle: set timeouts in closure handles + + The closure handle only ever has default timeouts set. To improve the + state somewhat we clone the timeouts from each added handle so that the + closure handle always has the same timeouts as the most recently added + easy handle. + + Fixes #739 + +- configure/CURL_CHECK_FUNC_POLL: disable poll completely on mac + + ... so that the same libcurl build easier can run on any version. + + Follow-up to issue #1057 + +- RELEASE-NOTES: synced with f36f8c14551efc6772 + +- test14xx: fixed --libcurl output tests again after 8e8afa82cbb + +- s/cURL/curl + + The tool was never called cURL, only the project. But even so, we have + more and more over time switched to just use lower case. + +- polarssl: indented code, removed unused variables + +- polarssl: reduce #ifdef madness with a macro + +- polarssl: fix unaligned SSL session-id lock + +- Curl_polarsslthreadlock_thread_setup: clear array at init + + ... since if it fails to init the entire array and then tries to clean + it up, it would attempt to work on an uninitialized pointer. + +- curl: set INTERLEAVEDATA too + + As otherwise the callback could be called with a NULL pointer when RTSP + data is provided. + +- gopher: properly return error for poll failures + +- select: switch to macros in uppercase + + Curl_select_ready() was the former API that was replaced with + Curl_select_check() a while back and the former arg setup was provided + with a define (in order to leave existing code unmodified). + + Now we instead offer SOCKET_READABLE and SOCKET_WRITABLE for the most + common shortcuts where only one socket is checked. They're also more + visibly macros. + +- select: use more proper macro-looking names + + ... so that it becomes more obvious in the code what is what. Also added + a typecast for one of the calculations. + +- Curl_socket_check: add extra check to avoid integer overflow + +- maketgz: make it support "only" generating version info + + ... to allow you to update the local repository with the given version + number data. + +Jay Satiro (17 Oct 2016) +- url: skip to-be-closed connections when pipelining (follow-up) + + - Change back behavior so that pipelining is considered possible for + connections that have not yet reached the protocol level. + + This is a follow-up to e5f0b1a which had changed the behavior of + checking if pipelining is possible to ignore connections that had + 'bits.close' set. 
Connections that have not yet reached the protocol + level also have that bit set, and we need to consider pipelining + possible on those connections. + +Daniel Stenberg (17 Oct 2016) +- HTTP2: mention the tool's limited support + +- RELEASE-NOTES: synced with a1a5cd04877fd6fd + +- [David Woodhouse brought this change] + + curl: do not set CURLOPT_SSLENGINEDEFAULT automatically + + There were bugs in the PKCS#11 engine, and fixing them triggers bugs in + OpenSSL. Just don't get involved; there's no need to be making the + engine methods the default anyway. + + https://github.com/OpenSC/libp11/pull/108 + https://github.com/openssl/openssl/pull/1639 + + Merges #1042 + +- KNOWN_BUGS: two more existing problems + +Marcel Raad (16 Oct 2016) +- win: fix Universal Windows Platform build + + This fixes a merge error in commit 7f3df80 caused by commit 332e8d6. + + Additionally, this changes Curl_verify_windows_version for Windows App + builds to assume to always be running on the target Windows version. + There seems to be no way to determine the Windows version from a + UWP app. Neither GetVersion(Ex), nor VerifyVersionInfo, nor the + Version Helper functions are supported. + + Bug: https://github.com/curl/curl/pull/820#issuecomment-250889878 + Reported-by: Paul Joyce + + Closes https://github.com/curl/curl/pull/1048 + +Daniel Stenberg (16 Oct 2016) +- KNOWN_BUGS: minor formatting edit + +Jay Satiro (14 Oct 2016) +- [Rider Linden brought this change] + + url: skip to-be-closed connections when pipelining + + No longer attempt to use "doomed" to-be-closed connections when + pipelining. Prior to this change connections marked for deletion (e.g. + timeout) would be erroneously used, resulting in sporadic crashes. + + As originally reported and fixed by Carlo Wood (origin unknown). + + Bug: https://github.com/curl/curl/issues/627 + Reported-by: Rider Linden + + Closes https://github.com/curl/curl/pull/1075 + Participation-by: nopjmp@users.noreply.github.com + +Daniel Stenberg (13 Oct 2016) +- vtls: only re-use session-ids using the same scheme + + To make it harder to do cross-protocol mistakes + +Jay Satiro (11 Oct 2016) +- [Torben Dannhauer brought this change] + + dist: add missing cmake modules to the tarball + + Closes https://github.com/curl/curl/pull/1070 + +Daniel Stenberg (11 Oct 2016) +- configure: detect the broken poll() in macOS 10.12 + + Fixes #1057 + +- dist: remove PDF and HTML converted docs from the releases + +- [Remo E brought this change] + + cmake: add nghttp2 support + + Closes #922 + +- [Andreas Streichardt brought this change] + + resolve: add error message when resolving using SIGALRM + + Closes #1066 + +- GIT-INFO: remove the Mac 10.1-specific details + + There shouldn't be many devs out there anymore using such outdated macOS + versions. And it removes the dead link. + + Closes #1049 + +- RELEASE-NOTES: spellfix + +- RELEASE-NOTES: synced with 82720490628cb53a + + 5 more fixes, 2 more contributors + +- [Tobias Stoeckmann brought this change] + + smb: properly check incoming packet boundaries + + Not all reply messages were properly checked for their lengths, which + made it possible to access uninitialized memory (but this does not lead + to out of boundary accesses). + + Closes #1052 + +- test557: verify printf() with 128 and 129 arguments + +- mprintf: return error on too many arguments + + 128 arguments should be enough for everyone + +- ftp: fix Curl_ftpsendf() + + ... 
it no longer takes printf() arguments since it was only really taken + advantage by one user and it was not written and used in a safe + way. Thus the 'f' is removed from the function name and the proto is + changed. + + Although the current code wouldn't end up in badness, it was a risk that + future changes could end up springf()ing too large data or passing in a + format string inadvertently. + +- formpost: avoid silent snprintf() truncation + + The previous use of snprintf() could make libcurl silently truncate some + input data and not report that back on overly large input, which could + make data get sent over the network in a bad format. + + Example: + + $ curl --form 'a=b' -H "Content-Type: $(perl -e 'print "A"x4100')" + +- TODO: build: Enable PIE and RELRO by default + +- TODO: Support better than MD5 hostkey hash (for ssh) + +- [Daniel Gustafsson brought this change] + + tests: Fix a small typo in the tests README (#1060) + + The subdirectory for logs in tests/ is named log/ without an 's' + at the end. + +- TODO: Introduce --fail-fast to exit on first transfer fail + + See #1054 + +- TODO: Leave secure cookies alone + +- [Rainer Müller brought this change] + + CURLOPT_DEBUGFUNCTION.3: unused argument warning (#1056) + + The 'userp' argument is unused in this example code. + +- TODO: TCP Fast Open for windows + +- RELEASE-NOTES: synced with 8fd2a754f0de + +- CURLOPT_KEEP_SENDING_ON_ERROR.3: mention when it is added + +- memdup: use 'void *' as return and source type + +- TODO: Add easy argument to formpost functions + +- formpost: trying to attach a directory no longer crashes + + The error path would previously add a freed entry to the linked list. + + Reported-by: Toby Peterson + + Fixes #1053 + +- [Sergei Kuzmin brought this change] + + cookies: same domain handling changed to match browser behavior + + Cokie with the same domain but different tailmatching property are now + considered different and do not replace each other. If header contains + following lines then two cookies will be set: Set-Cookie: foo=bar; + domain=.foo.com; expires=Thu Mar 3 GMT 8:56:27 2033 Set-Cookie: foo=baz; + domain=foo.com; expires=Thu Mar 3 GMT 8:56:27 2033 + + This matches Chrome, Opera, Safari, and Firefox behavior. When sending + stored tokens to foo.com Chrome, Opera, Firefox store send them in the + stored order, while Safari pre-sort the cookies. + + Closes #1050 + +- [Stephen Brokenshire brought this change] + + FAQ: Fix typos in section 5.14 (#1047) + + Type required for YourClass::func C++ function (using size_t in line + with the documentation for CURLOPT_WRITEFUNCTION) and missing second + colon when specifying the static function for CURLOPT_WRITEFUNCTION. + +- [Sebastian Mundry brought this change] + + KNOWN_BUGS: Fix typos in section 5.8. + + Closes #1046 + +- [mundry brought this change] + + CONTRIBUTE.md: Fix typo in 'About pull requests' section. (#1045) + +- curl.1: --trace supports % for sending to stderr! + +- KNOWN_BUGS: 5.8 configure finding libs in wrong directory + +Dan Fandrich (24 Sep 2016) +- configure: Fixed builds with libssh2 in a custom location + + A libssh2 library in the standard system location was being used in + preference to the desired one while linking. 
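+
+  For illustration of the formpost fixes noted above (directory
+  arguments and silent snprintf() truncation), a hedged sketch of the
+  curl_formadd() usage involved; the URL and file name are placeholders:
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      struct curl_httppost *post = NULL, *last = NULL;
+
+      if(curl) {
+        /* CURLFORM_FILE must name a regular file; handing it a
+           directory is an error (the fix above makes that error path
+           safe instead of corrupting the form list) */
+        curl_formadd(&post, &last,
+                     CURLFORM_COPYNAME, "upload",
+                     CURLFORM_FILE, "report.txt",
+                     CURLFORM_END);
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/submit");
+        curl_easy_setopt(curl, CURLOPT_HTTPPOST, post);
+        curl_easy_perform(curl);
+        curl_formfree(post);
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }
+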
+ +Daniel Stenberg (23 Sep 2016) +- SECURITY: remove the top ascii logo + +Michael Kaufmann (22 Sep 2016) +- New libcurl option to keep sending on error + + Add the new option CURLOPT_KEEP_SENDING_ON_ERROR to control whether + sending the request body shall be completed when the server responds + early with an error status code. + + This is suitable for manual NTLM authentication. + + Reviewed-by: Jay Satiro + + Closes https://github.com/curl/curl/pull/904 + +Kamil Dudka (22 Sep 2016) +- nss: add chacha20-poly1305 cipher suites if supported by NSS + +- nss: add cipher suites using SHA384 if supported by NSS + +- nss: fix typo in ecdhe_rsa_null cipher suite string + + As it seems to be a rarely used cipher suite (for securely established + but _unencrypted_ connections), I believe it is fine not to provide an + alias for the misspelled variant. + +Jay Satiro (21 Sep 2016) +- docs: Remove that --proto is just used for initial retrieval + + .. and add that --proto-redir and CURLOPT_REDIR_PROTOCOLS do not + override protocols denied by --proto and CURLOPT_PROTOCOLS. + + - Add a test to enforce: --proto deny must override --proto-redir allow + + Closes https://github.com/curl/curl/pull/1031 + +Daniel Stenberg (21 Sep 2016) +- dist: add CurlSymbolHiding.cmake to the tarball + + Follow-up to 6140dfcf3e784 + + Reported-by: Alexander Sinditskiy + +- curl_global_cleanup.3: don't unload the lib with sub threads running + + Discussed in #997 + + Assisted-by: Jay Satiro + +- MAIL-ETIQUETTE: language + +Jay Satiro (20 Sep 2016) +- easy: Reset all statistical session info in curl_easy_reset + + Bug: https://github.com/curl/curl/issues/1017 + Reported-by: Jeroen Ooms + +Daniel Stenberg (19 Sep 2016) +- RELEASE-NOTES: synced with 79607eec51055 + +Jay Satiro (19 Sep 2016) +- [Daniel Gustafsson brought this change] + + darwinssl: Fix typo in comment + + Closes https://github.com/curl/curl/pull/1028 + +Daniel Stenberg (19 Sep 2016) +- [Bernard Spil brought this change] + + libressl: fix version output + + LibreSSL defines `OPENSSL_VERSION_NUMBER` as `0x20000000L` for all + versions returning `LibreSSL/2.0.0` for any LibreSSL version. + + This change provides a local OpenSSL_version_num function replacement + returning LIBRESSL_VERSION_NUMBER instead. + + Closes #1029 + +- [rugk brought this change] + + TODO: Add PINNEDPUBLICKEY - HPKP compatibility, HSTS & HPKP + + Closes #1025 + Closes #1026 + Closes #1027 + +- openssl: don't call ERR_remote_thread_state on >= 1.1.0 + + Follow-up fix to d9321562 + +- openssl: don’t call CRYTPO_cleanup_all_ex_data + + The OpenSSL function CRYTPO_cleanup_all_ex_data() cannot be called + multiple times without crashing - and other libs might call it! We + basically cannot call it without risking a crash. The function is a + no-op since OpenSSL 1.1.0. + + Not calling this function only risks a small memory leak with OpenSSL < + 1.1.0. + + Bug: https://curl.haxx.se/mail/lib-2016-09/0045.html + Reported-by: Todd Short + +- TODO: Support SSLKEYLOGFILE + +Jay Satiro (18 Sep 2016) +- CURLOPT_PINNEDPUBLICKEY.3: fix the AVAILABILITY formatting + +Nick Zitzmann (18 Sep 2016) +- darwinssl: disable RC4 cipher-suite support + + RC4 was a nice alternative to CBC back in the days of BEAST, but it's insecure and obsolete now. + +- configure: change "iOS/Mac OS X native" to "Apple OS native" + + Since I first wrote that text, Apple introduced tvOS and watchOS, and renamed "Mac OS X" to "macOS." Let's make the text a little more inclusive, since curl can be built for all four operating systems. 
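+
+  For illustration of the CURLOPT_KEEP_SENDING_ON_ERROR entry above, a
+  hedged sketch; the URL and request body are placeholders:
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl;
+
+      curl_global_init(CURL_GLOBAL_DEFAULT);
+      curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/upload");
+        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "payload=1");
+        /* keep sending the request body even when the server responds
+           early with an error status (e.g. manual NTLM authentication) */
+        curl_easy_setopt(curl, CURLOPT_KEEP_SENDING_ON_ERROR, 1L);
+        curl_easy_perform(curl);
+        curl_easy_cleanup(curl);
+      }
+      curl_global_cleanup();
+      return 0;
+    }
+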
+ +Jay Satiro (18 Sep 2016) +- test2048: fix url + +- examples/imap-append: Set size of data to be uploaded + + Prior to this commit this example failed with error + 'Cannot APPEND with unknown input file size'. + + Bug: https://github.com/curl/curl/issues/1008 + Reported-by: lukaszgn@users.noreply.github.com + + Closes https://github.com/curl/curl/pull/1011 + +Daniel Stenberg (16 Sep 2016) +- [Tony Kelman brought this change] + + LICENSE-MIXING.md: update with mbedTLS dual licensing + + Recent versions of mbedTLS are available under either Apache 2.0 or GPL + 2.0, see https://tls.mbed.org/how-to-get + + Closes #1019 + +- KNOWN_BUGS: chunked-encoded requests with HTTP/2 is fixed + +- http2: debug ouput sent HTTP/2 request headers + +- http: accept "Transfer-Encoding: chunked" for HTTP/2 as well + + ... but don't send the actual header over the wire as it isn't accepted. + Chunked uploading is still triggered using this method. + + Fixes #1013 + Fixes #662 + +- openssl: fix per-thread memory leak usiong 1.0.1 or 1.0.2 + + OpenSSL 1.0.1 and 1.0.2 build an error queue that is stored per-thread + so we need to clean it when easy handles are freed, in case the thread + will be killed in which the easy handle was used. All OpenSSL code in + libcurl should extract the error in association with the error already + so clearing this queue here should be harmless at worst. + + Fixes #964 + +- RELEASE-NOTES: reset and go toward 7.51.0 (again) + +Version 7.50.3 (14 Sep 2016) + +Daniel Stenberg (14 Sep 2016) +- THANKS: updated with curl 7.50.3 contributors + +- RELEASE-NOTES: curl 7.50.3 + +- test1605: verify negative input lengths to (un)escape functions + +- curl_easy_unescape: deny negative string lengths as input + + CVE-2016-7167 + + Bug: https://curl.haxx.se/docs/adv_20160914.html + +- curl_easy_escape: deny negative string lengths as input + + CVE-2016-7167 + + Bug: https://curl.haxx.se/docs/adv_20160914.html + +- curl: make --create-dirs on windows grok both forward and backward slashes + + Reported-by: Ryan Scott + + Fixes #1007 + +- RELEASE-NOTES: synced with 665694979b6 + +- [Tony Kelman brought this change] + + mbedtls: switch off NTLM in build if md4 isn't available + + NTLM support with mbedTLS was added in 497e7c9 but requires that mbedTLS + is built with the MD4 functions available, which it isn't in default + builds. This now adapts if the funtion isn't there and builds libcurl + without NTLM support if so. + + Fixes #1004 + +Jay Satiro (12 Sep 2016) +- CODE_STYLE: fix long-line guideline + + - Change maximum allowed line length from 80 to 79. + +- CODE_STYLE: add column alignment section + + Note that since the added examples are for column alignment I had to + encapsulate with ~~~c markdown to preserve their alignment. + +Peter Wu (11 Sep 2016) +- cmake: fix curl-config --static-libs + + The `curl-config --static-libs` command should not output paths like + -l/usr/lib/libssl.so, instead print the absolute path without `-l`. + + This also removes the confusing message "Static linking is broken" which + was printed because curl-config --static-libs was disfunctional even + though the static libcurl.a library works properly. + + Fixes https://github.com/curl/curl/issues/841 + +Daniel Stenberg (11 Sep 2016) +- http: refuse to pass on response body with NO_NODY was set + + ... like when a HTTP/0.9 response comes back without any headers at all + and just a body this now prevents that body from being sent to the + callback etc. + + Adapted test 1144 to verify. 
+ + Fixes #973 + + Assisted-by: Ray Satiro + +- RELEASE-NOTES: synced with 257bf3ac67eb6 + +Jakub Zakrzewski (10 Sep 2016) +- CMake: Don't build unit tests if private symbols are hidden + + This only excludes building unit tests from default build ( 'all' Make + target or "Build Solution" in VisualStudio). The projects and Make + targets will still be generated and shown in supporting IDEs. + + Fixes https://github.com/curl/curl/issues/981 + Reported-by: Randy Armstrong + + Closes https://github.com/curl/curl/pull/990 + +- CMake: Try to (un-)hide private library symbols + + Detect support for compiler symbol visibility flags and apply those + according to CURL_HIDDEN_SYMBOLS option. + It should work true to the autotools build except it tries to unhide + symbols on Windows when requested and prints warning if it fails. + + Ref: https://github.com/curl/curl/issues/981#issuecomment-242665951 + Reported-by: Daniel Stenberg + +Daniel Stenberg (9 Sep 2016) +- openssl: fix bad memory free (regression) + + ... by partially reverting f975f06033b1. The allocation could be made by + OpenSSL so the free must be made with OPENSSL_free() to avoid problems. + + Reported-by: Harold Stuart + Fixes #1005 + +- http2: support > 64bit sized uploads + + ... by making sure we don't count down the "upload left" counter when the + uploaded size is unknown and then it can be allowed to continue forever. + + Fixes #996 + +Jay Satiro (7 Sep 2016) +- errors: new alias CURLE_WEIRD_SERVER_REPLY (8) + + Since we're using CURLE_FTP_WEIRD_SERVER_REPLY in imap, pop3 and smtp as + more of a generic "failed to parse" introduce an alias without FTP in + the name. + + Closes https://github.com/curl/curl/pull/975 + +Daniel Stenberg (7 Sep 2016) +- bump: toward 7.51.0 + +- HISTORY: remove ascii logo to render nicer on web + +- curl: whitelist use of strtok() in non-threaded context + +- checksrc: detect strtok() use + + ... as that function slipped through once before. + +GitHub (7 Sep 2016) +- [Viktor Szakats brought this change] + + mk-ca-bundle.pl: use SHA256 instead of SHA1 + + This hash is used to verify the original downloaded certificate bundle + and also included in the generated bundle's comment header. Also + rename related internal symbols to algorithm-agnostic names. + +Version 7.50.2 (7 Sep 2016) + +Daniel Stenberg (7 Sep 2016) +- RELEASE-NOTES: curl 7.50.2 release + +- THANKS: updated for 7.50.2 + +Jay Satiro (6 Sep 2016) +- [Gaurav Malhotra brought this change] + + openssl: fix CURLINFO_SSL_VERIFYRESULT + + CURLINFO_SSL_VERIFYRESULT does not get the certificate verification + result when SSL_connect fails because of a certificate verification + error. + + This fix saves the result of SSL_get_verify_result so that it is + returned by CURLINFO_SSL_VERIFYRESULT. + + Closes https://github.com/curl/curl/pull/995 + +Daniel Stenberg (6 Sep 2016) +- [Daniel Gustafsson brought this change] + + darwinssl: test for errSecSuccess in PKCS12 import rather than noErr (#993) + + While noErr and errSecSuccess are defined as the same value, the API + documentation states that SecPKCS12Import() returns errSecSuccess if + there were no errors in importing. Ensure that a future change of the + defined value doesn't break (however unlikely) and be consistent with + the API docs. 
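+
+  For illustration of the CURLINFO_SSL_VERIFYRESULT fix above, a hedged
+  sketch (meaningful with the OpenSSL backend; the URL is a
+  placeholder). With the fix, the verify result is filled in even when
+  the TLS handshake itself fails because of a certificate verification
+  error:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        long verifyresult = 0;
+        CURLcode res;
+
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        res = curl_easy_perform(curl);
+        /* readable whether or not the transfer itself succeeded */
+        curl_easy_getinfo(curl, CURLINFO_SSL_VERIFYRESULT, &verifyresult);
+        printf("transfer: %d, verify result: %ld\n", (int)res, verifyresult);
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }
+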
+ +- [Daniel Gustafsson brought this change] + + docs: Fix link to CONTRIBUTE in Github contribution guidelines (#994) + +- [Marcel Raad brought this change] + + openssl: Fix compilation with OPENSSL_API_COMPAT=0x10100000L + + With OPENSSL_API_COMPAT=0x10100000L (OpenSSL 1.1 API), the cleanup + functions are unavailable (they're no-ops anyway in OpenSSL 1.1). The + replacements for SSL_load_error_strings, SSLeay_add_ssl_algorithms, and + OpenSSL_add_all_algorithms are called automatically [1][2]. SSLeay() is + now called OpenSSL_version_num(). + + [1]: https://www.openssl.org/docs/man1.1.0/ssl/OPENSSL_init_ssl.html + [2]: https://www.openssl.org/docs/man1.1.0/crypto/OPENSSL_init_crypto.html + + Closes #992 + +- RELEASE-NOTES: synced with 3d4c0c8b9bc1d + +- http2: return EOF when done uploading without known size + + Fixes #982 + +- http2: skip the content-length parsing, detect unknown size + +- http2: minor white space edit diff --git a/deps-win32/curl-7.54.1/CMake/CMakeConfigurableFile.in b/deps-win32/curl-7.54.1/CMake/CMakeConfigurableFile.in new file mode 100644 index 0000000..4cf74a1 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/CMakeConfigurableFile.in @@ -0,0 +1,2 @@ +@CMAKE_CONFIGURABLE_FILE_CONTENT@ + diff --git a/deps-win32/curl-7.54.1/CMake/CurlSymbolHiding.cmake b/deps-win32/curl-7.54.1/CMake/CurlSymbolHiding.cmake new file mode 100644 index 0000000..9f7d296 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/CurlSymbolHiding.cmake @@ -0,0 +1,61 @@ +include(CheckCSourceCompiles) + +option(CURL_HIDDEN_SYMBOLS "Set to ON to hide libcurl internal symbols (=hide all symbols that aren't officially external)." ON) +mark_as_advanced(CURL_HIDDEN_SYMBOLS) + +if(CURL_HIDDEN_SYMBOLS) + set(SUPPORTS_SYMBOL_HIDING FALSE) + + if(CMAKE_C_COMPILER_ID MATCHES "Clang") + set(SUPPORTS_SYMBOL_HIDING TRUE) + set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))") + set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden") + elseif(CMAKE_COMPILER_IS_GNUCC) + if(NOT CMAKE_VERSION VERSION_LESS 2.8.10) + set(GCC_VERSION ${CMAKE_C_COMPILER_VERSION}) + else() + execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion + OUTPUT_VARIABLE GCC_VERSION) + endif() + if(NOT GCC_VERSION VERSION_LESS 3.4) + # note: this is considered buggy prior to 4.0 but the autotools don't care, so let's ignore that fact + set(SUPPORTS_SYMBOL_HIDING TRUE) + set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))") + set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden") + endif() + elseif(CMAKE_C_COMPILER_ID MATCHES "SunPro" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 8.0) + set(SUPPORTS_SYMBOL_HIDING TRUE) + set(_SYMBOL_EXTERN "__global") + set(_CFLAG_SYMBOLS_HIDE "-xldscope=hidden") + elseif(CMAKE_C_COMPILER_ID MATCHES "Intel" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 9.0) + # note: this should probably just check for version 9.1.045 but I'm not 100% sure + # so let's to it the same way autotools do. 
+ set(SUPPORTS_SYMBOL_HIDING TRUE) + set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))") + set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden") + check_c_source_compiles("#include + int main (void) { printf(\"icc fvisibility bug test\"); return 0; }" _no_bug) + if(NOT _no_bug) + set(SUPPORTS_SYMBOL_HIDING FALSE) + set(_SYMBOL_EXTERN "") + set(_CFLAG_SYMBOLS_HIDE "") + endif() + elseif(MSVC) + set(SUPPORTS_SYMBOL_HIDING TRUE) + endif() + + set(HIDES_CURL_PRIVATE_SYMBOLS ${SUPPORTS_SYMBOL_HIDING}) +elseif(MSVC) + if(NOT CMAKE_VERSION VERSION_LESS 3.7) + set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) #present since 3.4.3 but broken + set(HIDES_CURL_PRIVATE_SYMBOLS FALSE) + else() + message(WARNING "Hiding private symbols regardless CURL_HIDDEN_SYMBOLS being disabled.") + set(HIDES_CURL_PRIVATE_SYMBOLS TRUE) + endif() +elseif() + set(HIDES_CURL_PRIVATE_SYMBOLS FALSE) +endif() + +set(CURL_CFLAG_SYMBOLS_HIDE ${_CFLAG_SYMBOLS_HIDE}) +set(CURL_EXTERN_SYMBOL ${_SYMBOL_EXTERN}) diff --git a/deps-win32/curl-7.54.1/CMake/CurlTests.c b/deps-win32/curl-7.54.1/CMake/CurlTests.c new file mode 100644 index 0000000..bc36c8e --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/CurlTests.c @@ -0,0 +1,551 @@ +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) 1998 - 2014, Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at https://curl.haxx.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. 
+ * + ***************************************************************************/ +#ifdef TIME_WITH_SYS_TIME +/* Time with sys/time test */ + +#include +#include +#include + +int +main () +{ +if ((struct tm *) 0) +return 0; + ; + return 0; +} + +#endif + +#ifdef HAVE_FCNTL_O_NONBLOCK + +/* headers for FCNTL_O_NONBLOCK test */ +#include +#include +#include +/* */ +#if defined(sun) || defined(__sun__) || \ + defined(__SUNPRO_C) || defined(__SUNPRO_CC) +# if defined(__SVR4) || defined(__srv4__) +# define PLATFORM_SOLARIS +# else +# define PLATFORM_SUNOS4 +# endif +#endif +#if (defined(_AIX) || defined(__xlC__)) && !defined(_AIX41) +# define PLATFORM_AIX_V3 +#endif +/* */ +#if defined(PLATFORM_SUNOS4) || defined(PLATFORM_AIX_V3) || defined(__BEOS__) +#error "O_NONBLOCK does not work on this platform" +#endif + +int +main () +{ + /* O_NONBLOCK source test */ + int flags = 0; + if(0 != fcntl(0, F_SETFL, flags | O_NONBLOCK)) + return 1; + return 0; +} +#endif + +/* tests for gethostbyaddr_r or gethostbyname_r */ +#if defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \ + defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \ + defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \ + defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT) || \ + defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \ + defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT) +# define _REENTRANT + /* no idea whether _REENTRANT is always set, just invent a new flag */ +# define TEST_GETHOSTBYFOO_REENTRANT +#endif +#if defined(HAVE_GETHOSTBYADDR_R_5) || \ + defined(HAVE_GETHOSTBYADDR_R_7) || \ + defined(HAVE_GETHOSTBYADDR_R_8) || \ + defined(HAVE_GETHOSTBYNAME_R_3) || \ + defined(HAVE_GETHOSTBYNAME_R_5) || \ + defined(HAVE_GETHOSTBYNAME_R_6) || \ + defined(TEST_GETHOSTBYFOO_REENTRANT) +#include +#include +int main(void) +{ + char *address = "example.com"; + int length = 0; + int type = 0; + struct hostent h; + int rc = 0; +#if defined(HAVE_GETHOSTBYADDR_R_5) || \ + defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \ + \ + defined(HAVE_GETHOSTBYNAME_R_3) || \ + defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT) + struct hostent_data hdata; +#elif defined(HAVE_GETHOSTBYADDR_R_7) || \ + defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \ + defined(HAVE_GETHOSTBYADDR_R_8) || \ + defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \ + \ + defined(HAVE_GETHOSTBYNAME_R_5) || \ + defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \ + defined(HAVE_GETHOSTBYNAME_R_6) || \ + defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT) + char buffer[8192]; + int h_errnop; + struct hostent *hp; +#endif + +#ifndef gethostbyaddr_r + (void)gethostbyaddr_r; +#endif + +#if defined(HAVE_GETHOSTBYADDR_R_5) || \ + defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) + rc = gethostbyaddr_r(address, length, type, &h, &hdata); +#elif defined(HAVE_GETHOSTBYADDR_R_7) || \ + defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) + hp = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &h_errnop); + (void)hp; +#elif defined(HAVE_GETHOSTBYADDR_R_8) || \ + defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) + rc = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &hp, &h_errnop); +#endif + +#if defined(HAVE_GETHOSTBYNAME_R_3) || \ + defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT) + rc = gethostbyname_r(address, &h, &hdata); +#elif defined(HAVE_GETHOSTBYNAME_R_5) || \ + defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) + rc = gethostbyname_r(address, &h, buffer, 8192, &h_errnop); + (void)hp; /* not used for test */ +#elif defined(HAVE_GETHOSTBYNAME_R_6) || \ + defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT) + rc = gethostbyname_r(address, &h, buffer, 8192, &hp, &h_errnop); +#endif + + 
(void)length; + (void)type; + (void)rc; + return 0; +} +#endif + +#ifdef HAVE_SOCKLEN_T +#ifdef _WIN32 +#include +#else +#include +#include +#endif +int +main () +{ +if ((socklen_t *) 0) + return 0; +if (sizeof (socklen_t)) + return 0; + ; + return 0; +} +#endif +#ifdef HAVE_IN_ADDR_T +#include +#include +#include + +int +main () +{ +if ((in_addr_t *) 0) + return 0; +if (sizeof (in_addr_t)) + return 0; + ; + return 0; +} +#endif + +#ifdef HAVE_BOOL_T +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_STDBOOL_H +#include +#endif +int +main () +{ +if (sizeof (bool *) ) + return 0; + ; + return 0; +} +#endif + +#ifdef STDC_HEADERS +#include +#include +#include +#include +int main() { return 0; } +#endif +#ifdef RETSIGTYPE_TEST +#include +#include +#ifdef signal +# undef signal +#endif +#ifdef __cplusplus +extern "C" void (*signal (int, void (*)(int)))(int); +#else +void (*signal ()) (); +#endif + +int +main () +{ + return 0; +} +#endif +#ifdef HAVE_INET_NTOA_R_DECL +#include + +typedef void (*func_type)(); + +int main() +{ +#ifndef inet_ntoa_r + func_type func; + func = (func_type)inet_ntoa_r; +#endif + return 0; +} +#endif +#ifdef HAVE_INET_NTOA_R_DECL_REENTRANT +#define _REENTRANT +#include + +typedef void (*func_type)(); + +int main() +{ +#ifndef inet_ntoa_r + func_type func; + func = (func_type)&inet_ntoa_r; +#endif + return 0; +} +#endif +#ifdef HAVE_GETADDRINFO +#include +#include +#include + +int main(void) { + struct addrinfo hints, *ai; + int error; + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; +#ifndef getaddrinfo + (void)getaddrinfo; +#endif + error = getaddrinfo("127.0.0.1", "8080", &hints, &ai); + if (error) { + return 1; + } + return 0; +} +#endif +#ifdef HAVE_FILE_OFFSET_BITS +#ifdef _FILE_OFFSET_BITS +#undef _FILE_OFFSET_BITS +#endif +#define _FILE_OFFSET_BITS 64 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 
1 : -1]; +int main () { ; return 0; } +#endif +#ifdef HAVE_IOCTLSOCKET +/* includes start */ +#ifdef HAVE_WINDOWS_H +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# endif +# include +# ifdef HAVE_WINSOCK2_H +# include +# else +# ifdef HAVE_WINSOCK_H +# include +# endif +# endif +#endif + +int +main () +{ + +/* ioctlsocket source code */ + int socket; + unsigned long flags = ioctlsocket(socket, FIONBIO, &flags); + + ; + return 0; +} + +#endif +#ifdef HAVE_IOCTLSOCKET_CAMEL +/* includes start */ +#ifdef HAVE_WINDOWS_H +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# endif +# include +# ifdef HAVE_WINSOCK2_H +# include +# else +# ifdef HAVE_WINSOCK_H +# include +# endif +# endif +#endif + +int +main () +{ + +/* IoctlSocket source code */ + if(0 != IoctlSocket(0, 0, 0)) + return 1; + ; + return 0; +} +#endif +#ifdef HAVE_IOCTLSOCKET_CAMEL_FIONBIO +/* includes start */ +#ifdef HAVE_WINDOWS_H +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# endif +# include +# ifdef HAVE_WINSOCK2_H +# include +# else +# ifdef HAVE_WINSOCK_H +# include +# endif +# endif +#endif + +int +main () +{ + +/* IoctlSocket source code */ + long flags = 0; + if(0 != ioctlsocket(0, FIONBIO, &flags)) + return 1; + ; + return 0; +} +#endif +#ifdef HAVE_IOCTLSOCKET_FIONBIO +/* includes start */ +#ifdef HAVE_WINDOWS_H +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# endif +# include +# ifdef HAVE_WINSOCK2_H +# include +# else +# ifdef HAVE_WINSOCK_H +# include +# endif +# endif +#endif + +int +main () +{ + + int flags = 0; + if(0 != ioctlsocket(0, FIONBIO, &flags)) + return 1; + + ; + return 0; +} +#endif +#ifdef HAVE_IOCTL_FIONBIO +/* headers for FIONBIO test */ +/* includes start */ +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +#ifdef HAVE_SYS_IOCTL_H +# include +#endif +#ifdef HAVE_STROPTS_H +# include +#endif + +int +main () +{ + + int flags = 0; + if(0 != ioctl(0, FIONBIO, &flags)) + return 1; + + ; + return 0; +} +#endif +#ifdef HAVE_IOCTL_SIOCGIFADDR +/* headers for FIONBIO test */ +/* includes start */ +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +#ifdef HAVE_SYS_IOCTL_H +# include +#endif +#ifdef HAVE_STROPTS_H +# include +#endif +#include + +int +main () +{ + struct ifreq ifr; + if(0 != ioctl(0, SIOCGIFADDR, &ifr)) + return 1; + + ; + return 0; +} +#endif +#ifdef HAVE_SETSOCKOPT_SO_NONBLOCK +/* includes start */ +#ifdef HAVE_WINDOWS_H +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# endif +# include +# ifdef HAVE_WINSOCK2_H +# include +# else +# ifdef HAVE_WINSOCK_H +# include +# endif +# endif +#endif +/* includes start */ +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +/* includes end */ + +int +main () +{ + if(0 != setsockopt(0, SOL_SOCKET, SO_NONBLOCK, 0, 0)) + return 1; + ; + return 0; +} +#endif +#ifdef HAVE_GLIBC_STRERROR_R +#include +#include +int +main () { + char buffer[1024]; /* big enough to play with */ + char *string = + strerror_r(EACCES, buffer, sizeof(buffer)); + /* this should've returned a string */ + if(!string || !string[0]) + return 99; + return 0; +} +#endif +#ifdef HAVE_POSIX_STRERROR_R +#include +#include +int +main () { + char buffer[1024]; /* big enough to play with */ + int error = + strerror_r(EACCES, buffer, sizeof(buffer)); + /* This should've returned zero, and written an error string in the + buffer.*/ + 
if(!buffer[0] || error) + return 99; + return 0; +} +#endif +#ifdef HAVE_FSETXATTR_6 +#include /* header from libc, not from libattr */ +int +main() { + fsetxattr(0, 0, 0, 0, 0, 0); + return 0; +} +#endif +#ifdef HAVE_FSETXATTR_5 +#include /* header from libc, not from libattr */ +int +main() { + fsetxattr(0, 0, 0, 0, 0); + return 0; +} +#endif diff --git a/deps-win32/curl-7.54.1/CMake/FindCARES.cmake b/deps-win32/curl-7.54.1/CMake/FindCARES.cmake new file mode 100644 index 0000000..c4ab5f1 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/FindCARES.cmake @@ -0,0 +1,42 @@ +# - Find c-ares +# Find the c-ares includes and library +# This module defines +# CARES_INCLUDE_DIR, where to find ares.h, etc. +# CARES_LIBRARIES, the libraries needed to use c-ares. +# CARES_FOUND, If false, do not try to use c-ares. +# also defined, but not for general use are +# CARES_LIBRARY, where to find the c-ares library. + +FIND_PATH(CARES_INCLUDE_DIR ares.h + /usr/local/include + /usr/include + ) + +SET(CARES_NAMES ${CARES_NAMES} cares) +FIND_LIBRARY(CARES_LIBRARY + NAMES ${CARES_NAMES} + PATHS /usr/lib /usr/local/lib + ) + +IF (CARES_LIBRARY AND CARES_INCLUDE_DIR) + SET(CARES_LIBRARIES ${CARES_LIBRARY}) + SET(CARES_FOUND "YES") +ELSE (CARES_LIBRARY AND CARES_INCLUDE_DIR) + SET(CARES_FOUND "NO") +ENDIF (CARES_LIBRARY AND CARES_INCLUDE_DIR) + + +IF (CARES_FOUND) + IF (NOT CARES_FIND_QUIETLY) + MESSAGE(STATUS "Found c-ares: ${CARES_LIBRARIES}") + ENDIF (NOT CARES_FIND_QUIETLY) +ELSE (CARES_FOUND) + IF (CARES_FIND_REQUIRED) + MESSAGE(FATAL_ERROR "Could not find c-ares library") + ENDIF (CARES_FIND_REQUIRED) +ENDIF (CARES_FOUND) + +MARK_AS_ADVANCED( + CARES_LIBRARY + CARES_INCLUDE_DIR + ) diff --git a/deps-win32/curl-7.54.1/CMake/FindGSS.cmake b/deps-win32/curl-7.54.1/CMake/FindGSS.cmake new file mode 100644 index 0000000..60dcb73 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/FindGSS.cmake @@ -0,0 +1,289 @@ +# - Try to find the GSS Kerberos library +# Once done this will define +# +# GSS_ROOT_DIR - Set this variable to the root installation of GSS +# +# Read-Only variables: +# GSS_FOUND - system has the Heimdal library +# GSS_FLAVOUR - "MIT" or "Heimdal" if anything found. +# GSS_INCLUDE_DIR - the Heimdal include directory +# GSS_LIBRARIES - The libraries needed to use GSS +# GSS_LINK_DIRECTORIES - Directories to add to linker search path +# GSS_LINKER_FLAGS - Additional linker flags +# GSS_COMPILER_FLAGS - Additional compiler flags +# GSS_VERSION - This is set to version advertised by pkg-config or read from manifest. +# In case the library is found but no version info available it'll be set to "unknown" + +set(_MIT_MODNAME mit-krb5-gssapi) +set(_HEIMDAL_MODNAME heimdal-gssapi) + +include(CheckIncludeFile) +include(CheckIncludeFiles) +include(CheckTypeSize) + +set(_GSS_ROOT_HINTS + "${GSS_ROOT_DIR}" + "$ENV{GSS_ROOT_DIR}" +) + +# try to find library using system pkg-config if user didn't specify root dir +if(NOT GSS_ROOT_DIR AND NOT "$ENV{GSS_ROOT_DIR}") + if(UNIX) + find_package(PkgConfig QUIET) + pkg_search_module(_GSS_PKG ${_MIT_MODNAME} ${_HEIMDAL_MODNAME}) + list(APPEND _GSS_ROOT_HINTS "${_GSS_PKG_PREFIX}") + elseif(WIN32) + list(APPEND _GSS_ROOT_HINTS "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MIT\\Kerberos;InstallDir]") + endif() +endif() + +if(NOT _GSS_FOUND) #not found by pkg-config. Let's take more traditional approach. 
+ find_file(_GSS_CONFIGURE_SCRIPT + NAMES + "krb5-config" + HINTS + ${_GSS_ROOT_HINTS} + PATH_SUFFIXES + bin + NO_CMAKE_PATH + NO_CMAKE_ENVIRONMENT_PATH + ) + + # if not found in user-supplied directories, maybe system knows better + find_file(_GSS_CONFIGURE_SCRIPT + NAMES + "krb5-config" + PATH_SUFFIXES + bin + ) + + if(_GSS_CONFIGURE_SCRIPT) + execute_process( + COMMAND ${_GSS_CONFIGURE_SCRIPT} "--cflags" "gssapi" + OUTPUT_VARIABLE _GSS_CFLAGS + RESULT_VARIABLE _GSS_CONFIGURE_FAILED + ) +message(STATUS "CFLAGS: ${_GSS_CFLAGS}") + if(NOT _GSS_CONFIGURE_FAILED) # 0 means success + # should also work in an odd case when multiple directories are given + string(STRIP "${_GSS_CFLAGS}" _GSS_CFLAGS) + string(REGEX REPLACE " +-I" ";" _GSS_CFLAGS "${_GSS_CFLAGS}") + string(REGEX REPLACE " +-([^I][^ \\t;]*)" ";-\\1"_GSS_CFLAGS "${_GSS_CFLAGS}") + + foreach(_flag ${_GSS_CFLAGS}) + if(_flag MATCHES "^-I.*") + string(REGEX REPLACE "^-I" "" _val "${_flag}") + list(APPEND _GSS_INCLUDE_DIR "${_val}") + else() + list(APPEND _GSS_COMPILER_FLAGS "${_flag}") + endif() + endforeach() + endif() + + execute_process( + COMMAND ${_GSS_CONFIGURE_SCRIPT} "--libs" "gssapi" + OUTPUT_VARIABLE _GSS_LIB_FLAGS + RESULT_VARIABLE _GSS_CONFIGURE_FAILED + ) +message(STATUS "LDFLAGS: ${_GSS_LIB_FLAGS}") + if(NOT _GSS_CONFIGURE_FAILED) # 0 means success + # this script gives us libraries and link directories. Blah. We have to deal with it. + string(STRIP "${_GSS_LIB_FLAGS}" _GSS_LIB_FLAGS) + string(REGEX REPLACE " +-(L|l)" ";-\\1" _GSS_LIB_FLAGS "${_GSS_LIB_FLAGS}") + string(REGEX REPLACE " +-([^Ll][^ \\t;]*)" ";-\\1"_GSS_LIB_FLAGS "${_GSS_LIB_FLAGS}") + + foreach(_flag ${_GSS_LIB_FLAGS}) + if(_flag MATCHES "^-l.*") + string(REGEX REPLACE "^-l" "" _val "${_flag}") + list(APPEND _GSS_LIBRARIES "${_val}") + elseif(_flag MATCHES "^-L.*") + string(REGEX REPLACE "^-L" "" _val "${_flag}") + list(APPEND _GSS_LINK_DIRECTORIES "${_val}") + else() + list(APPEND _GSS_LINKER_FLAGS "${_flag}") + endif() + endforeach() + endif() + + + execute_process( + COMMAND ${_GSS_CONFIGURE_SCRIPT} "--version" + OUTPUT_VARIABLE _GSS_VERSION + RESULT_VARIABLE _GSS_CONFIGURE_FAILED + ) + + # older versions may not have the "--version" parameter. In this case we just don't care. + if(_GSS_CONFIGURE_FAILED) + set(_GSS_VERSION 0) + endif() + + + execute_process( + COMMAND ${_GSS_CONFIGURE_SCRIPT} "--vendor" + OUTPUT_VARIABLE _GSS_VENDOR + RESULT_VARIABLE _GSS_CONFIGURE_FAILED + ) + + # older versions may not have the "--vendor" parameter. In this case we just don't care. + if(_GSS_CONFIGURE_FAILED) + set(GSS_FLAVOUR "Heimdal") # most probably, shouldn't really matter + else() + if(_GSS_VENDOR MATCHES ".*H|heimdal.*") + set(GSS_FLAVOUR "Heimdal") + else() + set(GSS_FLAVOUR "MIT") + endif() + endif() + + else() # either there is no config script or we are on platform that doesn't provide one (Windows?) 
+ + find_path(_GSS_INCLUDE_DIR + NAMES + "gssapi/gssapi.h" + HINTS + ${_GSS_ROOT_HINTS} + PATH_SUFFIXES + include + inc + ) + + if(_GSS_INCLUDE_DIR) #jay, we've found something + set(CMAKE_REQUIRED_INCLUDES "${_GSS_INCLUDE_DIR}") + check_include_files( "gssapi/gssapi_generic.h;gssapi/gssapi_krb5.h" _GSS_HAVE_MIT_HEADERS) + + if(_GSS_HAVE_MIT_HEADERS) + set(GSS_FLAVOUR "MIT") + else() + # prevent compiling the header - just check if we can include it + set(CMAKE_REQUIRED_DEFINITIONS "${CMAKE_REQUIRED_DEFINITIONS} -D__ROKEN_H__") + check_include_file( "roken.h" _GSS_HAVE_ROKEN_H) + + check_include_file( "heimdal/roken.h" _GSS_HAVE_HEIMDAL_ROKEN_H) + if(_GSS_HAVE_ROKEN_H OR _GSS_HAVE_HEIMDAL_ROKEN_H) + set(GSS_FLAVOUR "Heimdal") + endif() + set(CMAKE_REQUIRED_DEFINITIONS "") + endif() + else() + # I'm not convienced if this is the right way but this is what autotools do at the moment + find_path(_GSS_INCLUDE_DIR + NAMES + "gssapi.h" + HINTS + ${_GSS_ROOT_HINTS} + PATH_SUFFIXES + include + inc + ) + + if(_GSS_INCLUDE_DIR) + set(GSS_FLAVOUR "Heimdal") + endif() + endif() + + # if we have headers, check if we can link libraries + if(GSS_FLAVOUR) + set(_GSS_LIBDIR_SUFFIXES "") + set(_GSS_LIBDIR_HINTS ${_GSS_ROOT_HINTS}) + get_filename_component(_GSS_CALCULATED_POTENTIAL_ROOT "${_GSS_INCLUDE_DIR}" PATH) + list(APPEND _GSS_LIBDIR_HINTS ${_GSS_CALCULATED_POTENTIAL_ROOT}) + + if(WIN32) + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + list(APPEND _GSS_LIBDIR_SUFFIXES "lib/AMD64") + if(GSS_FLAVOUR STREQUAL "MIT") + set(_GSS_LIBNAME "gssapi64") + else() + set(_GSS_LIBNAME "libgssapi") + endif() + else() + list(APPEND _GSS_LIBDIR_SUFFIXES "lib/i386") + if(GSS_FLAVOUR STREQUAL "MIT") + set(_GSS_LIBNAME "gssapi32") + else() + set(_GSS_LIBNAME "libgssapi") + endif() + endif() + else() + list(APPEND _GSS_LIBDIR_SUFFIXES "lib;lib64") # those suffixes are not checked for HINTS + if(GSS_FLAVOUR STREQUAL "MIT") + set(_GSS_LIBNAME "gssapi_krb5") + else() + set(_GSS_LIBNAME "gssapi") + endif() + endif() + + find_library(_GSS_LIBRARIES + NAMES + ${_GSS_LIBNAME} + HINTS + ${_GSS_LIBDIR_HINTS} + PATH_SUFFIXES + ${_GSS_LIBDIR_SUFFIXES} + ) + + endif() + + endif() +else() + if(_GSS_PKG_${_MIT_MODNAME}_VERSION) + set(GSS_FLAVOUR "MIT") + set(_GSS_VERSION _GSS_PKG_${_MIT_MODNAME}_VERSION) + else() + set(GSS_FLAVOUR "Heimdal") + set(_GSS_VERSION _GSS_PKG_${_MIT_HEIMDAL}_VERSION) + endif() +endif() + +set(GSS_INCLUDE_DIR ${_GSS_INCLUDE_DIR}) +set(GSS_LIBRARIES ${_GSS_LIBRARIES}) +set(GSS_LINK_DIRECTORIES ${_GSS_LINK_DIRECTORIES}) +set(GSS_LINKER_FLAGS ${_GSS_LINKER_FLAGS}) +set(GSS_COMPILER_FLAGS ${_GSS_COMPILER_FLAGS}) +set(GSS_VERSION ${_GSS_VERSION}) + +if(GSS_FLAVOUR) + + if(NOT GSS_VERSION AND GSS_FLAVOUR STREQUAL "Heimdal") + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(HEIMDAL_MANIFEST_FILE "Heimdal.Application.amd64.manifest") + else() + set(HEIMDAL_MANIFEST_FILE "Heimdal.Application.x86.manifest") + endif() + + if(EXISTS "${GSS_INCLUDE_DIR}/${HEIMDAL_MANIFEST_FILE}") + file(STRINGS "${GSS_INCLUDE_DIR}/${HEIMDAL_MANIFEST_FILE}" heimdal_version_str + REGEX "^.*version=\"[0-9]\\.[^\"]+\".*$") + + string(REGEX MATCH "[0-9]\\.[^\"]+" + GSS_VERSION "${heimdal_version_str}") + endif() + + if(NOT GSS_VERSION) + set(GSS_VERSION "Heimdal Unknown") + endif() + elseif(NOT GSS_VERSION AND GSS_FLAVOUR STREQUAL "MIT") + get_filename_component(_MIT_VERSION "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MIT\\Kerberos\\SDK\\CurrentVersion;VersionString]" NAME CACHE) + if(WIN32 AND _MIT_VERSION) + set(GSS_VERSION "${_MIT_VERSION}") + else() + set(GSS_VERSION "MIT 
Unknown") + endif() + endif() +endif() + + +include(FindPackageHandleStandardArgs) + +set(_GSS_REQUIRED_VARS GSS_LIBRARIES GSS_FLAVOUR) + +find_package_handle_standard_args(GSS + REQUIRED_VARS + ${_GSS_REQUIRED_VARS} + VERSION_VAR + GSS_VERSION + FAIL_MESSAGE + "Could NOT find GSS, try to set the path to GSS root folder in the system variable GSS_ROOT_DIR" +) + +mark_as_advanced(GSS_INCLUDE_DIR GSS_LIBRARIES) diff --git a/deps-win32/curl-7.54.1/CMake/FindLibSSH2.cmake b/deps-win32/curl-7.54.1/CMake/FindLibSSH2.cmake new file mode 100644 index 0000000..12a7c61 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/FindLibSSH2.cmake @@ -0,0 +1,35 @@ +# - Try to find the libssh2 library +# Once done this will define +# +# LIBSSH2_FOUND - system has the libssh2 library +# LIBSSH2_INCLUDE_DIR - the libssh2 include directory +# LIBSSH2_LIBRARY - the libssh2 library name + +if (LIBSSH2_INCLUDE_DIR AND LIBSSH2_LIBRARY) + set(LibSSH2_FIND_QUIETLY TRUE) +endif (LIBSSH2_INCLUDE_DIR AND LIBSSH2_LIBRARY) + +FIND_PATH(LIBSSH2_INCLUDE_DIR libssh2.h +) + +FIND_LIBRARY(LIBSSH2_LIBRARY NAMES ssh2 +) + +if(LIBSSH2_INCLUDE_DIR) + file(STRINGS "${LIBSSH2_INCLUDE_DIR}/libssh2.h" libssh2_version_str REGEX "^#define[\t ]+LIBSSH2_VERSION_NUM[\t ]+0x[0-9][0-9][0-9][0-9][0-9][0-9].*") + + string(REGEX REPLACE "^.*LIBSSH2_VERSION_NUM[\t ]+0x([0-9][0-9]).*$" "\\1" LIBSSH2_VERSION_MAJOR "${libssh2_version_str}") + string(REGEX REPLACE "^.*LIBSSH2_VERSION_NUM[\t ]+0x[0-9][0-9]([0-9][0-9]).*$" "\\1" LIBSSH2_VERSION_MINOR "${libssh2_version_str}") + string(REGEX REPLACE "^.*LIBSSH2_VERSION_NUM[\t ]+0x[0-9][0-9][0-9][0-9]([0-9][0-9]).*$" "\\1" LIBSSH2_VERSION_PATCH "${libssh2_version_str}") + + string(REGEX REPLACE "^0(.+)" "\\1" LIBSSH2_VERSION_MAJOR "${LIBSSH2_VERSION_MAJOR}") + string(REGEX REPLACE "^0(.+)" "\\1" LIBSSH2_VERSION_MINOR "${LIBSSH2_VERSION_MINOR}") + string(REGEX REPLACE "^0(.+)" "\\1" LIBSSH2_VERSION_PATCH "${LIBSSH2_VERSION_PATCH}") + + set(LIBSSH2_VERSION "${LIBSSH2_VERSION_MAJOR}.${LIBSSH2_VERSION_MINOR}.${LIBSSH2_VERSION_PATCH}") +endif(LIBSSH2_INCLUDE_DIR) + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(LibSSH2 DEFAULT_MSG LIBSSH2_INCLUDE_DIR LIBSSH2_LIBRARY ) + +MARK_AS_ADVANCED(LIBSSH2_INCLUDE_DIR LIBSSH2_LIBRARY LIBSSH2_VERSION_MAJOR LIBSSH2_VERSION_MINOR LIBSSH2_VERSION_PATCH LIBSSH2_VERSION) diff --git a/deps-win32/curl-7.54.1/CMake/FindMbedTLS.cmake b/deps-win32/curl-7.54.1/CMake/FindMbedTLS.cmake new file mode 100644 index 0000000..a916395 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/FindMbedTLS.cmake @@ -0,0 +1,13 @@ +find_path(MBEDTLS_INCLUDE_DIRS mbedtls/ssl.h) + +find_library(MBEDTLS_LIBRARY mbedtls) +find_library(MBEDX509_LIBRARY mbedx509) +find_library(MBEDCRYPTO_LIBRARY mbedcrypto) + +set(MBEDTLS_LIBRARIES "${MBEDTLS_LIBRARY}" "${MBEDX509_LIBRARY}" "${MBEDCRYPTO_LIBRARY}") + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(MBEDTLS DEFAULT_MSG + MBEDTLS_INCLUDE_DIRS MBEDTLS_LIBRARY MBEDX509_LIBRARY MBEDCRYPTO_LIBRARY) + +mark_as_advanced(MBEDTLS_INCLUDE_DIRS MBEDTLS_LIBRARY MBEDX509_LIBRARY MBEDCRYPTO_LIBRARY) diff --git a/deps-win32/curl-7.54.1/CMake/FindNGHTTP2.cmake b/deps-win32/curl-7.54.1/CMake/FindNGHTTP2.cmake new file mode 100644 index 0000000..4e566cf --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/FindNGHTTP2.cmake @@ -0,0 +1,18 @@ +include(FindPackageHandleStandardArgs) + +find_path(NGHTTP2_INCLUDE_DIR "nghttp2/nghttp2.h") + +find_library(NGHTTP2_LIBRARY NAMES nghttp2) + +find_package_handle_standard_args(NGHTTP2 + 
FOUND_VAR + NGHTTP2_FOUND + REQUIRED_VARS + NGHTTP2_LIBRARY + NGHTTP2_INCLUDE_DIR + FAIL_MESSAGE + "Could NOT find NGHTTP2" +) + +set(NGHTTP2_INCLUDE_DIRS ${NGHTTP2_INCLUDE_DIR} ) +set(NGHTTP2_LIBRARIES ${NGHTTP2_LIBRARY}) diff --git a/deps-win32/curl-7.54.1/CMake/Macros.cmake b/deps-win32/curl-7.54.1/CMake/Macros.cmake new file mode 100644 index 0000000..dab005f --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/Macros.cmake @@ -0,0 +1,95 @@ +#File defines convenience macros for available feature testing + +# This macro checks if the symbol exists in the library and if it +# does, it prepends library to the list. It is intended to be called +# multiple times with a sequence of possibly dependent libraries in +# order of least-to-most-dependent. Some libraries depend on others +# to link correctly. +macro(CHECK_LIBRARY_EXISTS_CONCAT LIBRARY SYMBOL VARIABLE) + check_library_exists("${LIBRARY};${CURL_LIBS}" ${SYMBOL} "${CMAKE_LIBRARY_PATH}" + ${VARIABLE}) + if(${VARIABLE}) + set(CURL_LIBS ${LIBRARY} ${CURL_LIBS}) + endif(${VARIABLE}) +endmacro(CHECK_LIBRARY_EXISTS_CONCAT) + +# Check if header file exists and add it to the list. +# This macro is intended to be called multiple times with a sequence of +# possibly dependent header files. Some headers depend on others to be +# compiled correctly. +macro(CHECK_INCLUDE_FILE_CONCAT FILE VARIABLE) + check_include_files("${CURL_INCLUDES};${FILE}" ${VARIABLE}) + if(${VARIABLE}) + set(CURL_INCLUDES ${CURL_INCLUDES} ${FILE}) + set(CURL_TEST_DEFINES "${CURL_TEST_DEFINES} -D${VARIABLE}") + endif(${VARIABLE}) +endmacro(CHECK_INCLUDE_FILE_CONCAT) + +# For other curl specific tests, use this macro. +macro(CURL_INTERNAL_TEST CURL_TEST) + if(NOT DEFINED "${CURL_TEST}") + set(MACRO_CHECK_FUNCTION_DEFINITIONS + "-D${CURL_TEST} ${CURL_TEST_DEFINES} ${CMAKE_REQUIRED_FLAGS}") + if(CMAKE_REQUIRED_LIBRARIES) + set(CURL_TEST_ADD_LIBRARIES + "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}") + endif(CMAKE_REQUIRED_LIBRARIES) + + message(STATUS "Performing Curl Test ${CURL_TEST}") + try_compile(${CURL_TEST} + ${CMAKE_BINARY_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c + CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS} + "${CURL_TEST_ADD_LIBRARIES}" + OUTPUT_VARIABLE OUTPUT) + if(${CURL_TEST}) + set(${CURL_TEST} 1 CACHE INTERNAL "Curl test ${FUNCTION}") + message(STATUS "Performing Curl Test ${CURL_TEST} - Success") + file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "Performing Curl Test ${CURL_TEST} passed with the following output:\n" + "${OUTPUT}\n") + else(${CURL_TEST}) + message(STATUS "Performing Curl Test ${CURL_TEST} - Failed") + set(${CURL_TEST} "" CACHE INTERNAL "Curl test ${FUNCTION}") + file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "Performing Curl Test ${CURL_TEST} failed with the following output:\n" + "${OUTPUT}\n") + endif(${CURL_TEST}) + endif() +endmacro(CURL_INTERNAL_TEST) + +macro(CURL_INTERNAL_TEST_RUN CURL_TEST) + if(NOT DEFINED "${CURL_TEST}_COMPILE") + set(MACRO_CHECK_FUNCTION_DEFINITIONS + "-D${CURL_TEST} ${CMAKE_REQUIRED_FLAGS}") + if(CMAKE_REQUIRED_LIBRARIES) + set(CURL_TEST_ADD_LIBRARIES + "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}") + endif(CMAKE_REQUIRED_LIBRARIES) + + message(STATUS "Performing Curl Test ${CURL_TEST}") + try_run(${CURL_TEST} ${CURL_TEST}_COMPILE + ${CMAKE_BINARY_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c + CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS} + "${CURL_TEST_ADD_LIBRARIES}" + 
OUTPUT_VARIABLE OUTPUT) + if(${CURL_TEST}_COMPILE AND NOT ${CURL_TEST}) + set(${CURL_TEST} 1 CACHE INTERNAL "Curl test ${FUNCTION}") + message(STATUS "Performing Curl Test ${CURL_TEST} - Success") + else(${CURL_TEST}_COMPILE AND NOT ${CURL_TEST}) + message(STATUS "Performing Curl Test ${CURL_TEST} - Failed") + set(${CURL_TEST} "" CACHE INTERNAL "Curl test ${FUNCTION}") + file(APPEND "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log" + "Performing Curl Test ${CURL_TEST} failed with the following output:\n" + "${OUTPUT}") + if(${CURL_TEST}_COMPILE) + file(APPEND + "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log" + "There was a problem running this test\n") + endif(${CURL_TEST}_COMPILE) + file(APPEND "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log" + "\n\n") + endif(${CURL_TEST}_COMPILE AND NOT ${CURL_TEST}) + endif() +endmacro(CURL_INTERNAL_TEST_RUN) diff --git a/deps-win32/curl-7.54.1/CMake/OtherTests.cmake b/deps-win32/curl-7.54.1/CMake/OtherTests.cmake new file mode 100644 index 0000000..3b203c5 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/OtherTests.cmake @@ -0,0 +1,232 @@ +include(CheckCSourceCompiles) +# The begin of the sources (macros and includes) +set(_source_epilogue "#undef inline") + +macro(add_header_include check header) + if(${check}) + set(_source_epilogue "${_source_epilogue}\n#include <${header}>") + endif(${check}) +endmacro(add_header_include) + +set(signature_call_conv) +if(HAVE_WINDOWS_H) + add_header_include(HAVE_WINSOCK2_H "winsock2.h") + add_header_include(HAVE_WINDOWS_H "windows.h") + add_header_include(HAVE_WINSOCK_H "winsock.h") + set(_source_epilogue + "${_source_epilogue}\n#ifndef WIN32_LEAN_AND_MEAN\n#define WIN32_LEAN_AND_MEAN\n#endif") + set(signature_call_conv "PASCAL") + if(HAVE_LIBWS2_32) + set(CMAKE_REQUIRED_LIBRARIES ws2_32) + endif() +else(HAVE_WINDOWS_H) + add_header_include(HAVE_SYS_TYPES_H "sys/types.h") + add_header_include(HAVE_SYS_SOCKET_H "sys/socket.h") +endif(HAVE_WINDOWS_H) + +check_c_source_compiles("${_source_epilogue} +int main(void) { + recv(0, 0, 0, 0); + return 0; +}" curl_cv_recv) +if(curl_cv_recv) + if(NOT DEFINED curl_cv_func_recv_args OR "${curl_cv_func_recv_args}" STREQUAL "unknown") + foreach(recv_retv "int" "ssize_t" ) + foreach(recv_arg1 "int" "ssize_t" "SOCKET") + foreach(recv_arg2 "void *" "char *") + foreach(recv_arg3 "size_t" "int" "socklen_t" "unsigned int") + foreach(recv_arg4 "int" "unsigned int") + if(NOT curl_cv_func_recv_done) + unset(curl_cv_func_recv_test CACHE) + check_c_source_compiles(" + ${_source_epilogue} + extern ${recv_retv} ${signature_call_conv} + recv(${recv_arg1}, ${recv_arg2}, ${recv_arg3}, ${recv_arg4}); + int main(void) { + ${recv_arg1} s=0; + ${recv_arg2} buf=0; + ${recv_arg3} len=0; + ${recv_arg4} flags=0; + ${recv_retv} res = recv(s, buf, len, flags); + (void) res; + return 0; + }" + curl_cv_func_recv_test) + message(STATUS + "Tested: ${recv_retv} recv(${recv_arg1}, ${recv_arg2}, ${recv_arg3}, ${recv_arg4})") + if(curl_cv_func_recv_test) + set(curl_cv_func_recv_args + "${recv_arg1},${recv_arg2},${recv_arg3},${recv_arg4},${recv_retv}") + set(RECV_TYPE_ARG1 "${recv_arg1}") + set(RECV_TYPE_ARG2 "${recv_arg2}") + set(RECV_TYPE_ARG3 "${recv_arg3}") + set(RECV_TYPE_ARG4 "${recv_arg4}") + set(RECV_TYPE_RETV "${recv_retv}") + set(HAVE_RECV 1) + set(curl_cv_func_recv_done 1) + endif(curl_cv_func_recv_test) + endif(NOT curl_cv_func_recv_done) + endforeach(recv_arg4) + endforeach(recv_arg3) + endforeach(recv_arg2) + endforeach(recv_arg1) + endforeach(recv_retv) + else() + 
string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG1 "${curl_cv_func_recv_args}") + string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG2 "${curl_cv_func_recv_args}") + string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG3 "${curl_cv_func_recv_args}") + string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" RECV_TYPE_ARG4 "${curl_cv_func_recv_args}") + string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" RECV_TYPE_RETV "${curl_cv_func_recv_args}") + endif() + + if("${curl_cv_func_recv_args}" STREQUAL "unknown") + message(FATAL_ERROR "Cannot find proper types to use for recv args") + endif("${curl_cv_func_recv_args}" STREQUAL "unknown") +else(curl_cv_recv) + message(FATAL_ERROR "Unable to link function recv") +endif(curl_cv_recv) +set(curl_cv_func_recv_args "${curl_cv_func_recv_args}" CACHE INTERNAL "Arguments for recv") +set(HAVE_RECV 1) + +check_c_source_compiles("${_source_epilogue} +int main(void) { + send(0, 0, 0, 0); + return 0; +}" curl_cv_send) +if(curl_cv_send) + if(NOT DEFINED curl_cv_func_send_args OR "${curl_cv_func_send_args}" STREQUAL "unknown") + foreach(send_retv "int" "ssize_t" ) + foreach(send_arg1 "int" "ssize_t" "SOCKET") + foreach(send_arg2 "const void *" "void *" "char *" "const char *") + foreach(send_arg3 "size_t" "int" "socklen_t" "unsigned int") + foreach(send_arg4 "int" "unsigned int") + if(NOT curl_cv_func_send_done) + unset(curl_cv_func_send_test CACHE) + check_c_source_compiles(" + ${_source_epilogue} + extern ${send_retv} ${signature_call_conv} + send(${send_arg1}, ${send_arg2}, ${send_arg3}, ${send_arg4}); + int main(void) { + ${send_arg1} s=0; + ${send_arg2} buf=0; + ${send_arg3} len=0; + ${send_arg4} flags=0; + ${send_retv} res = send(s, buf, len, flags); + (void) res; + return 0; + }" + curl_cv_func_send_test) + message(STATUS + "Tested: ${send_retv} send(${send_arg1}, ${send_arg2}, ${send_arg3}, ${send_arg4})") + if(curl_cv_func_send_test) + string(REGEX REPLACE "(const) .*" "\\1" send_qual_arg2 "${send_arg2}") + string(REGEX REPLACE "const (.*)" "\\1" send_arg2 "${send_arg2}") + set(curl_cv_func_send_args + "${send_arg1},${send_arg2},${send_arg3},${send_arg4},${send_retv},${send_qual_arg2}") + set(SEND_TYPE_ARG1 "${send_arg1}") + set(SEND_TYPE_ARG2 "${send_arg2}") + set(SEND_TYPE_ARG3 "${send_arg3}") + set(SEND_TYPE_ARG4 "${send_arg4}") + set(SEND_TYPE_RETV "${send_retv}") + set(HAVE_SEND 1) + set(curl_cv_func_send_done 1) + endif(curl_cv_func_send_test) + endif(NOT curl_cv_func_send_done) + endforeach(send_arg4) + endforeach(send_arg3) + endforeach(send_arg2) + endforeach(send_arg1) + endforeach(send_retv) + else() + string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG1 "${curl_cv_func_send_args}") + string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG2 "${curl_cv_func_send_args}") + string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG3 "${curl_cv_func_send_args}") + string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG4 "${curl_cv_func_send_args}") + string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" SEND_TYPE_RETV "${curl_cv_func_send_args}") + string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" SEND_QUAL_ARG2 "${curl_cv_func_send_args}") + endif() + + if("${curl_cv_func_send_args}" STREQUAL "unknown") + message(FATAL_ERROR "Cannot find proper types to use for send args") + endif("${curl_cv_func_send_args}" 
STREQUAL "unknown") + set(SEND_QUAL_ARG2 "const") +else(curl_cv_send) + message(FATAL_ERROR "Unable to link function send") +endif(curl_cv_send) +set(curl_cv_func_send_args "${curl_cv_func_send_args}" CACHE INTERNAL "Arguments for send") +set(HAVE_SEND 1) + +check_c_source_compiles("${_source_epilogue} + int main(void) { + int flag = MSG_NOSIGNAL; + (void)flag; + return 0; + }" HAVE_MSG_NOSIGNAL) + +if(NOT HAVE_WINDOWS_H) + add_header_include(HAVE_SYS_TIME_H "sys/time.h") + add_header_include(TIME_WITH_SYS_TIME "time.h") + add_header_include(HAVE_TIME_H "time.h") +endif() +check_c_source_compiles("${_source_epilogue} +int main(void) { + struct timeval ts; + ts.tv_sec = 0; + ts.tv_usec = 0; + (void)ts; + return 0; +}" HAVE_STRUCT_TIMEVAL) + + +include(CheckCSourceRuns) +# See HAVE_POLL in CMakeLists.txt for why poll is disabled on macOS +if(NOT APPLE) + set(CMAKE_REQUIRED_FLAGS) + if(HAVE_SYS_POLL_H) + set(CMAKE_REQUIRED_FLAGS "-DHAVE_SYS_POLL_H") + endif(HAVE_SYS_POLL_H) + check_c_source_runs(" + #ifdef HAVE_SYS_POLL_H + # include + #endif + int main(void) { + return poll((void *)0, 0, 10 /*ms*/); + }" HAVE_POLL_FINE) +endif() + +set(HAVE_SIG_ATOMIC_T 1) +set(CMAKE_REQUIRED_FLAGS) +if(HAVE_SIGNAL_H) + set(CMAKE_REQUIRED_FLAGS "-DHAVE_SIGNAL_H") + set(CMAKE_EXTRA_INCLUDE_FILES "signal.h") +endif(HAVE_SIGNAL_H) +check_type_size("sig_atomic_t" SIZEOF_SIG_ATOMIC_T) +if(HAVE_SIZEOF_SIG_ATOMIC_T) + check_c_source_compiles(" + #ifdef HAVE_SIGNAL_H + # include + #endif + int main(void) { + static volatile sig_atomic_t dummy = 0; + (void)dummy; + return 0; + }" HAVE_SIG_ATOMIC_T_NOT_VOLATILE) + if(NOT HAVE_SIG_ATOMIC_T_NOT_VOLATILE) + set(HAVE_SIG_ATOMIC_T_VOLATILE 1) + endif(NOT HAVE_SIG_ATOMIC_T_NOT_VOLATILE) +endif(HAVE_SIZEOF_SIG_ATOMIC_T) + +if(HAVE_WINDOWS_H) + set(CMAKE_EXTRA_INCLUDE_FILES winsock2.h) +else() + set(CMAKE_EXTRA_INCLUDE_FILES) + if(HAVE_SYS_SOCKET_H) + set(CMAKE_EXTRA_INCLUDE_FILES sys/socket.h) + endif(HAVE_SYS_SOCKET_H) +endif() + +check_type_size("struct sockaddr_storage" SIZEOF_STRUCT_SOCKADDR_STORAGE) +if(HAVE_SIZEOF_STRUCT_SOCKADDR_STORAGE) + set(HAVE_STRUCT_SOCKADDR_STORAGE 1) +endif(HAVE_SIZEOF_STRUCT_SOCKADDR_STORAGE) + diff --git a/deps-win32/curl-7.54.1/CMake/Platforms/WindowsCache.cmake b/deps-win32/curl-7.54.1/CMake/Platforms/WindowsCache.cmake new file mode 100644 index 0000000..6fc2991 --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/Platforms/WindowsCache.cmake @@ -0,0 +1,125 @@ +if(NOT UNIX) + if(WIN32) + set(HAVE_LIBDL 0) + set(HAVE_LIBUCB 0) + set(HAVE_LIBSOCKET 0) + set(NOT_NEED_LIBNSL 0) + set(HAVE_LIBNSL 0) + set(HAVE_GETHOSTNAME 1) + set(HAVE_LIBZ 0) + set(HAVE_LIBCRYPTO 0) + + set(HAVE_DLOPEN 0) + + set(HAVE_ALLOCA_H 0) + set(HAVE_ARPA_INET_H 0) + set(HAVE_DLFCN_H 0) + set(HAVE_FCNTL_H 1) + set(HAVE_INTTYPES_H 0) + set(HAVE_IO_H 1) + set(HAVE_MALLOC_H 1) + set(HAVE_MEMORY_H 1) + set(HAVE_NETDB_H 0) + set(HAVE_NETINET_IF_ETHER_H 0) + set(HAVE_NETINET_IN_H 0) + set(HAVE_NET_IF_H 0) + set(HAVE_PROCESS_H 1) + set(HAVE_PWD_H 0) + set(HAVE_SETJMP_H 1) + set(HAVE_SGTTY_H 0) + set(HAVE_SIGNAL_H 1) + set(HAVE_SOCKIO_H 0) + set(HAVE_STDINT_H 0) + set(HAVE_STDLIB_H 1) + set(HAVE_STRINGS_H 0) + set(HAVE_STRING_H 1) + set(HAVE_SYS_PARAM_H 0) + set(HAVE_SYS_POLL_H 0) + set(HAVE_SYS_SELECT_H 0) + set(HAVE_SYS_SOCKET_H 0) + set(HAVE_SYS_SOCKIO_H 0) + set(HAVE_SYS_STAT_H 1) + set(HAVE_SYS_TIME_H 0) + set(HAVE_SYS_TYPES_H 1) + set(HAVE_SYS_UTIME_H 1) + set(HAVE_TERMIOS_H 0) + set(HAVE_TERMIO_H 0) + set(HAVE_TIME_H 1) + set(HAVE_UNISTD_H 0) + set(HAVE_UTIME_H 0) + 
set(HAVE_X509_H 0) + set(HAVE_ZLIB_H 0) + + set(HAVE_SIZEOF_LONG_DOUBLE 1) + set(SIZEOF_LONG_DOUBLE 8) + + set(HAVE_SOCKET 1) + set(HAVE_POLL 0) + set(HAVE_SELECT 1) + set(HAVE_STRDUP 1) + set(HAVE_STRSTR 1) + set(HAVE_STRTOK_R 0) + set(HAVE_STRFTIME 1) + set(HAVE_UNAME 0) + set(HAVE_STRCASECMP 0) + set(HAVE_STRICMP 1) + set(HAVE_STRCMPI 1) + set(HAVE_GETHOSTBYADDR 1) + set(HAVE_GETTIMEOFDAY 0) + set(HAVE_INET_ADDR 1) + set(HAVE_INET_NTOA 1) + set(HAVE_INET_NTOA_R 0) + set(HAVE_TCGETATTR 0) + set(HAVE_TCSETATTR 0) + set(HAVE_PERROR 1) + set(HAVE_CLOSESOCKET 1) + set(HAVE_SETVBUF 0) + set(HAVE_SIGSETJMP 0) + set(HAVE_GETPASS_R 0) + set(HAVE_STRLCAT 0) + set(HAVE_GETPWUID 0) + set(HAVE_GETEUID 0) + set(HAVE_UTIME 1) + set(HAVE_RAND_EGD 0) + set(HAVE_RAND_SCREEN 0) + set(HAVE_RAND_STATUS 0) + set(HAVE_GMTIME_R 0) + set(HAVE_LOCALTIME_R 0) + set(HAVE_GETHOSTBYADDR_R 0) + set(HAVE_GETHOSTBYNAME_R 0) + set(HAVE_SIGNAL_FUNC 1) + set(HAVE_SIGNAL_MACRO 0) + + set(HAVE_GETHOSTBYADDR_R_5 0) + set(HAVE_GETHOSTBYADDR_R_5_REENTRANT 0) + set(HAVE_GETHOSTBYADDR_R_7 0) + set(HAVE_GETHOSTBYADDR_R_7_REENTRANT 0) + set(HAVE_GETHOSTBYADDR_R_8 0) + set(HAVE_GETHOSTBYADDR_R_8_REENTRANT 0) + set(HAVE_GETHOSTBYNAME_R_3 0) + set(HAVE_GETHOSTBYNAME_R_3_REENTRANT 0) + set(HAVE_GETHOSTBYNAME_R_5 0) + set(HAVE_GETHOSTBYNAME_R_5_REENTRANT 0) + set(HAVE_GETHOSTBYNAME_R_6 0) + set(HAVE_GETHOSTBYNAME_R_6_REENTRANT 0) + + set(TIME_WITH_SYS_TIME 0) + set(HAVE_O_NONBLOCK 0) + set(HAVE_IN_ADDR_T 0) + set(HAVE_INET_NTOA_R_DECL 0) + set(HAVE_INET_NTOA_R_DECL_REENTRANT 0) + if(ENABLE_IPV6) + set(HAVE_GETADDRINFO 1) + else() + set(HAVE_GETADDRINFO 0) + endif() + set(STDC_HEADERS 1) + set(RETSIGTYPE_TEST 1) + + set(HAVE_SIGACTION 0) + set(HAVE_MACRO_SIGSETJMP 0) + else(WIN32) + message("This file should be included on Windows platform only") + endif(WIN32) +endif(NOT UNIX) + diff --git a/deps-win32/curl-7.54.1/CMake/Utilities.cmake b/deps-win32/curl-7.54.1/CMake/Utilities.cmake new file mode 100644 index 0000000..8b6276d --- /dev/null +++ b/deps-win32/curl-7.54.1/CMake/Utilities.cmake @@ -0,0 +1,44 @@ +# File containing various utilities + +# Converts a CMake list to a string containing elements separated by spaces +function(TO_LIST_SPACES _LIST_NAME OUTPUT_VAR) + set(NEW_LIST_SPACE) + foreach(ITEM ${${_LIST_NAME}}) + set(NEW_LIST_SPACE "${NEW_LIST_SPACE} ${ITEM}") + endforeach() + string(STRIP ${NEW_LIST_SPACE} NEW_LIST_SPACE) + set(${OUTPUT_VAR} "${NEW_LIST_SPACE}" PARENT_SCOPE) +endfunction() + +# Appends a lis of item to a string which is a space-separated list, if they don't already exist. +function(LIST_SPACES_APPEND_ONCE LIST_NAME) + string(REPLACE " " ";" _LIST ${${LIST_NAME}}) + list(APPEND _LIST ${ARGN}) + list(REMOVE_DUPLICATES _LIST) + to_list_spaces(_LIST NEW_LIST_SPACE) + set(${LIST_NAME} "${NEW_LIST_SPACE}" PARENT_SCOPE) +endfunction() + +# Convinience function that does the same as LIST(FIND ...) but with a TRUE/FALSE return value. 
+# Ex: IN_STR_LIST(MY_LIST "Searched item" WAS_FOUND) +function(IN_STR_LIST LIST_NAME ITEM_SEARCHED RETVAL) + list(FIND ${LIST_NAME} ${ITEM_SEARCHED} FIND_POS) + if(${FIND_POS} EQUAL -1) + set(${RETVAL} FALSE PARENT_SCOPE) + else() + set(${RETVAL} TRUE PARENT_SCOPE) + endif() +endfunction() + +# Returns a list of arguments that evaluate to true +function(collect_true output_var output_count_var) + set(${output_var}) + foreach(option_var IN LISTS ARGN) + if(${option_var}) + list(APPEND ${output_var} ${option_var}) + endif() + endforeach() + set(${output_var} ${${output_var}} PARENT_SCOPE) + list(LENGTH ${output_var} ${output_count_var}) + set(${output_count_var} ${${output_count_var}} PARENT_SCOPE) +endfunction() diff --git a/deps-win32/curl-7.54.1/CMakeLists.txt b/deps-win32/curl-7.54.1/CMakeLists.txt new file mode 100644 index 0000000..d2e1c2b --- /dev/null +++ b/deps-win32/curl-7.54.1/CMakeLists.txt @@ -0,0 +1,1318 @@ +#*************************************************************************** +# _ _ ____ _ +# Project ___| | | | _ \| | +# / __| | | | |_) | | +# | (__| |_| | _ <| |___ +# \___|\___/|_| \_\_____| +# +# Copyright (C) 1998 - 2016, Daniel Stenberg, , et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +########################################################################### +# curl/libcurl CMake script +# by Tetetest and Sukender (Benoit Neil) + +# TODO: +# The output .so file lacks the soname number which we currently have within the lib/Makefile.am file +# Add full (4 or 5 libs) SSL support +# Add INSTALL target (EXTRA_DIST variables in Makefile.am may be moved to Makefile.inc so that CMake/CPack is aware of what's to include). +# Add CTests(?) +# Check on all possible platforms +# Test with as many configurations possible (With or without any option) +# Create scripts that help keeping the CMake build system up to date (to reduce maintenance). According to Tetetest: +# - lists of headers that 'configure' checks for; +# - curl-specific tests (the ones that are in m4/curl-*.m4 files); +# - (most obvious thing:) curl version numbers. +# Add documentation subproject +# +# To check: +# (From Daniel Stenberg) The cmake build selected to run gcc with -fPIC on my box while the plain configure script did not. +# (From Daniel Stenberg) The gcc command line use neither -g nor any -O options. As a developer, I also treasure our configure scripts's --enable-debug option that sets a long range of "picky" compiler options. +cmake_minimum_required(VERSION 2.8 FATAL_ERROR) +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}") +include(Utilities) +include(Macros) +include(CMakeDependentOption) + +project( CURL C ) + +message(WARNING "the curl cmake build system is poorly maintained. 
Be aware") + +file (READ ${CURL_SOURCE_DIR}/include/curl/curlver.h CURL_VERSION_H_CONTENTS) +string (REGEX MATCH "#define LIBCURL_VERSION \"[^\"]*" + CURL_VERSION ${CURL_VERSION_H_CONTENTS}) +string (REGEX REPLACE "[^\"]+\"" "" CURL_VERSION ${CURL_VERSION}) +string (REGEX MATCH "#define LIBCURL_VERSION_NUM 0x[0-9a-fA-F]+" + CURL_VERSION_NUM ${CURL_VERSION_H_CONTENTS}) +string (REGEX REPLACE "[^0]+0x" "" CURL_VERSION_NUM ${CURL_VERSION_NUM}) + +include_regular_expression("^.*$") # Sukender: Is it necessary? + +# Setup package meta-data +# SET(PACKAGE "curl") +message(STATUS "curl version=[${CURL_VERSION}]") +# SET(PACKAGE_TARNAME "curl") +# SET(PACKAGE_NAME "curl") +# SET(PACKAGE_VERSION "-") +# SET(PACKAGE_STRING "curl-") +# SET(PACKAGE_BUGREPORT "a suitable curl mailing list => https://curl.haxx.se/mail/") +set(OPERATING_SYSTEM "${CMAKE_SYSTEM_NAME}") +set(OS "\"${CMAKE_SYSTEM_NAME}\"") + +include_directories(${PROJECT_BINARY_DIR}/include/curl) +include_directories( ${CURL_SOURCE_DIR}/include ) + +option(BUILD_CURL_EXE "Set to ON to build curl executable." ON) +option(CURL_STATICLIB "Set to ON to build libcurl with static linking." OFF) +option(ENABLE_ARES "Set to ON to enable c-ares support" OFF) +if(WIN32) + CMAKE_DEPENDENT_OPTION(ENABLE_THREADED_RESOLVER + "Set to ON to enable threaded DNS lookup" + ON "NOT ENABLE_ARES" + OFF) +else() + option(ENABLE_THREADED_RESOLVER "Set to ON to enable POSIX threaded DNS lookup" OFF) +endif() +option(ENABLE_DEBUG "Set to ON to enable curl debug features" OFF) +option(ENABLE_CURLDEBUG "Set to ON to build with TrackMemory feature enabled" OFF) + +if (ENABLE_DEBUG) + # DEBUGBUILD will be defined only for Debug builds + if(NOT CMAKE_VERSION VERSION_LESS 3.0) + set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS $<$:DEBUGBUILD>) + else() + set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG DEBUGBUILD) + endif() + set(ENABLE_CURLDEBUG ON) +endif() + +if (ENABLE_CURLDEBUG) + set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS CURLDEBUG) +endif() + +# initialize CURL_LIBS +set(CURL_LIBS "") + +if(ENABLE_THREADED_RESOLVER AND ENABLE_ARES) + message(FATAL_ERROR "Options ENABLE_THREADED_RESOLVER and ENABLE_ARES are mutually exclusive") +endif() + +if(ENABLE_ARES) + set(USE_ARES 1) + find_package(CARES REQUIRED) + list(APPEND CURL_LIBS ${CARES_LIBRARY} ) + set(CURL_LIBS ${CURL_LIBS} ${CARES_LIBRARY}) +endif() + +if(MSVC) + option(BUILD_RELEASE_DEBUG_DIRS "Set OFF to build each configuration to a separate directory" OFF) + mark_as_advanced(BUILD_RELEASE_DEBUG_DIRS) +endif() + +include(CurlSymbolHiding) + +option(HTTP_ONLY "disables all protocols except HTTP (This overrides all CURL_DISABLE_* options)" OFF) +mark_as_advanced(HTTP_ONLY) +option(CURL_DISABLE_FTP "disables FTP" OFF) +mark_as_advanced(CURL_DISABLE_FTP) +option(CURL_DISABLE_LDAP "disables LDAP" OFF) +mark_as_advanced(CURL_DISABLE_LDAP) +option(CURL_DISABLE_TELNET "disables Telnet" OFF) +mark_as_advanced(CURL_DISABLE_TELNET) +option(CURL_DISABLE_DICT "disables DICT" OFF) +mark_as_advanced(CURL_DISABLE_DICT) +option(CURL_DISABLE_FILE "disables FILE" OFF) +mark_as_advanced(CURL_DISABLE_FILE) +option(CURL_DISABLE_TFTP "disables TFTP" OFF) +mark_as_advanced(CURL_DISABLE_TFTP) +option(CURL_DISABLE_HTTP "disables HTTP" OFF) +mark_as_advanced(CURL_DISABLE_HTTP) + +option(CURL_DISABLE_LDAPS "to disable LDAPS" OFF) +mark_as_advanced(CURL_DISABLE_LDAPS) + +option(CURL_DISABLE_RTSP "to disable RTSP" OFF) +mark_as_advanced(CURL_DISABLE_RTSP) +option(CURL_DISABLE_PROXY "to disable proxy" 
OFF) +mark_as_advanced(CURL_DISABLE_PROXY) +option(CURL_DISABLE_POP3 "to disable POP3" OFF) +mark_as_advanced(CURL_DISABLE_POP3) +option(CURL_DISABLE_IMAP "to disable IMAP" OFF) +mark_as_advanced(CURL_DISABLE_IMAP) +option(CURL_DISABLE_SMTP "to disable SMTP" OFF) +mark_as_advanced(CURL_DISABLE_SMTP) +option(CURL_DISABLE_GOPHER "to disable Gopher" OFF) +mark_as_advanced(CURL_DISABLE_GOPHER) + +if(HTTP_ONLY) + set(CURL_DISABLE_FTP ON) + set(CURL_DISABLE_LDAP ON) + set(CURL_DISABLE_LDAPS ON) + set(CURL_DISABLE_TELNET ON) + set(CURL_DISABLE_DICT ON) + set(CURL_DISABLE_FILE ON) + set(CURL_DISABLE_TFTP ON) + set(CURL_DISABLE_RTSP ON) + set(CURL_DISABLE_POP3 ON) + set(CURL_DISABLE_IMAP ON) + set(CURL_DISABLE_SMTP ON) + set(CURL_DISABLE_GOPHER ON) +endif() + +option(CURL_DISABLE_COOKIES "to disable cookies support" OFF) +mark_as_advanced(CURL_DISABLE_COOKIES) + +option(CURL_DISABLE_CRYPTO_AUTH "to disable cryptographic authentication" OFF) +mark_as_advanced(CURL_DISABLE_CRYPTO_AUTH) +option(CURL_DISABLE_VERBOSE_STRINGS "to disable verbose strings" OFF) +mark_as_advanced(CURL_DISABLE_VERBOSE_STRINGS) +option(DISABLED_THREADSAFE "Set to explicitly specify we don't want to use thread-safe functions" OFF) +mark_as_advanced(DISABLED_THREADSAFE) +option(ENABLE_IPV6 "Define if you want to enable IPv6 support" ON) +mark_as_advanced(ENABLE_IPV6) +if(ENABLE_IPV6 AND NOT WIN32) + include(CheckStructHasMember) + check_struct_has_member("struct sockaddr_in6" sin6_addr "netinet/in.h" + HAVE_SOCKADDR_IN6_SIN6_ADDR) + check_struct_has_member("struct sockaddr_in6" sin6_scope_id "netinet/in.h" + HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID) + if(NOT HAVE_SOCKADDR_IN6_SIN6_ADDR) + message(WARNING "struct sockaddr_in6 not available, disabling IPv6 support") + # Force the feature off as this name is used as guard macro... + set(ENABLE_IPV6 OFF + CACHE BOOL "Define if you want to enable IPv6 support" FORCE) + endif() +endif() + +option(ENABLE_MANUAL "to provide the built-in manual" ON) +unset(USE_MANUAL CACHE) # TODO: cache NROFF/NROFF_MANOPT/USE_MANUAL vars? +if(ENABLE_MANUAL) + find_program(NROFF NAMES gnroff nroff) + if(NROFF) + # Need a way to write to stdin, this will do + file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt" "test") + # Tests for a valid nroff option to generate a manpage + foreach(_MANOPT "-man" "-mandoc") + execute_process(COMMAND "${NROFF}" ${_MANOPT} + OUTPUT_VARIABLE NROFF_MANOPT_OUTPUT + INPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt" + ERROR_QUIET) + # Save the option if it was valid + if(NROFF_MANOPT_OUTPUT) + message("Found *nroff option: -- ${_MANOPT}") + set(NROFF_MANOPT ${_MANOPT}) + set(USE_MANUAL 1) + break() + endif() + endforeach() + # No need for the temporary file + file(REMOVE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt") + if(NOT USE_MANUAL) + message(WARNING "Found no *nroff option to get plaintext from man pages") + endif() + else() + message(WARNING "Found no *nroff program") + endif() +endif() +# Required for building manual, docs, tests +find_package(Perl REQUIRED) + +# We need ansi c-flags, especially on HP +set(CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}") +set(CMAKE_REQUIRED_FLAGS ${CMAKE_ANSI_CFLAGS}) + +# Disable warnings on Borland to avoid changing 3rd party code. 
+if(BORLAND) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w-") +endif(BORLAND) + +# If we are on AIX, do the _ALL_SOURCE magic +if(${CMAKE_SYSTEM_NAME} MATCHES AIX) + set(_ALL_SOURCE 1) +endif(${CMAKE_SYSTEM_NAME} MATCHES AIX) + +# Include all the necessary files for macros +include (CheckFunctionExists) +include (CheckIncludeFile) +include (CheckIncludeFiles) +include (CheckLibraryExists) +include (CheckSymbolExists) +include (CheckTypeSize) +include (CheckCSourceCompiles) +include (CMakeDependentOption) + +# On windows preload settings +if(WIN32) + set(CMAKE_REQUIRED_DEFINITIONS "${CMAKE_REQUIRED_DEFINITIONS} -D_WINSOCKAPI_=") + include(${CMAKE_CURRENT_SOURCE_DIR}/CMake/Platforms/WindowsCache.cmake) +endif(WIN32) + +if(ENABLE_THREADED_RESOLVER) + if(WIN32) + set(USE_THREADS_WIN32 ON) + else() + check_include_file_concat("pthread.h" HAVE_PTHREAD_H) + if(HAVE_PTHREAD_H) + set(CMAKE_THREAD_PREFER_PTHREAD 1) + find_package(Threads) + if(CMAKE_USE_PTHREADS_INIT) + set(CURL_LIBS ${CURL_LIBS} ${CMAKE_THREAD_LIBS_INIT}) + set(USE_THREADS_POSIX 1) + endif() + endif() + endif() +endif() + +# Check for all needed libraries +check_library_exists_concat("dl" dlopen HAVE_LIBDL) +check_library_exists_concat("socket" connect HAVE_LIBSOCKET) +check_library_exists("c" gethostbyname "" NOT_NEED_LIBNSL) + +# Yellowtab Zeta needs different libraries than BeOS 5. +if(BEOS) + set(NOT_NEED_LIBNSL 1) + check_library_exists_concat("bind" gethostbyname HAVE_LIBBIND) + check_library_exists_concat("bnetapi" closesocket HAVE_LIBBNETAPI) +endif(BEOS) + +if(NOT NOT_NEED_LIBNSL) + check_library_exists_concat("nsl" gethostbyname HAVE_LIBNSL) +endif(NOT NOT_NEED_LIBNSL) + +check_function_exists(gethostname HAVE_GETHOSTNAME) + +if(WIN32) + check_library_exists_concat("ws2_32" getch HAVE_LIBWS2_32) + check_library_exists_concat("winmm" getch HAVE_LIBWINMM) +endif() + +# check SSL libraries +# TODO support GNUTLS, NSS, POLARSSL, AXTLS, CYASSL + +if(APPLE) + option(CMAKE_USE_DARWINSSL "enable Apple OS native SSL/TLS" OFF) +endif() +if(WIN32) + option(CMAKE_USE_WINSSL "enable Windows native SSL/TLS" OFF) + cmake_dependent_option(CURL_WINDOWS_SSPI "Use windows libraries to allow NTLM authentication without openssl" ON + CMAKE_USE_WINSSL OFF) +endif() +option(CMAKE_USE_MBEDTLS "Enable mbedTLS for SSL/TLS" OFF) + +set(openssl_default ON) +if(WIN32 OR CMAKE_USE_DARWINSSL OR CMAKE_USE_WINSSL OR CMAKE_USE_MBEDTLS) + set(openssl_default OFF) +endif() +option(CMAKE_USE_OPENSSL "Use OpenSSL code. Experimental" ${openssl_default}) + +collect_true(enabled_ssl_options enabled_ssl_options_count + CMAKE_USE_WINSSL + CMAKE_USE_DARWINSSL + CMAKE_USE_OPENSSL + CMAKE_USE_MBEDTLS +) +if(enabled_ssl_options_count GREATER 1) + message(FATAL_ERROR "Multiple SSL options specified: ${enabled_ssl_options}. 
Please pick at most one and disable the rest.") +endif() + +if(CMAKE_USE_WINSSL) + set(SSL_ENABLED ON) + set(USE_SCHANNEL ON) # Windows native SSL/TLS support + set(USE_WINDOWS_SSPI ON) # CMAKE_USE_WINSSL implies CURL_WINDOWS_SSPI + list(APPEND CURL_LIBS "crypt32") +endif() +if(CURL_WINDOWS_SSPI) + set(USE_WINDOWS_SSPI ON) + set(CMAKE_REQUIRED_DEFINITIONS "${CMAKE_REQUIRED_DEFINITIONS} -DSECURITY_WIN32") +endif() + +if(CMAKE_USE_DARWINSSL) + find_library(COREFOUNDATION_FRAMEWORK "CoreFoundation") + if(NOT COREFOUNDATION_FRAMEWORK) + message(FATAL_ERROR "CoreFoundation framework not found") + endif() + + find_library(SECURITY_FRAMEWORK "Security") + if(NOT SECURITY_FRAMEWORK) + message(FATAL_ERROR "Security framework not found") + endif() + + set(SSL_ENABLED ON) + set(USE_DARWINSSL ON) + list(APPEND CURL_LIBS "${COREFOUNDATION_FRAMEWORK}" "${SECURITY_FRAMEWORK}") +endif() + +if(CMAKE_USE_OPENSSL) + find_package(OpenSSL REQUIRED) + set(SSL_ENABLED ON) + set(USE_OPENSSL ON) + set(HAVE_LIBCRYPTO ON) + set(HAVE_LIBSSL ON) + list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES}) + include_directories(${OPENSSL_INCLUDE_DIR}) + set(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR}) + check_include_file("openssl/crypto.h" HAVE_OPENSSL_CRYPTO_H) + check_include_file("openssl/engine.h" HAVE_OPENSSL_ENGINE_H) + check_include_file("openssl/err.h" HAVE_OPENSSL_ERR_H) + check_include_file("openssl/pem.h" HAVE_OPENSSL_PEM_H) + check_include_file("openssl/pkcs12.h" HAVE_OPENSSL_PKCS12_H) + check_include_file("openssl/rsa.h" HAVE_OPENSSL_RSA_H) + check_include_file("openssl/ssl.h" HAVE_OPENSSL_SSL_H) + check_include_file("openssl/x509.h" HAVE_OPENSSL_X509_H) + check_include_file("openssl/rand.h" HAVE_OPENSSL_RAND_H) + check_symbol_exists(RAND_status "${CURL_INCLUDES}" HAVE_RAND_STATUS) + check_symbol_exists(RAND_screen "${CURL_INCLUDES}" HAVE_RAND_SCREEN) + check_symbol_exists(RAND_egd "${CURL_INCLUDES}" HAVE_RAND_EGD) +endif() + +if(CMAKE_USE_MBEDTLS) + find_package(MbedTLS REQUIRED) + set(SSL_ENABLED ON) + set(USE_MBEDTLS ON) + list(APPEND CURL_LIBS ${MBEDTLS_LIBRARIES}) + include_directories(${MBEDTLS_INCLUDE_DIRS}) +endif() + +option(USE_NGHTTP2 "Use Nghttp2 library" OFF) +if(USE_NGHTTP2) + find_package(NGHTTP2 REQUIRED) + include_directories(${NGHTTP2_INCLUDE_DIRS}) + list(APPEND CURL_LIBS ${NGHTTP2_LIBRARIES}) +endif() + +if(NOT CURL_DISABLE_LDAP) + if(WIN32) + option(USE_WIN32_LDAP "Use Windows LDAP implementation" ON) + if(USE_WIN32_LDAP) + check_library_exists_concat("wldap32" cldap_open HAVE_WLDAP32) + if(NOT HAVE_WLDAP32) + set(USE_WIN32_LDAP OFF) + endif() + endif() + endif() + + option(CMAKE_USE_OPENLDAP "Use OpenLDAP code." OFF) + mark_as_advanced(CMAKE_USE_OPENLDAP) + set(CMAKE_LDAP_LIB "ldap" CACHE STRING "Name or full path to ldap library") + set(CMAKE_LBER_LIB "lber" CACHE STRING "Name or full path to lber library") + + if(CMAKE_USE_OPENLDAP AND USE_WIN32_LDAP) + message(FATAL_ERROR "Cannot use USE_WIN32_LDAP and CMAKE_USE_OPENLDAP at the same time") + endif() + + # Now that we know, we're not using windows LDAP... 
+ if(USE_WIN32_LDAP) + check_include_file_concat("winldap.h" HAVE_WINLDAP_H) + check_include_file_concat("winber.h" HAVE_WINBER_H) + else() + # Check for LDAP + set(CMAKE_REQUIRED_LIBRARIES ${OPENSSL_LIBRARIES}) + check_library_exists_concat(${CMAKE_LDAP_LIB} ldap_init HAVE_LIBLDAP) + check_library_exists_concat(${CMAKE_LBER_LIB} ber_init HAVE_LIBLBER) + + set(CMAKE_REQUIRED_INCLUDES_BAK ${CMAKE_REQUIRED_INCLUDES}) + set(CMAKE_LDAP_INCLUDE_DIR "" CACHE STRING "Path to LDAP include directory") + if(CMAKE_LDAP_INCLUDE_DIR) + list(APPEND CMAKE_REQUIRED_INCLUDES ${CMAKE_LDAP_INCLUDE_DIR}) + endif() + check_include_file_concat("ldap.h" HAVE_LDAP_H) + check_include_file_concat("lber.h" HAVE_LBER_H) + + if(NOT HAVE_LDAP_H) + message(STATUS "LDAP_H not found CURL_DISABLE_LDAP set ON") + set(CURL_DISABLE_LDAP ON CACHE BOOL "" FORCE) + set(CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES_BAK}) #LDAP includes won't be used + elseif(NOT HAVE_LIBLDAP) + message(STATUS "LDAP library '${CMAKE_LDAP_LIB}' not found CURL_DISABLE_LDAP set ON") + set(CURL_DISABLE_LDAP ON CACHE BOOL "" FORCE) + set(CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES_BAK}) #LDAP includes won't be used + else() + if(CMAKE_USE_OPENLDAP) + set(USE_OPENLDAP ON) + endif() + if(CMAKE_LDAP_INCLUDE_DIR) + include_directories(${CMAKE_LDAP_INCLUDE_DIR}) + endif() + set(NEED_LBER_H ON) + set(_HEADER_LIST) + if(HAVE_WINDOWS_H) + list(APPEND _HEADER_LIST "windows.h") + endif() + if(HAVE_SYS_TYPES_H) + list(APPEND _HEADER_LIST "sys/types.h") + endif() + list(APPEND _HEADER_LIST "ldap.h") + + set(_SRC_STRING "") + foreach(_HEADER ${_HEADER_LIST}) + set(_INCLUDE_STRING "${_INCLUDE_STRING}#include <${_HEADER}>\n") + endforeach() + + set(_SRC_STRING + " + ${_INCLUDE_STRING} + int main(int argc, char ** argv) + { + BerValue *bvp = NULL; + BerElement *bep = ber_init(bvp); + ber_free(bep, 1); + return 0; + }" + ) + set(CMAKE_REQUIRED_DEFINITIONS "${CMAKE_REQUIRED_DEFINITIONS} -DLDAP_DEPRECATED=1") + list(APPEND CMAKE_REQUIRED_LIBRARIES ${CMAKE_LDAP_LIB}) + if(HAVE_LIBLBER) + list(APPEND CMAKE_REQUIRED_LIBRARIES ${CMAKE_LBER_LIB}) + endif() + check_c_source_compiles("${_SRC_STRING}" NOT_NEED_LBER_H) + + if(NOT_NEED_LBER_H) + set(NEED_LBER_H OFF) + else() + set(CURL_TEST_DEFINES "${CURL_TEST_DEFINES} -DNEED_LBER_H") + endif() + endif() + endif() + +endif() + +# No ldap, no ldaps. +if(CURL_DISABLE_LDAP) + if(NOT CURL_DISABLE_LDAPS) + message(STATUS "LDAP needs to be enabled to support LDAPS") + set(CURL_DISABLE_LDAPS ON CACHE BOOL "" FORCE) + endif() +endif() + +if(NOT CURL_DISABLE_LDAPS) + check_include_file_concat("ldap_ssl.h" HAVE_LDAP_SSL_H) + check_include_file_concat("ldapssl.h" HAVE_LDAPSSL_H) +endif() + +# Check for idn +check_library_exists_concat("idn2" idn2_lookup_ul HAVE_LIBIDN2) + +# Check for symbol dlopen (same as HAVE_LIBDL) +check_library_exists("${CURL_LIBS}" dlopen "" HAVE_DLOPEN) + +option(CURL_ZLIB "Set to ON to enable building curl with zlib support." 
ON) +set(HAVE_LIBZ OFF) +set(HAVE_ZLIB_H OFF) +set(HAVE_ZLIB OFF) +if(CURL_ZLIB) + find_package(ZLIB QUIET) + if(ZLIB_FOUND) + set(HAVE_ZLIB_H ON) + set(HAVE_ZLIB ON) + set(HAVE_LIBZ ON) + list(APPEND CURL_LIBS ${ZLIB_LIBRARIES}) + include_directories(${ZLIB_INCLUDE_DIRS}) + list(APPEND CMAKE_REQUIRED_INCLUDES ${ZLIB_INCLUDE_DIRS}) + endif() +endif() + +#libSSH2 +option(CMAKE_USE_LIBSSH2 "Use libSSH2" ON) +mark_as_advanced(CMAKE_USE_LIBSSH2) +set(USE_LIBSSH2 OFF) +set(HAVE_LIBSSH2 OFF) +set(HAVE_LIBSSH2_H OFF) + +if(CMAKE_USE_LIBSSH2) + find_package(LibSSH2) + if(LIBSSH2_FOUND) + list(APPEND CURL_LIBS ${LIBSSH2_LIBRARY}) + set(CMAKE_REQUIRED_LIBRARIES ${LIBSSH2_LIBRARY}) + list(APPEND CMAKE_REQUIRED_INCLUDES "${LIBSSH2_INCLUDE_DIR}") + include_directories("${LIBSSH2_INCLUDE_DIR}") + set(HAVE_LIBSSH2 ON) + set(USE_LIBSSH2 ON) + + # find_package has already found the headers + set(HAVE_LIBSSH2_H ON) + set(CURL_INCLUDES ${CURL_INCLUDES} "${LIBSSH2_INCLUDE_DIR}/libssh2.h") + set(CURL_TEST_DEFINES "${CURL_TEST_DEFINES} -DHAVE_LIBSSH2_H") + + # now check for specific libssh2 symbols as they were added in different versions + set(CMAKE_EXTRA_INCLUDE_FILES "libssh2.h") + check_function_exists(libssh2_version HAVE_LIBSSH2_VERSION) + check_function_exists(libssh2_init HAVE_LIBSSH2_INIT) + check_function_exists(libssh2_exit HAVE_LIBSSH2_EXIT) + check_function_exists(libssh2_scp_send64 HAVE_LIBSSH2_SCP_SEND64) + check_function_exists(libssh2_session_handshake HAVE_LIBSSH2_SESSION_HANDSHAKE) + set(CMAKE_EXTRA_INCLUDE_FILES "") + + endif(LIBSSH2_FOUND) +endif(CMAKE_USE_LIBSSH2) + +option(CMAKE_USE_GSSAPI "Use GSSAPI implementation (right now only Heimdal is supported with CMake build)" OFF) +mark_as_advanced(CMAKE_USE_GSSAPI) + +if(CMAKE_USE_GSSAPI) + find_package(GSS) + + set(HAVE_GSSAPI ${GSS_FOUND}) + if(GSS_FOUND) + + message(STATUS "Found ${GSS_FLAVOUR} GSSAPI version: \"${GSS_VERSION}\"") + + list(APPEND CMAKE_REQUIRED_INCLUDES ${GSS_INCLUDE_DIRECTORIES}) + check_include_file_concat("gssapi/gssapi.h" HAVE_GSSAPI_GSSAPI_H) + check_include_file_concat("gssapi/gssapi_generic.h" HAVE_GSSAPI_GSSAPI_GENERIC_H) + check_include_file_concat("gssapi/gssapi_krb5.h" HAVE_GSSAPI_GSSAPI_KRB5_H) + + if(GSS_FLAVOUR STREQUAL "Heimdal") + set(HAVE_GSSHEIMDAL ON) + else() # MIT + set(HAVE_GSSMIT ON) + set(_INCLUDE_LIST "") + if(HAVE_GSSAPI_GSSAPI_H) + list(APPEND _INCLUDE_LIST "gssapi/gssapi.h") + endif() + if(HAVE_GSSAPI_GSSAPI_GENERIC_H) + list(APPEND _INCLUDE_LIST "gssapi/gssapi_generic.h") + endif() + if(HAVE_GSSAPI_GSSAPI_KRB5_H) + list(APPEND _INCLUDE_LIST "gssapi/gssapi_krb5.h") + endif() + + string(REPLACE ";" " " _COMPILER_FLAGS_STR "${GSS_COMPILER_FLAGS}") + string(REPLACE ";" " " _LINKER_FLAGS_STR "${GSS_LINKER_FLAGS}") + + foreach(_dir ${GSS_LINK_DIRECTORIES}) + set(_LINKER_FLAGS_STR "${_LINKER_FLAGS_STR} -L\"${_dir}\"") + endforeach() + + set(CMAKE_REQUIRED_FLAGS "${_COMPILER_FLAGS_STR} ${_LINKER_FLAGS_STR}") + set(CMAKE_REQUIRED_LIBRARIES ${GSS_LIBRARIES}) + check_symbol_exists("GSS_C_NT_HOSTBASED_SERVICE" ${_INCLUDE_LIST} HAVE_GSS_C_NT_HOSTBASED_SERVICE) + if(NOT HAVE_GSS_C_NT_HOSTBASED_SERVICE) + set(HAVE_OLD_GSSMIT ON) + endif() + + endif() + + include_directories(${GSS_INCLUDE_DIRECTORIES}) + link_directories(${GSS_LINK_DIRECTORIES}) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GSS_COMPILER_FLAGS}") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${GSS_LINKER_FLAGS}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${GSS_LINKER_FLAGS}") + list(APPEND CURL_LIBS ${GSS_LIBRARIES}) + + 
else() + message(WARNING "GSSAPI support has been requested but no supporting libraries found. Skipping.") + endif() +endif() + +option(ENABLE_UNIX_SOCKETS "Define if you want Unix domain sockets support" ON) +if(ENABLE_UNIX_SOCKETS) + include(CheckStructHasMember) + check_struct_has_member("struct sockaddr_un" sun_path "sys/un.h" USE_UNIX_SOCKETS) +else() + unset(USE_UNIX_SOCKETS CACHE) +endif() + + +# +# CA handling +# +set(CURL_CA_BUNDLE "auto" CACHE STRING + "Path to the CA bundle. Set 'none' to disable or 'auto' for auto-detection. Defaults to 'auto'.") +set(CURL_CA_FALLBACK OFF CACHE BOOL + "Set ON to use built-in CA store of TLS backend. Defaults to OFF") +set(CURL_CA_PATH "auto" CACHE STRING + "Location of default CA path. Set 'none' to disable or 'auto' for auto-detection. Defaults to 'auto'.") + +if("${CURL_CA_BUNDLE}" STREQUAL "") + message(FATAL_ERROR "Invalid value of CURL_CA_BUNDLE. Use 'none', 'auto' or file path.") +elseif("${CURL_CA_BUNDLE}" STREQUAL "none") + unset(CURL_CA_BUNDLE CACHE) +elseif("${CURL_CA_BUNDLE}" STREQUAL "auto") + unset(CURL_CA_BUNDLE CACHE) + set(CURL_CA_BUNDLE_AUTODETECT TRUE) +else() + set(CURL_CA_BUNDLE_SET TRUE) +endif() + +if("${CURL_CA_PATH}" STREQUAL "") + message(FATAL_ERROR "Invalid value of CURL_CA_PATH. Use 'none', 'auto' or directory path.") +elseif("${CURL_CA_PATH}" STREQUAL "none") + unset(CURL_CA_PATH CACHE) +elseif("${CURL_CA_PATH}" STREQUAL "auto") + unset(CURL_CA_PATH CACHE) + set(CURL_CA_PATH_AUTODETECT TRUE) +else() + set(CURL_CA_PATH_SET TRUE) +endif() + +if(CURL_CA_BUNDLE_SET AND CURL_CA_PATH_AUTODETECT) + # Skip autodetection of unset CA path because CA bundle is set explicitly +elseif(CURL_CA_PATH_SET AND CURL_CA_BUNDLE_AUTODETECT) + # Skip autodetection of unset CA bundle because CA path is set explicitly +elseif(CURL_CA_PATH_AUTODETECT OR CURL_CA_BUNDLE_AUTODETECT) + # first try autodetecting a CA bundle, then a CA path + + if(CURL_CA_BUNDLE_AUTODETECT) + set(SEARCH_CA_BUNDLE_PATHS + /etc/ssl/certs/ca-certificates.crt + /etc/pki/tls/certs/ca-bundle.crt + /usr/share/ssl/certs/ca-bundle.crt + /usr/local/share/certs/ca-root-nss.crt + /etc/ssl/cert.pem) + + foreach(SEARCH_CA_BUNDLE_PATH ${SEARCH_CA_BUNDLE_PATHS}) + if(EXISTS "${SEARCH_CA_BUNDLE_PATH}") + message(STATUS "Found CA bundle: ${SEARCH_CA_BUNDLE_PATH}") + set(CURL_CA_BUNDLE "${SEARCH_CA_BUNDLE_PATH}") + set(CURL_CA_BUNDLE_SET TRUE CACHE BOOL "Path to the CA bundle has been set") + break() + endif() + endforeach() + endif() + + if(CURL_CA_PATH_AUTODETECT AND (NOT CURL_CA_PATH_SET)) + if(EXISTS "/etc/ssl/certs") + set(CURL_CA_PATH "/etc/ssl/certs") + set(CURL_CA_PATH_SET TRUE CACHE BOOL "Path to the CA bundle has been set") + endif() + endif() +endif() + +if(CURL_CA_PATH_SET AND NOT USE_OPENSSL AND NOT USE_MBEDTLS) + message(FATAL_ERROR + "CA path only supported by OpenSSL, GnuTLS or mbed TLS. 
" + "Set CURL_CA_PATH=none or enable one of those TLS backends.") +endif() + + +# Check for header files +if(NOT UNIX) + check_include_file_concat("windows.h" HAVE_WINDOWS_H) + check_include_file_concat("winsock.h" HAVE_WINSOCK_H) + check_include_file_concat("ws2tcpip.h" HAVE_WS2TCPIP_H) + check_include_file_concat("winsock2.h" HAVE_WINSOCK2_H) + if(NOT CURL_WINDOWS_SSPI AND USE_OPENSSL) + set(CURL_LIBS ${CURL_LIBS} "crypt32") + endif() +endif(NOT UNIX) + +check_include_file_concat("stdio.h" HAVE_STDIO_H) +check_include_file_concat("inttypes.h" HAVE_INTTYPES_H) +check_include_file_concat("sys/filio.h" HAVE_SYS_FILIO_H) +check_include_file_concat("sys/ioctl.h" HAVE_SYS_IOCTL_H) +check_include_file_concat("sys/param.h" HAVE_SYS_PARAM_H) +check_include_file_concat("sys/poll.h" HAVE_SYS_POLL_H) +check_include_file_concat("sys/resource.h" HAVE_SYS_RESOURCE_H) +check_include_file_concat("sys/select.h" HAVE_SYS_SELECT_H) +check_include_file_concat("sys/socket.h" HAVE_SYS_SOCKET_H) +check_include_file_concat("sys/sockio.h" HAVE_SYS_SOCKIO_H) +check_include_file_concat("sys/stat.h" HAVE_SYS_STAT_H) +check_include_file_concat("sys/time.h" HAVE_SYS_TIME_H) +check_include_file_concat("sys/types.h" HAVE_SYS_TYPES_H) +check_include_file_concat("sys/uio.h" HAVE_SYS_UIO_H) +check_include_file_concat("sys/un.h" HAVE_SYS_UN_H) +check_include_file_concat("sys/utime.h" HAVE_SYS_UTIME_H) +check_include_file_concat("sys/xattr.h" HAVE_SYS_XATTR_H) +check_include_file_concat("alloca.h" HAVE_ALLOCA_H) +check_include_file_concat("arpa/inet.h" HAVE_ARPA_INET_H) +check_include_file_concat("arpa/tftp.h" HAVE_ARPA_TFTP_H) +check_include_file_concat("assert.h" HAVE_ASSERT_H) +check_include_file_concat("crypto.h" HAVE_CRYPTO_H) +check_include_file_concat("des.h" HAVE_DES_H) +check_include_file_concat("err.h" HAVE_ERR_H) +check_include_file_concat("errno.h" HAVE_ERRNO_H) +check_include_file_concat("fcntl.h" HAVE_FCNTL_H) +check_include_file_concat("idn2.h" HAVE_IDN2_H) +check_include_file_concat("ifaddrs.h" HAVE_IFADDRS_H) +check_include_file_concat("io.h" HAVE_IO_H) +check_include_file_concat("krb.h" HAVE_KRB_H) +check_include_file_concat("libgen.h" HAVE_LIBGEN_H) +check_include_file_concat("limits.h" HAVE_LIMITS_H) +check_include_file_concat("locale.h" HAVE_LOCALE_H) +check_include_file_concat("net/if.h" HAVE_NET_IF_H) +check_include_file_concat("netdb.h" HAVE_NETDB_H) +check_include_file_concat("netinet/in.h" HAVE_NETINET_IN_H) +check_include_file_concat("netinet/tcp.h" HAVE_NETINET_TCP_H) + +check_include_file_concat("pem.h" HAVE_PEM_H) +check_include_file_concat("poll.h" HAVE_POLL_H) +check_include_file_concat("pwd.h" HAVE_PWD_H) +check_include_file_concat("rsa.h" HAVE_RSA_H) +check_include_file_concat("setjmp.h" HAVE_SETJMP_H) +check_include_file_concat("sgtty.h" HAVE_SGTTY_H) +check_include_file_concat("signal.h" HAVE_SIGNAL_H) +check_include_file_concat("ssl.h" HAVE_SSL_H) +check_include_file_concat("stdbool.h" HAVE_STDBOOL_H) +check_include_file_concat("stdint.h" HAVE_STDINT_H) +check_include_file_concat("stdio.h" HAVE_STDIO_H) +check_include_file_concat("stdlib.h" HAVE_STDLIB_H) +check_include_file_concat("string.h" HAVE_STRING_H) +check_include_file_concat("strings.h" HAVE_STRINGS_H) +check_include_file_concat("stropts.h" HAVE_STROPTS_H) +check_include_file_concat("termio.h" HAVE_TERMIO_H) +check_include_file_concat("termios.h" HAVE_TERMIOS_H) +check_include_file_concat("time.h" HAVE_TIME_H) +check_include_file_concat("unistd.h" HAVE_UNISTD_H) +check_include_file_concat("utime.h" HAVE_UTIME_H) 
+check_include_file_concat("x509.h" HAVE_X509_H) + +check_include_file_concat("process.h" HAVE_PROCESS_H) +check_include_file_concat("stddef.h" HAVE_STDDEF_H) +check_include_file_concat("dlfcn.h" HAVE_DLFCN_H) +check_include_file_concat("malloc.h" HAVE_MALLOC_H) +check_include_file_concat("memory.h" HAVE_MEMORY_H) +check_include_file_concat("netinet/if_ether.h" HAVE_NETINET_IF_ETHER_H) +check_include_file_concat("stdint.h" HAVE_STDINT_H) +check_include_file_concat("sockio.h" HAVE_SOCKIO_H) +check_include_file_concat("sys/utsname.h" HAVE_SYS_UTSNAME_H) + +check_type_size(size_t SIZEOF_SIZE_T) +check_type_size(ssize_t SIZEOF_SSIZE_T) +check_type_size("long long" SIZEOF_LONG_LONG) +check_type_size("long" SIZEOF_LONG) +check_type_size("short" SIZEOF_SHORT) +check_type_size("int" SIZEOF_INT) +check_type_size("__int64" SIZEOF___INT64) +check_type_size("long double" SIZEOF_LONG_DOUBLE) +check_type_size("time_t" SIZEOF_TIME_T) +if(NOT HAVE_SIZEOF_SSIZE_T) + if(SIZEOF_LONG EQUAL SIZEOF_SIZE_T) + set(ssize_t long) + endif(SIZEOF_LONG EQUAL SIZEOF_SIZE_T) + if(NOT ssize_t AND SIZEOF___INT64 EQUAL SIZEOF_SIZE_T) + set(ssize_t __int64) + endif(NOT ssize_t AND SIZEOF___INT64 EQUAL SIZEOF_SIZE_T) +endif(NOT HAVE_SIZEOF_SSIZE_T) +# off_t is sized later, after the HAVE_FILE_OFFSET_BITS test + +# Different sizeofs, etc. + +# define CURL_SIZEOF_LONG 4 +# define CURL_TYPEOF_CURL_OFF_T long long +# define CURL_FORMAT_CURL_OFF_T "lld" +# define CURL_FORMAT_CURL_OFF_TU "llu" +# define CURL_FORMAT_OFF_T "%lld" +# define CURL_SIZEOF_CURL_OFF_T 8 +# define CURL_SUFFIX_CURL_OFF_T LL +# define CURL_SUFFIX_CURL_OFF_TU ULL + +set(CURL_SIZEOF_LONG ${SIZEOF_LONG}) + +if(SIZEOF_LONG EQUAL 8) + set(CURL_TYPEOF_CURL_OFF_T long) + set(CURL_SIZEOF_CURL_OFF_T 8) + set(CURL_FORMAT_CURL_OFF_T "ld") + set(CURL_FORMAT_CURL_OFF_TU "lu") + set(CURL_FORMAT_OFF_T "%ld") + set(CURL_SUFFIX_CURL_OFF_T L) + set(CURL_SUFFIX_CURL_OFF_TU UL) +endif(SIZEOF_LONG EQUAL 8) + +if(SIZEOF_LONG_LONG EQUAL 8) + set(CURL_TYPEOF_CURL_OFF_T "long long") + set(CURL_SIZEOF_CURL_OFF_T 8) + set(CURL_FORMAT_CURL_OFF_T "lld") + set(CURL_FORMAT_CURL_OFF_TU "llu") + set(CURL_FORMAT_OFF_T "%lld") + set(CURL_SUFFIX_CURL_OFF_T LL) + set(CURL_SUFFIX_CURL_OFF_TU ULL) +endif(SIZEOF_LONG_LONG EQUAL 8) + +if(NOT CURL_TYPEOF_CURL_OFF_T) + set(CURL_TYPEOF_CURL_OFF_T ${ssize_t}) + set(CURL_SIZEOF_CURL_OFF_T ${SIZEOF_SSIZE_T}) + # TODO: need adjustment here. + set(CURL_FORMAT_CURL_OFF_T "ld") + set(CURL_FORMAT_CURL_OFF_TU "lu") + set(CURL_FORMAT_OFF_T "%ld") + set(CURL_SUFFIX_CURL_OFF_T L) + set(CURL_SUFFIX_CURL_OFF_TU LU) +endif(NOT CURL_TYPEOF_CURL_OFF_T) + +if(HAVE_SIZEOF_LONG_LONG) + set(HAVE_LONGLONG 1) + set(HAVE_LL 1) +endif(HAVE_SIZEOF_LONG_LONG) + +find_file(RANDOM_FILE urandom /dev) +mark_as_advanced(RANDOM_FILE) + +# Check for some functions that are used +if(HAVE_LIBWS2_32) + set(CMAKE_REQUIRED_LIBRARIES ws2_32) +elseif(HAVE_LIBSOCKET) + set(CMAKE_REQUIRED_LIBRARIES socket) +endif() + +check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME) +check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET) +# poll on macOS is unreliable, it first did not exist, then was broken until +# fixed in 10.9 only to break again in 10.12. 
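+# --- Editor's note (illustrative sketch, not part of upstream curl) -----------
+# check_include_file_concat() above is curl's own helper (shipped with the
+# CMake/Macros.cmake file listed in CMAKE_DIST): for every header that is found
+# it sets the HAVE_* variable and appends the header to the CURL_INCLUDES list,
+# so that the check_symbol_exists() probes in this block see every detected
+# header. A rough equivalent using only the stock CheckIncludeFiles module,
+# with hypothetical EDITOR_NOTE_* names, would be:
+include(CheckIncludeFiles)
+check_include_files("sys/types.h" EDITOR_NOTE_HAVE_SYS_TYPES_H)
+if(EDITOR_NOTE_HAVE_SYS_TYPES_H)
+  list(APPEND EDITOR_NOTE_INCLUDES "sys/types.h")  # accumulated header list, as CURL_INCLUDES is
+endif()
+# The poll() probe right below is additionally wrapped in if(NOT APPLE) because
+# of the macOS breakage described in the comment above.
+# ------------------------------------------------------------------------------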
+if(NOT APPLE) + check_symbol_exists(poll "${CURL_INCLUDES}" HAVE_POLL) +endif() +check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT) +check_symbol_exists(strdup "${CURL_INCLUDES}" HAVE_STRDUP) +check_symbol_exists(strstr "${CURL_INCLUDES}" HAVE_STRSTR) +check_symbol_exists(strtok_r "${CURL_INCLUDES}" HAVE_STRTOK_R) +check_symbol_exists(strftime "${CURL_INCLUDES}" HAVE_STRFTIME) +check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME) +check_symbol_exists(strcasecmp "${CURL_INCLUDES}" HAVE_STRCASECMP) +check_symbol_exists(stricmp "${CURL_INCLUDES}" HAVE_STRICMP) +check_symbol_exists(strcmpi "${CURL_INCLUDES}" HAVE_STRCMPI) +check_symbol_exists(strncmpi "${CURL_INCLUDES}" HAVE_STRNCMPI) +check_symbol_exists(alarm "${CURL_INCLUDES}" HAVE_ALARM) +if(NOT HAVE_STRNCMPI) + set(HAVE_STRCMPI) +endif(NOT HAVE_STRNCMPI) +check_symbol_exists(gethostbyaddr "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR) +check_symbol_exists(gethostbyaddr_r "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR_R) +check_symbol_exists(gettimeofday "${CURL_INCLUDES}" HAVE_GETTIMEOFDAY) +check_symbol_exists(inet_addr "${CURL_INCLUDES}" HAVE_INET_ADDR) +check_symbol_exists(inet_ntoa "${CURL_INCLUDES}" HAVE_INET_NTOA) +check_symbol_exists(inet_ntoa_r "${CURL_INCLUDES}" HAVE_INET_NTOA_R) +check_symbol_exists(tcsetattr "${CURL_INCLUDES}" HAVE_TCSETATTR) +check_symbol_exists(tcgetattr "${CURL_INCLUDES}" HAVE_TCGETATTR) +check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR) +check_symbol_exists(closesocket "${CURL_INCLUDES}" HAVE_CLOSESOCKET) +check_symbol_exists(setvbuf "${CURL_INCLUDES}" HAVE_SETVBUF) +check_symbol_exists(sigsetjmp "${CURL_INCLUDES}" HAVE_SIGSETJMP) +check_symbol_exists(getpass_r "${CURL_INCLUDES}" HAVE_GETPASS_R) +check_symbol_exists(strlcat "${CURL_INCLUDES}" HAVE_STRLCAT) +check_symbol_exists(getpwuid "${CURL_INCLUDES}" HAVE_GETPWUID) +check_symbol_exists(geteuid "${CURL_INCLUDES}" HAVE_GETEUID) +check_symbol_exists(utime "${CURL_INCLUDES}" HAVE_UTIME) +check_symbol_exists(gmtime_r "${CURL_INCLUDES}" HAVE_GMTIME_R) +check_symbol_exists(localtime_r "${CURL_INCLUDES}" HAVE_LOCALTIME_R) + +check_symbol_exists(gethostbyname "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME) +check_symbol_exists(gethostbyname_r "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME_R) + +check_symbol_exists(signal "${CURL_INCLUDES}" HAVE_SIGNAL_FUNC) +check_symbol_exists(SIGALRM "${CURL_INCLUDES}" HAVE_SIGNAL_MACRO) +if(HAVE_SIGNAL_FUNC AND HAVE_SIGNAL_MACRO) + set(HAVE_SIGNAL 1) +endif(HAVE_SIGNAL_FUNC AND HAVE_SIGNAL_MACRO) +check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME) +check_symbol_exists(strtoll "${CURL_INCLUDES}" HAVE_STRTOLL) +check_symbol_exists(_strtoi64 "${CURL_INCLUDES}" HAVE__STRTOI64) +check_symbol_exists(strerror_r "${CURL_INCLUDES}" HAVE_STRERROR_R) +check_symbol_exists(siginterrupt "${CURL_INCLUDES}" HAVE_SIGINTERRUPT) +check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR) +check_symbol_exists(fork "${CURL_INCLUDES}" HAVE_FORK) +check_symbol_exists(getaddrinfo "${CURL_INCLUDES}" HAVE_GETADDRINFO) +check_symbol_exists(freeaddrinfo "${CURL_INCLUDES}" HAVE_FREEADDRINFO) +check_symbol_exists(freeifaddrs "${CURL_INCLUDES}" HAVE_FREEIFADDRS) +check_symbol_exists(pipe "${CURL_INCLUDES}" HAVE_PIPE) +check_symbol_exists(ftruncate "${CURL_INCLUDES}" HAVE_FTRUNCATE) +check_symbol_exists(getprotobyname "${CURL_INCLUDES}" HAVE_GETPROTOBYNAME) +check_symbol_exists(getrlimit "${CURL_INCLUDES}" HAVE_GETRLIMIT) +check_symbol_exists(setlocale "${CURL_INCLUDES}" HAVE_SETLOCALE) +check_symbol_exists(setrlimit "${CURL_INCLUDES}" HAVE_SETRLIMIT) 
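+# --- Editor's note (illustrative sketch, not part of upstream curl) -----------
+# These probes use the stock CheckSymbolExists module: each call compiles a tiny
+# program that includes the headers collected in CURL_INCLUDES and references
+# the symbol, linking with whatever CMAKE_REQUIRED_LIBRARIES was set earlier
+# (ws2_32 or socket). check_function_exists(), used for inet_pton a few lines
+# below, only tests that the function links, without including any real headers
+# (see the upstream comment there). Minimal stand-alone example, using
+# hypothetical EDITOR_NOTE_* result names:
+include(CheckSymbolExists)
+include(CheckFunctionExists)
+check_symbol_exists(strtol "stdlib.h" EDITOR_NOTE_HAVE_STRTOL)    # declaration visible via the header
+check_function_exists(strtol EDITOR_NOTE_HAVE_STRTOL_LINKABLE)    # merely linkable, no headers used
+# ------------------------------------------------------------------------------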
+check_symbol_exists(fcntl "${CURL_INCLUDES}" HAVE_FCNTL) +check_symbol_exists(ioctl "${CURL_INCLUDES}" HAVE_IOCTL) +check_symbol_exists(setsockopt "${CURL_INCLUDES}" HAVE_SETSOCKOPT) + +# symbol exists in win32, but function does not. +check_function_exists(inet_pton HAVE_INET_PTON) + +check_symbol_exists(fsetxattr "${CURL_INCLUDES}" HAVE_FSETXATTR) +if(HAVE_FSETXATTR) + foreach(CURL_TEST HAVE_FSETXATTR_5 HAVE_FSETXATTR_6) + curl_internal_test_run(${CURL_TEST}) + endforeach(CURL_TEST) +endif(HAVE_FSETXATTR) + +# sigaction and sigsetjmp are special. Use special mechanism for +# detecting those, but only if previous attempt failed. +if(HAVE_SIGNAL_H) + check_symbol_exists(sigaction "signal.h" HAVE_SIGACTION) +endif(HAVE_SIGNAL_H) + +if(NOT HAVE_SIGSETJMP) + if(HAVE_SETJMP_H) + check_symbol_exists(sigsetjmp "setjmp.h" HAVE_MACRO_SIGSETJMP) + if(HAVE_MACRO_SIGSETJMP) + set(HAVE_SIGSETJMP 1) + endif(HAVE_MACRO_SIGSETJMP) + endif(HAVE_SETJMP_H) +endif(NOT HAVE_SIGSETJMP) + +# If there is no stricmp(), do not allow LDAP to parse URLs +if(NOT HAVE_STRICMP) + set(HAVE_LDAP_URL_PARSE 1) +endif(NOT HAVE_STRICMP) + +# Do curl specific tests +foreach(CURL_TEST + HAVE_FCNTL_O_NONBLOCK + HAVE_IOCTLSOCKET + HAVE_IOCTLSOCKET_CAMEL + HAVE_IOCTLSOCKET_CAMEL_FIONBIO + HAVE_IOCTLSOCKET_FIONBIO + HAVE_IOCTL_FIONBIO + HAVE_IOCTL_SIOCGIFADDR + HAVE_SETSOCKOPT_SO_NONBLOCK + HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID + TIME_WITH_SYS_TIME + HAVE_O_NONBLOCK + HAVE_GETHOSTBYADDR_R_5 + HAVE_GETHOSTBYADDR_R_7 + HAVE_GETHOSTBYADDR_R_8 + HAVE_GETHOSTBYADDR_R_5_REENTRANT + HAVE_GETHOSTBYADDR_R_7_REENTRANT + HAVE_GETHOSTBYADDR_R_8_REENTRANT + HAVE_GETHOSTBYNAME_R_3 + HAVE_GETHOSTBYNAME_R_5 + HAVE_GETHOSTBYNAME_R_6 + HAVE_GETHOSTBYNAME_R_3_REENTRANT + HAVE_GETHOSTBYNAME_R_5_REENTRANT + HAVE_GETHOSTBYNAME_R_6_REENTRANT + HAVE_SOCKLEN_T + HAVE_IN_ADDR_T + HAVE_BOOL_T + STDC_HEADERS + RETSIGTYPE_TEST + HAVE_INET_NTOA_R_DECL + HAVE_INET_NTOA_R_DECL_REENTRANT + HAVE_GETADDRINFO + HAVE_FILE_OFFSET_BITS + ) + curl_internal_test(${CURL_TEST}) +endforeach(CURL_TEST) + +if(HAVE_FILE_OFFSET_BITS) + set(_FILE_OFFSET_BITS 64) + set(CMAKE_REQUIRED_FLAGS "-D_FILE_OFFSET_BITS=64") +endif(HAVE_FILE_OFFSET_BITS) +check_type_size("off_t" SIZEOF_OFF_T) +set(CMAKE_REQUIRED_FLAGS) + +foreach(CURL_TEST + HAVE_GLIBC_STRERROR_R + HAVE_POSIX_STRERROR_R + ) + curl_internal_test_run(${CURL_TEST}) +endforeach(CURL_TEST) + +# Check for reentrant +foreach(CURL_TEST + HAVE_GETHOSTBYADDR_R_5 + HAVE_GETHOSTBYADDR_R_7 + HAVE_GETHOSTBYADDR_R_8 + HAVE_GETHOSTBYNAME_R_3 + HAVE_GETHOSTBYNAME_R_5 + HAVE_GETHOSTBYNAME_R_6 + HAVE_INET_NTOA_R_DECL_REENTRANT) + if(NOT ${CURL_TEST}) + if(${CURL_TEST}_REENTRANT) + set(NEED_REENTRANT 1) + endif(${CURL_TEST}_REENTRANT) + endif(NOT ${CURL_TEST}) +endforeach(CURL_TEST) + +if(NEED_REENTRANT) + foreach(CURL_TEST + HAVE_GETHOSTBYADDR_R_5 + HAVE_GETHOSTBYADDR_R_7 + HAVE_GETHOSTBYADDR_R_8 + HAVE_GETHOSTBYNAME_R_3 + HAVE_GETHOSTBYNAME_R_5 + HAVE_GETHOSTBYNAME_R_6) + set(${CURL_TEST} 0) + if(${CURL_TEST}_REENTRANT) + set(${CURL_TEST} 1) + endif(${CURL_TEST}_REENTRANT) + endforeach(CURL_TEST) +endif(NEED_REENTRANT) + +if(HAVE_INET_NTOA_R_DECL_REENTRANT) + set(HAVE_INET_NTOA_R_DECL 1) + set(NEED_REENTRANT 1) +endif(HAVE_INET_NTOA_R_DECL_REENTRANT) + +# Some other minor tests + +if(NOT HAVE_IN_ADDR_T) + set(in_addr_t "unsigned long") +endif(NOT HAVE_IN_ADDR_T) + +# Fix libz / zlib.h + +if(NOT CURL_SPECIAL_LIBZ) + if(NOT HAVE_LIBZ) + set(HAVE_ZLIB_H 0) + endif(NOT HAVE_LIBZ) + + if(NOT HAVE_ZLIB_H) + set(HAVE_LIBZ 0) + endif(NOT HAVE_ZLIB_H) 
+endif(NOT CURL_SPECIAL_LIBZ) + +# Check for nonblocking +set(HAVE_DISABLED_NONBLOCKING 1) +if(HAVE_FIONBIO OR + HAVE_IOCTLSOCKET OR + HAVE_IOCTLSOCKET_CASE OR + HAVE_O_NONBLOCK) + set(HAVE_DISABLED_NONBLOCKING) +endif(HAVE_FIONBIO OR + HAVE_IOCTLSOCKET OR + HAVE_IOCTLSOCKET_CASE OR + HAVE_O_NONBLOCK) + +if(RETSIGTYPE_TEST) + set(RETSIGTYPE void) +else(RETSIGTYPE_TEST) + set(RETSIGTYPE int) +endif(RETSIGTYPE_TEST) + +if(CMAKE_COMPILER_IS_GNUCC AND APPLE) + include(CheckCCompilerFlag) + check_c_compiler_flag(-Wno-long-double HAVE_C_FLAG_Wno_long_double) + if(HAVE_C_FLAG_Wno_long_double) + # The Mac version of GCC warns about use of long double. Disable it. + get_source_file_property(MPRINTF_COMPILE_FLAGS mprintf.c COMPILE_FLAGS) + if(MPRINTF_COMPILE_FLAGS) + set(MPRINTF_COMPILE_FLAGS "${MPRINTF_COMPILE_FLAGS} -Wno-long-double") + else(MPRINTF_COMPILE_FLAGS) + set(MPRINTF_COMPILE_FLAGS "-Wno-long-double") + endif(MPRINTF_COMPILE_FLAGS) + set_source_files_properties(mprintf.c PROPERTIES + COMPILE_FLAGS ${MPRINTF_COMPILE_FLAGS}) + endif(HAVE_C_FLAG_Wno_long_double) +endif(CMAKE_COMPILER_IS_GNUCC AND APPLE) + +if(HAVE_SOCKLEN_T) + set(CURL_TYPEOF_CURL_SOCKLEN_T "socklen_t") + if(WIN32) + set(CMAKE_EXTRA_INCLUDE_FILES "winsock2.h;ws2tcpip.h") + elseif(HAVE_SYS_SOCKET_H) + set(CMAKE_EXTRA_INCLUDE_FILES "sys/socket.h") + endif() + check_type_size("socklen_t" CURL_SIZEOF_CURL_SOCKLEN_T) + set(CMAKE_EXTRA_INCLUDE_FILES) + if(NOT HAVE_CURL_SIZEOF_CURL_SOCKLEN_T) + message(FATAL_ERROR + "Check for sizeof socklen_t failed, see CMakeFiles/CMakerror.log") + endif() +else() + set(CURL_TYPEOF_CURL_SOCKLEN_T int) + set(CURL_SIZEOF_CURL_SOCKLEN_T ${SIZEOF_INT}) +endif() + +# TODO test which of these headers are required for the typedefs used in curlbuild.h +if(WIN32) + set(CURL_PULL_WS2TCPIP_H ${HAVE_WS2TCPIP_H}) +else() + set(CURL_PULL_SYS_TYPES_H ${HAVE_SYS_TYPES_H}) + set(CURL_PULL_SYS_SOCKET_H ${HAVE_SYS_SOCKET_H}) + set(CURL_PULL_SYS_POLL_H ${HAVE_SYS_POLL_H}) +endif() +set(CURL_PULL_STDINT_H ${HAVE_STDINT_H}) +set(CURL_PULL_INTTYPES_H ${HAVE_INTTYPES_H}) + +include(CMake/OtherTests.cmake) + +add_definitions(-DHAVE_CONFIG_H) + +# For windows, do not allow the compiler to use default target (Vista). +if(WIN32) + add_definitions(-D_WIN32_WINNT=0x0501) +endif(WIN32) + +# For windows, all compilers used by cmake should support large files +if(WIN32) + set(USE_WIN32_LARGE_FILES ON) +endif(WIN32) + +if(MSVC) + add_definitions(-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) +endif(MSVC) + +# Ugly (but functional) way to include "Makefile.inc" by transforming it (= regenerate it). +function(TRANSFORM_MAKEFILE_INC INPUT_FILE OUTPUT_FILE) + file(READ ${INPUT_FILE} MAKEFILE_INC_TEXT) + string(REPLACE "$(top_srcdir)" "\${CURL_SOURCE_DIR}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) + string(REPLACE "$(top_builddir)" "\${CURL_BINARY_DIR}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) + + string(REGEX REPLACE "\\\\\n" "!π!α!" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) + string(REGEX REPLACE "([a-zA-Z_][a-zA-Z0-9_]*)[\t ]*=[\t ]*([^\n]*)" "SET(\\1 \\2)" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) + string(REPLACE "!π!α!" "\n" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) + + string(REGEX REPLACE "\\$\\(([a-zA-Z_][a-zA-Z0-9_]*)\\)" "\${\\1}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) # Replace $() with ${} + string(REGEX REPLACE "@([a-zA-Z_][a-zA-Z0-9_]*)@" "\${\\1}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) # Replace @@ with ${}, even if that may not be read by CMake scripts. 
+ file(WRITE ${OUTPUT_FILE} ${MAKEFILE_INC_TEXT}) + +endfunction() + +add_subdirectory(docs) +add_subdirectory(lib) +if(BUILD_CURL_EXE) + add_subdirectory(src) +endif() + +include(CTest) +if(BUILD_TESTING) + add_subdirectory(tests) +endif() + +# Helper to populate a list (_items) with a label when conditions (the remaining +# args) are satisfied +function(_add_if label) + # TODO need to disable policy CMP0054 (CMake 3.1) to allow this indirection + if(${ARGN}) + set(_items ${_items} "${label}" PARENT_SCOPE) + endif() +endfunction() + +# Clear list and try to detect available features +set(_items) +_add_if("WinSSL" SSL_ENABLED AND USE_WINDOWS_SSPI) +_add_if("OpenSSL" SSL_ENABLED AND USE_OPENSSL) +_add_if("DarwinSSL" SSL_ENABLED AND USE_DARWINSSL) +_add_if("mbedTLS" SSL_ENABLED AND USE_MBEDTLS) +_add_if("IPv6" ENABLE_IPV6) +_add_if("unix-sockets" USE_UNIX_SOCKETS) +_add_if("libz" HAVE_LIBZ) +_add_if("AsynchDNS" USE_ARES OR USE_THREADS_POSIX OR USE_THREADS_WIN32) +_add_if("IDN" HAVE_LIBIDN2) +_add_if("Largefile" (CURL_SIZEOF_CURL_OFF_T GREATER 4) AND + ((SIZEOF_OFF_T GREATER 4) OR USE_WIN32_LARGE_FILES)) +# TODO SSP1 (WinSSL) check is missing +_add_if("SSPI" USE_WINDOWS_SSPI) +_add_if("GSS-API" HAVE_GSSAPI) +# TODO SSP1 missing for SPNEGO +_add_if("SPNEGO" NOT CURL_DISABLE_CRYPTO_AUTH AND + (HAVE_GSSAPI OR USE_WINDOWS_SSPI)) +_add_if("Kerberos" NOT CURL_DISABLE_CRYPTO_AUTH AND + (HAVE_GSSAPI OR USE_WINDOWS_SSPI)) +# NTLM support requires crypto function adaptions from various SSL libs +# TODO alternative SSL libs tests for SSP1, GNUTLS, NSS +if(NOT CURL_DISABLE_CRYPTO_AUTH AND (USE_OPENSSL OR USE_WINDOWS_SSPI OR USE_DARWINSSL OR USE_MBEDTLS)) + _add_if("NTLM" 1) + # TODO missing option (autoconf: --enable-ntlm-wb) + _add_if("NTLM_WB" NOT CURL_DISABLE_HTTP AND NTLM_WB_ENABLED) +endif() +# TODO missing option (--enable-tls-srp), depends on GNUTLS_SRP/OPENSSL_SRP +_add_if("TLS-SRP" USE_TLS_SRP) +# TODO option --with-nghttp2 tests for nghttp2 lib and nghttp2/nghttp2.h header +_add_if("HTTP2" USE_NGHTTP2) +string(REPLACE ";" " " SUPPORT_FEATURES "${_items}") +message(STATUS "Enabled features: ${SUPPORT_FEATURES}") + +# Clear list and try to detect available protocols +set(_items) +_add_if("HTTP" NOT CURL_DISABLE_HTTP) +_add_if("HTTPS" NOT CURL_DISABLE_HTTP AND SSL_ENABLED) +_add_if("FTP" NOT CURL_DISABLE_FTP) +_add_if("FTPS" NOT CURL_DISABLE_FTP AND SSL_ENABLED) +_add_if("FILE" NOT CURL_DISABLE_FILE) +_add_if("TELNET" NOT CURL_DISABLE_TELNET) +_add_if("LDAP" NOT CURL_DISABLE_LDAP) +# CURL_DISABLE_LDAP implies CURL_DISABLE_LDAPS +# TODO check HAVE_LDAP_SSL (in autoconf this is enabled with --enable-ldaps) +_add_if("LDAPS" NOT CURL_DISABLE_LDAPS AND + ((USE_OPENLDAP AND SSL_ENABLED) OR + (NOT USE_OPENLDAP AND HAVE_LDAP_SSL))) +_add_if("DICT" NOT CURL_DISABLE_DICT) +_add_if("TFTP" NOT CURL_DISABLE_TFTP) +_add_if("GOPHER" NOT CURL_DISABLE_GOPHER) +_add_if("POP3" NOT CURL_DISABLE_POP3) +_add_if("POP3S" NOT CURL_DISABLE_POP3 AND SSL_ENABLED) +_add_if("IMAP" NOT CURL_DISABLE_IMAP) +_add_if("IMAPS" NOT CURL_DISABLE_IMAP AND SSL_ENABLED) +_add_if("SMTP" NOT CURL_DISABLE_SMTP) +_add_if("SMTPS" NOT CURL_DISABLE_SMTP AND SSL_ENABLED) +_add_if("SCP" USE_LIBSSH2) +_add_if("SFTP" USE_LIBSSH2) +_add_if("RTSP" NOT CURL_DISABLE_RTSP) +_add_if("RTMP" USE_LIBRTMP) +list(SORT _items) +string(REPLACE ";" " " SUPPORT_PROTOCOLS "${_items}") +message(STATUS "Enabled protocols: ${SUPPORT_PROTOCOLS}") + +# curl-config needs the following options to be set. +set(CC "${CMAKE_C_COMPILER}") +# TODO probably put a -D... 
options here? +set(CONFIGURE_OPTIONS "") +# TODO when to set "-DCURL_STATICLIB" for CPPFLAG_CURL_STATICLIB? +set(CPPFLAG_CURL_STATICLIB "") +set(CURLVERSION "${CURL_VERSION}") +set(ENABLE_SHARED "yes") +if(CURL_STATICLIB) + set(ENABLE_STATIC "yes") +else() + set(ENABLE_STATIC "no") +endif() +set(exec_prefix "\${prefix}") +set(includedir "\${prefix}/include") +set(LDFLAGS "${CMAKE_SHARED_LINKER_FLAGS}") +set(LIBCURL_LIBS "") +set(libdir "${CMAKE_INSTALL_PREFIX}/lib") +foreach(_lib ${CMAKE_C_IMPLICIT_LINK_LIBRARIES} ${CURL_LIBS}) + if(_lib MATCHES ".*/.*") + set(LIBCURL_LIBS "${LIBCURL_LIBS} ${_lib}") + else() + set(LIBCURL_LIBS "${LIBCURL_LIBS} -l${_lib}") + endif() +endforeach() +# "a" (Linux) or "lib" (Windows) +string(REPLACE "." "" libext "${CMAKE_STATIC_LIBRARY_SUFFIX}") +set(prefix "${CMAKE_INSTALL_PREFIX}") +# Set this to "yes" to append all libraries on which -lcurl is dependent +set(REQUIRE_LIB_DEPS "no") +# SUPPORT_FEATURES +# SUPPORT_PROTOCOLS +set(VERSIONNUM "${CURL_VERSION_NUM}") + +# Finally generate a "curl-config" matching this config +configure_file("${CURL_SOURCE_DIR}/curl-config.in" + "${CURL_BINARY_DIR}/curl-config" @ONLY) +install(FILES "${CURL_BINARY_DIR}/curl-config" + DESTINATION bin + PERMISSIONS + OWNER_READ OWNER_WRITE OWNER_EXECUTE + GROUP_READ GROUP_EXECUTE + WORLD_READ WORLD_EXECUTE) + +# Finally generate a pkg-config file matching this config +configure_file("${CURL_SOURCE_DIR}/libcurl.pc.in" + "${CURL_BINARY_DIR}/libcurl.pc" @ONLY) +install(FILES "${CURL_BINARY_DIR}/libcurl.pc" + DESTINATION lib/pkgconfig) + +# This needs to be run very last so other parts of the scripts can take advantage of this. +if(NOT CURL_CONFIG_HAS_BEEN_RUN_BEFORE) + set(CURL_CONFIG_HAS_BEEN_RUN_BEFORE 1 CACHE INTERNAL "Flag to track whether this is the first time running CMake or if CMake has been configured before") +endif() + +# Installation. +# First, install generated curlbuild.h +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/include/curl/curlbuild.h" + DESTINATION include/curl ) +# Next, install other headers excluding curlbuild.h +install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/curl" + DESTINATION include + FILES_MATCHING PATTERN "*.h" + PATTERN "curlbuild.h" EXCLUDE) + + +# Workaround for MSVS10 to avoid the Dialog Hell +# FIXME: This could be removed with future version of CMake. +if(MSVC_VERSION EQUAL 1600) + set(CURL_SLN_FILENAME "${CMAKE_CURRENT_BINARY_DIR}/CURL.sln") + if(EXISTS "${CURL_SLN_FILENAME}") + file(APPEND "${CURL_SLN_FILENAME}" "\n# This should be regenerated!\n") + endif() +endif() diff --git a/deps-win32/curl-7.54.1/COPYING b/deps-win32/curl-7.54.1/COPYING new file mode 100644 index 0000000..1e45a5e --- /dev/null +++ b/deps-win32/curl-7.54.1/COPYING @@ -0,0 +1,22 @@ +COPYRIGHT AND PERMISSION NOTICE + +Copyright (c) 1996 - 2017, Daniel Stenberg, , and many +contributors, see the THANKS file. + +All rights reserved. + +Permission to use, copy, modify, and distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright +notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of a copyright holder shall not +be used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization of the copyright holder. diff --git a/deps-win32/curl-7.54.1/MacOSX-Framework b/deps-win32/curl-7.54.1/MacOSX-Framework new file mode 100644 index 0000000..19b338f --- /dev/null +++ b/deps-win32/curl-7.54.1/MacOSX-Framework @@ -0,0 +1,146 @@ +#!/bin/bash +# This script performs all of the steps needed to build a +# universal binary libcurl.framework for Mac OS X 10.4 or greater. +# +# Hendrik Visage: +# Generalizations added since Snowleopard (10.6) do not include +# the 10.4u SDK. +# +# Also note: +# 10.5 is the *ONLY* SDK that support PPC64 :( -- 10.6 do not have ppc64 support +#If you need to have PPC64 support then change below to 1 +PPC64_NEEDED=0 +# Apple does not support building for PPC anymore in Xcode 4 and later. +# If you're using Xcode 3 or earlier and need PPC support, then change +# the setting below to 1 +PPC_NEEDED=0 + +# For me the default is to develop for the platform I am on, and if you +#desire compatibility with older versions then change USE_OLD to 1 :) +USE_OLD=0 + +VERSION=`/usr/bin/sed -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' include/curl/curlver.h` +FRAMEWORK_VERSION=Versions/Release-$VERSION + +#I also wanted to "copy over" the system, and thus the reason I added the +# version to Versions/Release-7.20.1 etc. +# now a simple rsync -vaP libcurl.framework /Library/Frameworks will install it +# and setup the right paths to this version, leaving the system version +# "intact", so you can "fix" it later with the links to Versions/A/... + +DEVELOPER_PATH=`xcode-select --print-path` +# Around Xcode 4.3, SDKs were moved from the Developer folder into the +# MacOSX.platform folder +if test -d "$DEVELOPER_PATH/Platforms/MacOSX.platform/Developer/SDKs"; then + SDK_PATH="$DEVELOPER_PATH/Platforms/MacOSX.platform/Developer/SDKs" +else + SDK_PATH="$DEVELOPER_PATH/SDKs"; +fi +OLD_SDK=`ls $SDK_PATH|head -1` +NEW_SDK=`ls -r $SDK_PATH|head -1` + +if test "0"$USE_OLD -gt 0 +then + SDK32=$OLD_SDK +else + SDK32=$NEW_SDK +fi + +MACVER=`echo $SDK32|sed -e s/[a-zA-Z]//g -e s/.\$//` + +SDK32_DIR=$SDK_PATH/$SDK32 +MINVER32='-mmacosx-version-min='$MACVER +if test $PPC_NEEDED -gt 0; then + ARCHES32='-arch i386 -arch ppc' +else + ARCHES32='-arch i386' +fi + +if test $PPC64_NEEDED -gt 0 +then + SDK64=10.5 + ARCHES64='-arch x86_64 -arch ppc64' + SDK64=`ls $SDK_PATH|grep 10.5|head -1` +else + ARCHES64='-arch x86_64' + #We "know" that 10.4 and earlier do not support 64bit + OLD_SDK64=`ls $SDK_PATH|egrep -v "10.[0-4]"|head -1` + NEW_SDK64=`ls -r $SDK_PATH|egrep -v "10.[0-4][^0-9]" | head -1` + if test $USE_OLD -gt 0 + then + SDK64=$OLD_SDK64 + else + SDK64=$NEW_SDK64 + fi +fi + +SDK64_DIR=$SDK_PATH/$SDK64 +MACVER64=`echo $SDK64|sed -e s/[a-zA-Z]//g -e s/.\$//` + +MINVER64='-mmacosx-version-min='$MACVER64 + +if test ! -z $SDK32; then + echo "----Configuring libcurl for 32 bit universal framework..." 
+ make clean + ./configure --disable-dependency-tracking --disable-static --with-gssapi --with-darwinssl \ + CFLAGS="-Os -isysroot $SDK32_DIR $ARCHES32" \ + LDFLAGS="-Wl,-syslibroot,$SDK32_DIR $ARCHES32 -Wl,-headerpad_max_install_names" \ + CC=$CC + + echo "----Building 32 bit libcurl..." + make -j `sysctl -n hw.logicalcpu_max` + + echo "----Creating 32 bit framework..." + rm -r libcurl.framework + mkdir -p libcurl.framework/${FRAMEWORK_VERSION}/Resources + cp lib/.libs/libcurl.dylib libcurl.framework/${FRAMEWORK_VERSION}/libcurl + install_name_tool -id @rpath/libcurl.framework/${FRAMEWORK_VERSION}/libcurl libcurl.framework/${FRAMEWORK_VERSION}/libcurl + /usr/bin/sed -e "s/7\.12\.3/$VERSION/" lib/libcurl.plist >libcurl.framework/${FRAMEWORK_VERSION}/Resources/Info.plist + mkdir -p libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl + cp include/curl/*.h libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl + pushd libcurl.framework + ln -fs ${FRAMEWORK_VERSION}/libcurl libcurl + ln -fs ${FRAMEWORK_VERSION}/Resources Resources + ln -fs ${FRAMEWORK_VERSION}/Headers Headers + cd Versions + ln -fs $(basename "${FRAMEWORK_VERSION}") Current + + echo Testing for SDK64 + if test -d $SDK64_DIR; then + echo entering... + popd + make clean + echo "----Configuring libcurl for 64 bit universal framework..." + ./configure --disable-dependency-tracking --disable-static --with-gssapi --with-darwinssl \ + CFLAGS="-Os -isysroot $SDK64_DIR $ARCHES64" \ + LDFLAGS="-Wl,-syslibroot,$SDK64_DIR $ARCHES64 -Wl,-headerpad_max_install_names" \ + CC=$CC + + echo "----Building 64 bit libcurl..." + make -j `sysctl -n hw.logicalcpu_max` + + echo "----Appending 64 bit framework to 32 bit framework..." + cp lib/.libs/libcurl.dylib libcurl.framework/${FRAMEWORK_VERSION}/libcurl64 + install_name_tool -id @rpath/libcurl.framework/${FRAMEWORK_VERSION}/libcurl libcurl.framework/${FRAMEWORK_VERSION}/libcurl64 + cp libcurl.framework/${FRAMEWORK_VERSION}/libcurl libcurl.framework/${FRAMEWORK_VERSION}/libcurl32 + pwd + lipo libcurl.framework/${FRAMEWORK_VERSION}/libcurl32 libcurl.framework/${FRAMEWORK_VERSION}/libcurl64 -create -output libcurl.framework/${FRAMEWORK_VERSION}/libcurl + rm libcurl.framework/${FRAMEWORK_VERSION}/libcurl32 libcurl.framework/${FRAMEWORK_VERSION}/libcurl64 + cp libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild.h libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild32.h + cp include/curl/curlbuild.h libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild64.h + cat >libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild.h <, et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. 
+# +########################################################################### + +AUTOMAKE_OPTIONS = foreign + +ACLOCAL_AMFLAGS = -I m4 + +CMAKE_DIST = CMakeLists.txt CMake/CMakeConfigurableFile.in \ + CMake/CurlTests.c CMake/FindGSS.cmake CMake/OtherTests.cmake \ + CMake/Platforms/WindowsCache.cmake CMake/Utilities.cmake \ + include/curl/curlbuild.h.cmake CMake/Macros.cmake \ + CMake/CurlSymbolHiding.cmake CMake/FindCARES.cmake \ + CMake/FindLibSSH2.cmake CMake/FindNGHTTP2.cmake \ + CMake/FindMbedTLS.cmake + +VC6_LIBTMPL = projects/Windows/VC6/lib/libcurl.tmpl +VC6_LIBDSP = projects/Windows/VC6/lib/libcurl.dsp.dist +VC6_LIBDSP_DEPS = $(VC6_LIBTMPL) Makefile.am lib/Makefile.inc +VC6_SRCTMPL = projects/Windows/VC6/src/curl.tmpl +VC6_SRCDSP = projects/Windows/VC6/src/curl.dsp.dist +VC6_SRCDSP_DEPS = $(VC6_SRCTMPL) Makefile.am src/Makefile.inc + +VC7_LIBTMPL = projects/Windows/VC7/lib/libcurl.tmpl +VC7_LIBVCPROJ = projects/Windows/VC7/lib/libcurl.vcproj.dist +VC7_LIBVCPROJ_DEPS = $(VC7_LIBTMPL) Makefile.am lib/Makefile.inc +VC7_SRCTMPL = projects/Windows/VC7/src/curl.tmpl +VC7_SRCVCPROJ = projects/Windows/VC7/src/curl.vcproj.dist +VC7_SRCVCPROJ_DEPS = $(VC7_SRCTMPL) Makefile.am src/Makefile.inc + +VC71_LIBTMPL = projects/Windows/VC7.1/lib/libcurl.tmpl +VC71_LIBVCPROJ = projects/Windows/VC7.1/lib/libcurl.vcproj.dist +VC71_LIBVCPROJ_DEPS = $(VC71_LIBTMPL) Makefile.am lib/Makefile.inc +VC71_SRCTMPL = projects/Windows/VC7.1/src/curl.tmpl +VC71_SRCVCPROJ = projects/Windows/VC7.1/src/curl.vcproj.dist +VC71_SRCVCPROJ_DEPS = $(VC71_SRCTMPL) Makefile.am src/Makefile.inc + +VC8_LIBTMPL = projects/Windows/VC8/lib/libcurl.tmpl +VC8_LIBVCPROJ = projects/Windows/VC8/lib/libcurl.vcproj.dist +VC8_LIBVCPROJ_DEPS = $(VC8_LIBTMPL) Makefile.am lib/Makefile.inc +VC8_SRCTMPL = projects/Windows/VC8/src/curl.tmpl +VC8_SRCVCPROJ = projects/Windows/VC8/src/curl.vcproj.dist +VC8_SRCVCPROJ_DEPS = $(VC8_SRCTMPL) Makefile.am src/Makefile.inc + +VC9_LIBTMPL = projects/Windows/VC9/lib/libcurl.tmpl +VC9_LIBVCPROJ = projects/Windows/VC9/lib/libcurl.vcproj.dist +VC9_LIBVCPROJ_DEPS = $(VC9_LIBTMPL) Makefile.am lib/Makefile.inc +VC9_SRCTMPL = projects/Windows/VC9/src/curl.tmpl +VC9_SRCVCPROJ = projects/Windows/VC9/src/curl.vcproj.dist +VC9_SRCVCPROJ_DEPS = $(VC9_SRCTMPL) Makefile.am src/Makefile.inc + +VC10_LIBTMPL = projects/Windows/VC10/lib/libcurl.tmpl +VC10_LIBVCXPROJ = projects/Windows/VC10/lib/libcurl.vcxproj.dist +VC10_LIBVCXPROJ_DEPS = $(VC10_LIBTMPL) Makefile.am lib/Makefile.inc +VC10_SRCTMPL = projects/Windows/VC10/src/curl.tmpl +VC10_SRCVCXPROJ = projects/Windows/VC10/src/curl.vcxproj.dist +VC10_SRCVCXPROJ_DEPS = $(VC10_SRCTMPL) Makefile.am src/Makefile.inc + +VC11_LIBTMPL = projects/Windows/VC11/lib/libcurl.tmpl +VC11_LIBVCXPROJ = projects/Windows/VC11/lib/libcurl.vcxproj.dist +VC11_LIBVCXPROJ_DEPS = $(VC11_LIBTMPL) Makefile.am lib/Makefile.inc +VC11_SRCTMPL = projects/Windows/VC11/src/curl.tmpl +VC11_SRCVCXPROJ = projects/Windows/VC11/src/curl.vcxproj.dist +VC11_SRCVCXPROJ_DEPS = $(VC11_SRCTMPL) Makefile.am src/Makefile.inc + +VC12_LIBTMPL = projects/Windows/VC12/lib/libcurl.tmpl +VC12_LIBVCXPROJ = projects/Windows/VC12/lib/libcurl.vcxproj.dist +VC12_LIBVCXPROJ_DEPS = $(VC12_LIBTMPL) Makefile.am lib/Makefile.inc +VC12_SRCTMPL = projects/Windows/VC12/src/curl.tmpl +VC12_SRCVCXPROJ = projects/Windows/VC12/src/curl.vcxproj.dist +VC12_SRCVCXPROJ_DEPS = $(VC12_SRCTMPL) Makefile.am src/Makefile.inc + +VC14_LIBTMPL = projects/Windows/VC14/lib/libcurl.tmpl +VC14_LIBVCXPROJ = projects/Windows/VC14/lib/libcurl.vcxproj.dist 
+VC14_LIBVCXPROJ_DEPS = $(VC14_LIBTMPL) Makefile.am lib/Makefile.inc +VC14_SRCTMPL = projects/Windows/VC14/src/curl.tmpl +VC14_SRCVCXPROJ = projects/Windows/VC14/src/curl.vcxproj.dist +VC14_SRCVCXPROJ_DEPS = $(VC14_SRCTMPL) Makefile.am src/Makefile.inc + +VC_DIST = projects/README \ + projects/build-openssl.bat \ + projects/build-wolfssl.bat \ + projects/checksrc.bat \ + projects/Windows/VC6/curl-all.dsw \ + projects/Windows/VC6/lib/libcurl.dsw \ + projects/Windows/VC6/src/curl.dsw \ + projects/Windows/VC7/curl-all.sln \ + projects/Windows/VC7/lib/libcurl.sln \ + projects/Windows/VC7/src/curl.sln \ + projects/Windows/VC7.1/curl-all.sln \ + projects/Windows/VC7.1/lib/libcurl.sln \ + projects/Windows/VC7.1/src/curl.sln \ + projects/Windows/VC8/curl-all.sln \ + projects/Windows/VC8/lib/libcurl.sln \ + projects/Windows/VC8/src/curl.sln \ + projects/Windows/VC9/curl-all.sln \ + projects/Windows/VC9/lib/libcurl.sln \ + projects/Windows/VC9/src/curl.sln \ + projects/Windows/VC10/curl-all.sln \ + projects/Windows/VC10/lib/libcurl.sln \ + projects/Windows/VC10/lib/libcurl.vcxproj.filters \ + projects/Windows/VC10/src/curl.sln \ + projects/Windows/VC10/src/curl.vcxproj.filters \ + projects/Windows/VC11/curl-all.sln \ + projects/Windows/VC11/lib/libcurl.sln \ + projects/Windows/VC11/lib/libcurl.vcxproj.filters \ + projects/Windows/VC11/src/curl.sln \ + projects/Windows/VC11/src/curl.vcxproj.filters \ + projects/Windows/VC12/curl-all.sln \ + projects/Windows/VC12/lib/libcurl.sln \ + projects/Windows/VC12/lib/libcurl.vcxproj.filters \ + projects/Windows/VC12/src/curl.sln \ + projects/Windows/VC12/src/curl.vcxproj.filters \ + projects/Windows/VC14/curl-all.sln \ + projects/Windows/VC14/lib/libcurl.sln \ + projects/Windows/VC14/lib/libcurl.vcxproj.filters \ + projects/Windows/VC14/src/curl.sln \ + projects/Windows/VC14/src/curl.vcxproj.filters \ + projects/generate.bat \ + projects/wolfssl_options.h \ + projects/wolfssl_override.props + +WINBUILD_DIST = winbuild/BUILD.WINDOWS.txt winbuild/gen_resp_file.bat \ + winbuild/MakefileBuild.vc winbuild/Makefile.vc + +EXTRA_DIST = CHANGES COPYING maketgz Makefile.dist curl-config.in \ + RELEASE-NOTES buildconf libcurl.pc.in MacOSX-Framework scripts/zsh.pl \ + scripts/updatemanpages.pl $(CMAKE_DIST) $(VC_DIST) $(WINBUILD_DIST) \ + lib/libcurl.vers.in buildconf.bat scripts/coverage.sh + +CLEANFILES = $(VC6_LIBDSP) $(VC6_SRCDSP) $(VC7_LIBVCPROJ) $(VC7_SRCVCPROJ) \ + $(VC71_LIBVCPROJ) $(VC71_SRCVCPROJ) $(VC8_LIBVCPROJ) $(VC8_SRCVCPROJ) \ + $(VC9_LIBVCPROJ) $(VC9_SRCVCPROJ) $(VC10_LIBVCXPROJ) $(VC10_SRCVCXPROJ) \ + $(VC11_LIBVCXPROJ) $(VC11_SRCVCXPROJ) $(VC12_LIBVCXPROJ) $(VC12_SRCVCXPROJ) \ + $(VC14_LIBVCXPROJ) $(VC14_SRCVCXPROJ) + +bin_SCRIPTS = curl-config + +SUBDIRS = lib docs src include +DIST_SUBDIRS = $(SUBDIRS) tests packages scripts + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libcurl.pc + +# List of files required to generate VC IDE .dsp, .vcproj and .vcxproj files +include lib/Makefile.inc +include src/Makefile.inc + +dist-hook: + rm -rf $(top_builddir)/tests/log + find $(distdir) -name "*.dist" -exec rm {} \; + (distit=`find $(srcdir) -name "*.dist" | grep -v ./ares/`; \ + for file in $$distit; do \ + strip=`echo $$file | sed -e s/^$(srcdir)// -e s/\.dist//`; \ + cp $$file $(distdir)$$strip; \ + done) + +html: + cd docs && $(MAKE) html + +pdf: + cd docs && $(MAKE) pdf + +check: test examples check-docs + +if CROSSCOMPILING +test-full: test +test-torture: test + +test: + @echo "NOTICE: we can't run the tests when cross-compiling!" 
+ +else + +test: + @(cd tests; $(MAKE) all quiet-test) + +test-full: + @(cd tests; $(MAKE) all full-test) + +test-nonflaky: + @(cd tests; $(MAKE) all nonflaky-test) + +test-torture: + @(cd tests; $(MAKE) all torture-test) + +test-event: + @(cd tests; $(MAKE) all event-test) + +test-am: + @(cd tests; $(MAKE) all am-test) + +endif + +examples: + @(cd docs/examples; $(MAKE) check) + +check-docs: + @(cd docs/libcurl; $(MAKE) check) + +# This is a hook to have 'make clean' also clean up the docs and the tests +# dir. The extra check for the Makefiles being present is necessary because +# 'make distcheck' will make clean first in these directories _before_ it runs +# this hook. +clean-local: + @(if test -f tests/Makefile; then cd tests; $(MAKE) clean; fi) + @(if test -f docs/Makefile; then cd docs; $(MAKE) clean; fi) + +# +# Build source and binary rpms. For rpm-3.0 and above, the ~/.rpmmacros +# must contain the following line: +# %_topdir /home/loic/local/rpm +# and that /home/loic/local/rpm contains the directory SOURCES, BUILD etc. +# +# cd /home/loic/local/rpm ; mkdir -p SOURCES BUILD RPMS/i386 SPECS SRPMS +# +# If additional configure flags are needed to build the package, add the +# following in ~/.rpmmacros +# %configure CFLAGS="%{optflags}" ./configure %{_target_platform} --prefix=%{_prefix} ${AM_CONFIGFLAGS} +# and run make rpm in the following way: +# AM_CONFIGFLAGS='--with-uri=/home/users/loic/local/RedHat-6.2' make rpm +# + +rpms: + $(MAKE) RPMDIST=curl rpm + $(MAKE) RPMDIST=curl-ssl rpm + +rpm: + RPM_TOPDIR=`rpm --showrc | $(PERL) -n -e 'print if(s/.*_topdir\s+(.*)/$$1/)'` ; \ + cp $(srcdir)/packages/Linux/RPM/$(RPMDIST).spec $$RPM_TOPDIR/SPECS ; \ + cp $(PACKAGE)-$(VERSION).tar.gz $$RPM_TOPDIR/SOURCES ; \ + rpm -ba --clean --rmsource $$RPM_TOPDIR/SPECS/$(RPMDIST).spec ; \ + mv $$RPM_TOPDIR/RPMS/i386/$(RPMDIST)-*.rpm . ; \ + mv $$RPM_TOPDIR/SRPMS/$(RPMDIST)-*.src.rpm . + +# +# Build a Solaris pkgadd format file +# run 'make pkgadd' once you've done './configure' and 'make' to make a Solaris pkgadd format +# file (which ends up back in this directory). +# The pkgadd file is in 'pkgtrans' format, so to install on Solaris, do +# pkgadd -d ./HAXXcurl-* +# + +# gak - libtool requires an absolute directory, hence the pwd below... 
+pkgadd: + umask 022 ; \ + $(MAKE) install DESTDIR=`/bin/pwd`/packages/Solaris/root ; \ + cat COPYING > $(srcdir)/packages/Solaris/copyright ; \ + cd $(srcdir)/packages/Solaris && $(MAKE) package + +# +# Build a cygwin binary tarball installation file +# resulting .tar.bz2 file will end up at packages/Win32/cygwin +cygwinbin: + $(MAKE) -C packages/Win32/cygwin cygwinbin + +# We extend the standard install with a custom hook: +install-data-hook: + cd include && $(MAKE) install + cd docs && $(MAKE) install + +# We extend the standard uninstall with a custom hook: +uninstall-hook: + cd include && $(MAKE) uninstall + cd docs && $(MAKE) uninstall + +ca-bundle: lib/mk-ca-bundle.pl + @echo "generating a fresh ca-bundle.crt" + @perl $< -b -l -u lib/ca-bundle.crt + +ca-firefox: lib/firefox-db2pem.sh + @echo "generating a fresh ca-bundle.crt" + ./lib/firefox-db2pem.sh lib/ca-bundle.crt + +checksrc: + cd lib && $(MAKE) checksrc + cd src && $(MAKE) checksrc + cd tests && $(MAKE) checksrc + cd include/curl && $(MAKE) checksrc + cd docs/examples && $(MAKE) checksrc + +.PHONY: vc-ide + +vc-ide: $(VC6_LIBDSP_DEPS) $(VC6_SRCDSP_DEPS) $(VC7_LIBVCPROJ_DEPS) \ + $(VC7_SRCVCPROJ_DEPS) $(VC71_LIBVCPROJ_DEPS) $(VC71_SRCVCPROJ_DEPS) \ + $(VC8_LIBVCPROJ_DEPS) $(VC8_SRCVCPROJ_DEPS) $(VC9_LIBVCPROJ_DEPS) \ + $(VC9_SRCVCPROJ_DEPS) $(VC10_LIBVCXPROJ_DEPS) $(VC10_SRCVCXPROJ_DEPS) \ + $(VC11_LIBVCXPROJ_DEPS) $(VC11_SRCVCXPROJ_DEPS) $(VC12_LIBVCXPROJ_DEPS) \ + $(VC12_SRCVCXPROJ_DEPS) $(VC14_LIBVCXPROJ_DEPS) $(VC14_SRCVCXPROJ_DEPS) + @(win32_lib_srcs='$(LIB_CFILES)'; \ + win32_lib_hdrs='$(LIB_HFILES) config-win32.h'; \ + win32_lib_rc='$(LIB_RCFILES)'; \ + win32_lib_vauth_srcs='$(LIB_VAUTH_CFILES)'; \ + win32_lib_vauth_hdrs='$(LIB_VAUTH_HFILES)'; \ + win32_lib_vtls_srcs='$(LIB_VTLS_CFILES)'; \ + win32_lib_vtls_hdrs='$(LIB_VTLS_HFILES)'; \ + win32_src_srcs='$(CURL_CFILES)'; \ + win32_src_hdrs='$(CURL_HFILES)'; \ + win32_src_rc='$(CURL_RCFILES)'; \ + win32_src_x_srcs='$(CURLX_CFILES)'; \ + win32_src_x_hdrs='$(CURLX_HFILES) ../lib/config-win32.h'; \ + \ + sorted_lib_srcs=`for file in $$win32_lib_srcs; do echo $$file; done | sort`; \ + sorted_lib_hdrs=`for file in $$win32_lib_hdrs; do echo $$file; done | sort`; \ + sorted_lib_vauth_srcs=`for file in $$win32_lib_vauth_srcs; do echo $$file; done | sort`; \ + sorted_lib_vauth_hdrs=`for file in $$win32_lib_vauth_hdrs; do echo $$file; done | sort`; \ + sorted_lib_vtls_srcs=`for file in $$win32_lib_vtls_srcs; do echo $$file; done | sort`; \ + sorted_lib_vtls_hdrs=`for file in $$win32_lib_vtls_hdrs; do echo $$file; done | sort`; \ + sorted_src_srcs=`for file in $$win32_src_srcs; do echo $$file; done | sort`; \ + sorted_src_hdrs=`for file in $$win32_src_hdrs; do echo $$file; done | sort`; \ + sorted_src_x_srcs=`for file in $$win32_src_x_srcs; do echo $$file; done | sort`; \ + sorted_src_x_hdrs=`for file in $$win32_src_x_hdrs; do echo $$file; done | sort`; \ + \ + awk_code='\ +function gen_element(type, dir, file)\ +{\ + sub(/vauth\//, "", file);\ + sub(/vtls\//, "", file);\ +\ + spaces=" ";\ + if(dir == "lib\\vauth" || dir == "lib\\vtls")\ + tabs=" ";\ + else\ + tabs=" ";\ +\ + if(type == "dsp") {\ + printf("# Begin Source File\r\n");\ + printf("\r\n");\ + printf("SOURCE=..\\..\\..\\..\\%s\\%s\r\n", dir, file);\ + printf("# End Source File\r\n");\ + }\ + else if(type == "vcproj1") {\ + printf("%s\r\n",\ + tabs, dir, file);\ + printf("%s\r\n", tabs);\ + }\ + else if(type == "vcproj2") {\ + printf("%s\r\n", tabs);\ + printf("%s\r\n", tabs);\ + }\ + else if(type == "vcxproj") {\ + i = 
index(file, ".");\ + ext = substr(file, i == 0 ? 0 : i + 1);\ +\ + if(ext == "c")\ + printf("%s\r\n",\ + spaces, dir, file);\ + else if(ext == "h")\ + printf("%s\r\n",\ + spaces, dir, file);\ + else if(ext == "rc")\ + printf("%s\r\n",\ + spaces, dir, file);\ + }\ +}\ +\ +{\ +\ + if($$0 == "CURL_LIB_C_FILES") {\ + split(lib_srcs, arr);\ + for(val in arr) gen_element(proj_type, "lib", arr[val]);\ + }\ + else if($$0 == "CURL_LIB_H_FILES") {\ + split(lib_hdrs, arr);\ + for(val in arr) gen_element(proj_type, "lib", arr[val]);\ + }\ + else if($$0 == "CURL_LIB_RC_FILES") {\ + split(lib_rc, arr);\ + for(val in arr) gen_element(proj_type, "lib", arr[val]);\ + }\ + else if($$0 == "CURL_LIB_VAUTH_C_FILES") {\ + split(lib_vauth_srcs, arr);\ + for(val in arr) gen_element(proj_type, "lib\\vauth", arr[val]);\ + }\ + else if($$0 == "CURL_LIB_VAUTH_H_FILES") {\ + split(lib_vauth_hdrs, arr);\ + for(val in arr) gen_element(proj_type, "lib\\vauth", arr[val]);\ + }\ + else if($$0 == "CURL_LIB_VTLS_C_FILES") {\ + split(lib_vtls_srcs, arr);\ + for(val in arr) gen_element(proj_type, "lib\\vtls", arr[val]);\ + }\ + else if($$0 == "CURL_LIB_VTLS_H_FILES") {\ + split(lib_vtls_hdrs, arr);\ + for(val in arr) gen_element(proj_type, "lib\\vtls", arr[val]);\ + }\ + else if($$0 == "CURL_SRC_C_FILES") {\ + split(src_srcs, arr);\ + for(val in arr) gen_element(proj_type, "src", arr[val]);\ + }\ + else if($$0 == "CURL_SRC_H_FILES") {\ + split(src_hdrs, arr);\ + for(val in arr) gen_element(proj_type, "src", arr[val]);\ + }\ + else if($$0 == "CURL_SRC_RC_FILES") {\ + split(src_rc, arr);\ + for(val in arr) gen_element(proj_type, "src", arr[val]);\ + }\ + else if($$0 == "CURL_SRC_X_C_FILES") {\ + split(src_x_srcs, arr);\ + for(val in arr) {\ + sub(/..\/lib\//, "", arr[val]);\ + gen_element(proj_type, "lib", arr[val]);\ + }\ + }\ + else if($$0 == "CURL_SRC_X_H_FILES") {\ + split(src_x_hdrs, arr);\ + for(val in arr) {\ + sub(/..\/lib\//, "", arr[val]);\ + gen_element(proj_type, "lib", arr[val]);\ + }\ + }\ + else\ + printf("%s\r\n", $$0);\ +}';\ + \ + echo "generating '$(VC6_LIBDSP)'"; \ + awk -v proj_type=dsp \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC6_LIBTMPL) > $(VC6_LIBDSP) || { exit 1; }; \ + \ + echo "generating '$(VC6_SRCDSP)'"; \ + awk -v proj_type=dsp \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC6_SRCTMPL) > $(VC6_SRCDSP) || { exit 1; }; \ + \ + echo "generating '$(VC7_LIBVCPROJ)'"; \ + awk -v proj_type=vcproj1 \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC7_LIBTMPL) > $(VC7_LIBVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC7_SRCVCPROJ)'"; \ + awk -v proj_type=vcproj1 \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC7_SRCTMPL) > 
$(VC7_SRCVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC71_LIBVCPROJ)'"; \ + awk -v proj_type=vcproj1 \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC71_LIBTMPL) > $(VC71_LIBVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC71_SRCVCPROJ)'"; \ + awk -v proj_type=vcproj1 \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC71_SRCTMPL) > $(VC71_SRCVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC8_LIBVCPROJ)'"; \ + awk -v proj_type=vcproj2 \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC8_LIBTMPL) > $(VC8_LIBVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC8_SRCVCPROJ)'"; \ + awk -v proj_type=vcproj2 \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC8_SRCTMPL) > $(VC8_SRCVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC9_LIBVCPROJ)'"; \ + awk -v proj_type=vcproj2 \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC9_LIBTMPL) > $(VC9_LIBVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC9_SRCVCPROJ)'"; \ + awk -v proj_type=vcproj2 \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC9_SRCTMPL) > $(VC9_SRCVCPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC10_LIBVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC10_LIBTMPL) > $(VC10_LIBVCXPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC10_SRCVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC10_SRCTMPL) > $(VC10_SRCVCXPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC11_LIBVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" 
$(srcdir)/$(VC11_LIBTMPL) > $(VC11_LIBVCXPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC11_SRCVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC11_SRCTMPL) > $(VC11_SRCVCXPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC12_LIBVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC12_LIBTMPL) > $(VC12_LIBVCXPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC12_SRCVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC12_SRCTMPL) > $(VC12_SRCVCXPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC14_LIBVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v lib_srcs="$$sorted_lib_srcs" \ + -v lib_hdrs="$$sorted_lib_hdrs" \ + -v lib_rc="$$win32_lib_rc" \ + -v lib_vauth_srcs="$$sorted_lib_vauth_srcs" \ + -v lib_vauth_hdrs="$$sorted_lib_vauth_hdrs" \ + -v lib_vtls_srcs="$$sorted_lib_vtls_srcs" \ + -v lib_vtls_hdrs="$$sorted_lib_vtls_hdrs" \ + "$$awk_code" $(srcdir)/$(VC14_LIBTMPL) > $(VC14_LIBVCXPROJ) || { exit 1; }; \ + \ + echo "generating '$(VC14_SRCVCXPROJ)'"; \ + awk -v proj_type=vcxproj \ + -v src_srcs="$$sorted_src_srcs" \ + -v src_hdrs="$$sorted_src_hdrs" \ + -v src_rc="$$win32_src_rc" \ + -v src_x_srcs="$$sorted_src_x_srcs" \ + -v src_x_hdrs="$$sorted_src_x_hdrs" \ + "$$awk_code" $(srcdir)/$(VC14_SRCTMPL) > $(VC14_SRCVCXPROJ) || { exit 1; };) diff --git a/deps-win32/curl-7.54.1/README b/deps-win32/curl-7.54.1/README new file mode 100644 index 0000000..f0b3b93 --- /dev/null +++ b/deps-win32/curl-7.54.1/README @@ -0,0 +1,49 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +README + + Curl is a command line tool for transferring data specified with URL + syntax. Find out how to use curl by reading the curl.1 man page or the + MANUAL document. Find out how to install Curl by reading the INSTALL + document. + + libcurl is the library curl is using to do its job. It is readily + available to be used by your software. Read the libcurl.3 man page to + learn how! + + You find answers to the most frequent questions we get in the FAQ document. + + Study the COPYING file for distribution terms and similar. If you distribute + curl binaries or other binaries that involve libcurl, you might enjoy the + LICENSE-MIXING document. + +CONTACT + + If you have problems, questions, ideas or suggestions, please contact us + by posting to a suitable mailing list. See https://curl.haxx.se/mail/ + + All contributors to the project are listed in the THANKS document. 
+ +WEB SITE + + Visit the curl web site for the latest news and downloads: + + https://curl.haxx.se/ + +GIT + + To download the very latest source off the GIT server do this: + + git clone https://github.com/curl/curl.git + + (you'll get a directory named curl created, filled with the source code) + +NOTICE + + Curl contains pieces of source code that is Copyright (c) 1998, 1999 + Kungliga Tekniska Högskolan. This notice is included here to comply with the + distribution terms. diff --git a/deps-win32/curl-7.54.1/RELEASE-NOTES b/deps-win32/curl-7.54.1/RELEASE-NOTES new file mode 100644 index 0000000..532a203 --- /dev/null +++ b/deps-win32/curl-7.54.1/RELEASE-NOTES @@ -0,0 +1,219 @@ +Curl and libcurl 7.54.1 + + Public curl releases: 166 + Command line options: 207 + curl_easy_setopt() options: 245 + Public functions in libcurl: 61 + Contributors: 1571 + +This release includes the following changes: + + o curl: show the libcurl release date in --version output [32] + +This release includes the following bugfixes: + + o CVE-2017-9502: file: URL buffer overflow [65] + o openssl: fix memory leak in servercert + o tests: remove the html and PDF versions from the tarball + o mbedtls: enable NTLM (& SMB) even if MD4 support is unavailable + o typecheck-gcc: handle function pointers properly [1] + o llist: no longer uses malloc [2] + o gnutls: removed some code when --disable-verbose is configured + o lib: fix maybe-uninitialized warnings + o multi: clarify condition in curl_multi_wait [3] + o schannel: Don't treat encrypted partial record as pending data [4] + o configure: fix the -ldl check for openssl, add -lpthread check [5] + o configure: accept -Og and -Ofast GCC flags [6] + o Makefile: avoid use of GNU-specific form of $< [7] + o if2ip: fix -Wcast-align warning + o configure: stop prepending to LDFLAGS, CPPFLAGS [8] + o curl: set a 100K buffer size by default [9] + o typecheck-gcc: fix _curl_is_slist_info [10] + o nss: do not leak PKCS #11 slot while loading a key [11] + o nss: load libnssckbi.so if no other trust is specified [12] + o examples: ftpuploadfrommem.c [13] + o url: declare get_protocol_family() static [14] + o examples/cookie_interface.c: changed to example.com + o test1443: test --remote-time + o curl: use utimes instead of obsolescent utime when available + o url: fixed a memory leak on OOM while setting CURLOPT_BUFFERSIZE + o curl_rtmp: fix missing-variable-declarations warnings + o tests: fixed OOM handling of unit tests to abort test + o curl_setup: Ensure no more than one IDN lib is enabled [15] + o tool: Fix missing prototype warnings for CURL_DOES_CONVERSIONS [16] + o CURLOPT_BUFFERSIZE: 1024 bytes is now the minimum size [17] + o curl: non-boolean command line args reject --no- prefixes [18] + o telnet: Write full buffer instead of byte-by-byte [19] + o typecheck-gcc: add missing string options [20] + o typecheck-gcc: add support for CURLINFO_SOCKET [21] + o opt man pages: they all have examples now + o curl_setup_once: use SEND_QUAL_ARG2 for swrite [22] + o test557: set a known good numeric locale + o schannel: return a more specific error code for SEC_E_UNTRUSTED_ROOT + o tests/server: make string literals const + o runtests: use -R for random order [23] + o unit1305: fix compiler warning + o curl_slist_append.3: clarify a NULL input creates a new list + o tests/server: run checksrc by default in debug-builds + o tests: fix -Wcast-qual warnings + o runtests.pl: simplify the datacheck read section + o curl: remove --environment and tool_writeenv.c [24] + o buildconf: 
fix hang on IRIX [25] + o tftp: silence bad-function-cast warning + o asyn-thread: fix unused macro warnings + o tool_parsecfg: fix -Wcast-qual warning + o sendrecv: fix MinGW-w64 warning + o test537: use correct variable type [26] + o rand: treat fake entropy the same regardless of endianness [27] + o curl: generate the --help output [28] + o tests: removed redundant --trace-ascii arguments + o multi: assign IDs to all timers and make each timer singleton + o multi: use a fixed array of timers instead of malloc [29] + o mbedtls: Support server renegotiation request [30] + o pipeline: fix mistakenly trying to pipeline POSTs [31] + o lib510: don't write past the end of the buffer if it's too small + o CURLOPT_HTTPPROXYTUNNEL.3: clarify, add example + o SecureTransport/DarwinSSL: Implement public key pinning [33] + o curl.1: clarify --config + o curl_sasl: fix build error with CURL_DISABLE_CRYPTO_AUTH + USE_NTLM [34] + o darwinssl: Fix exception when processing a client-side certificate [35] + o curl.1: mention --oauth2-bearer's argument + o mkhelp.pl: do not add current time into curl binary [36] + o asiohiper.cpp / evhiperfifo.c: deal with negative timerfunction input [37] + o ssh: fix memory leak in disconnect due to timeout [38] + o tests: stabilize test 1034 [39] + o cmake: auto detection of CURL_CA_BUNDLE/CURL_CA_PATH [40] + o assert: avoid, use DEBUGASSERT instead [41] + o LDAP: using ldap_bind_s on Windows with methods [42] + o redirect: store the "would redirect to" URL when max redirs is reached [43] + o winbuild: fix the nghttp2 build [44] + o examples: fix -Wimplicit-fallthrough warnings + o time: fix type conversions and compiler warnings [45] + o mbedtls: fix variable shadow warning + o test557: fix ubsan runtime error due to int left shift [46] + o transfer: init the infilesize from the postfields [47] + o docs: clarify NO_PROXY further [48] + o build-wolfssl: Sync config with wolfSSL 3.11 + o curl-compilers.m4: enable -Wshift-sign-overflow for clang [49] + o example/externalsocket.c: make it use CLOSESOCKETFUNCTION too + o lib574.c: use correct callback proto + o lib583: fix compiler warning + o curl-compilers.m4: fix compiler_num for clang [50] + o typecheck-gcc.h: separate getinfo slist checks from other pointers [51] + o typecheck-gcc.h: check CURLINFO_TLS_SSL_PTR and CURLINFO_TLS_SESSION + o typecheck-gcc.h: check CURLINFO_CERTINFO [52] + o build: provide easy code coverage measuring [53] + o test1537: dedicated tests of the URL (un)escape API calls [54] + o curl_endian: remove unused functions [55] + o test1538: verify the libcurl strerror API calls + o MD(4|5): silence cast-align clang warning + o dedotdot: fixed output for ".." and "." 
only input [56] + o cyassl: define build macros before including ssl.h [57] + o updatemanpages.pl: error out on too old git version + o curl_sasl: fix unused-variable warning + o x509asn1: fix implicit-fallthrough warning with GCC 7 + o libtest: fix implicit-fallthrough warnings with GCC 7 + o BINDINGS: add Ring binding [58] + o curl_ntlm_core: pass unsigned char to toupper + o test1262: verify ftp download with -z for "if older than this" + o test1521: test all curl_easy_setopt options [59] + o typecheck-gcc: allow CURLOPT_STDERR to be NULL too + o metalink: remove unused printf() argument + o file: make speedcheck use current time for checks [60] + o configure: fix link with librtmp when specifying path [61] + o examples/multi-uv.c: fix deprecated symbol [62] + o cmake: Fix inconsistency regarding mbed TLS include directory [63] + o setopt: check CURLOPT_ADDRESS_SCOPE option range + o gitignore: ignore all vim swap files [64] + o urlglob: fix division by zero + o libressl: OCSP and intermediate certs workaround no longer needed [66] + +This release includes the following known bugs: + + o see docs/KNOWN_BUGS (https://curl.haxx.se/docs/knownbugs.html) + +This release would not have looked like this without help, code, reports and +advice from friends like these: + + Akhil Kedia, Alan Jenkins, Anatol Belski, Bernhard M. Wiedemann, + Brian Childs, canavan at github, Chris Carlmar, Dan Fandrich, + Daniel Stenberg, Edward Thomson, Gisle Vanem, GwanYeong Kim, + Helmut K. C. Tessarek, Joel Depooter, jonrumsey at github, Kai Engert, + Kamil Dudka, Kevin Ji, Lloyd Fournier, Mahmoud Samir Fayed, Marcel Raad, + Martin Kepplinger, Max Dymond, Michael Kaufmann, Nick Zitzmann, Paul Harris, + Phil Crump, Piotr Dobrogost, Ray Satiro, Richard Hsu, Ron Eldor, + Ryuichi KAWAMATA, Sergei Nikulov, Simon Warta, stootill at github, + Stuart Henderson, TheAssassin at github, Thomas Klausner, Travis Burtrum, + Vincas Razma, wyattoday at github, + (41 contributors) + + Thanks! 
(and sorry if I forgot to mention someone) + +References to bug reports and discussions on issues: + + [1] = https://curl.haxx.se/bug/?i=1403 + [2] = https://curl.haxx.se/bug/?i=1435 + [3] = https://curl.haxx.se/bug/?i=1439 + [4] = https://curl.haxx.se/bug/?i=1392 + [5] = https://curl.haxx.se/bug/?i=1427 + [6] = https://curl.haxx.se/bug/?i=1440 + [7] = https://curl.haxx.se/bug/?i=1432 + [8] = https://curl.haxx.se/bug/?i=1420 + [9] = https://curl.haxx.se/bug/?i=1446 + [10] = https://curl.haxx.se/bug/?i=1447 + [11] = https://bugzilla.redhat.com/1444860 + [12] = https://curl.haxx.se/bug/?i=1414 + [13] = https://curl.haxx.se/bug/?i=1451 + [14] = https://curl.haxx.se/mail/lib-2017-04/0127.html + [15] = https://github.com/curl/curl/issues/1441#issuecomment-297689856 + [16] = https://curl.haxx.se/bug/?i=1460 + [17] = https://curl.haxx.se/bug/?i=1449 + [18] = https://curl.haxx.se/bug/?i=1453 + [19] = https://curl.haxx.se/bug/?i=1389 + [20] = https://curl.haxx.se/bug/?i=1452 + [21] = https://curl.haxx.se/bug/?i=1452 + [22] = https://curl.haxx.se/bug/?i=1464 + [23] = https://curl.haxx.se/bug/?i=1466 + [24] = https://curl.haxx.se/bug/?i=1463 + [25] = https://curl.haxx.se/bug/?i=1471 + [26] = https://curl.haxx.se/bug/?i=1469 + [27] = https://curl.haxx.se/bug/?i=1315 + [28] = https://curl.haxx.se/bug/?i=1465 + [29] = https://curl.haxx.se/bug/?i=1472 + [30] = https://curl.haxx.se/bug/?i=1475 + [31] = https://curl.haxx.se/bug/?i=1481 + [32] = https://curl.haxx.se/bug/?i=1474 + [33] = https://curl.haxx.se/bug/?i=1400 + [34] = https://curl.haxx.se/bug/?i=1487 + [35] = https://curl.haxx.se/bug/?i=1450 + [36] = https://curl.haxx.se/bug/?i=1490 + [37] = https://curl.haxx.se/bug/?i=1253 + [38] = https://curl.haxx.se/bug/?i=1479 + [39] = https://curl.haxx.se/bug/?i=1488 + [40] = https://curl.haxx.se/bug/?i=1461 + [41] = https://curl.haxx.se/bug/?i=1504 + [42] = https://curl.haxx.se/bug/?i=878 + [43] = https://curl.haxx.se/bug/?i=1489 + [44] = https://curl.haxx.se/bug/?i=1321 + [45] = https://curl.haxx.se/bug/?i=1499 + [46] = https://curl.haxx.se/bug/?i=1516 + [47] = https://curl.haxx.se/bug/?i=1294 + [48] = https://curl.haxx.se/bug/?i=1208 + [49] = https://curl.haxx.se/bug/?i=1516 + [50] = https://curl.haxx.se/bug/?i=1522 + [51] = https://curl.haxx.se/bug/?i=1524 + [52] = https://curl.haxx.se/bug/?i=846 + [53] = https://curl.haxx.se/bug/?i=1528 + [54] = https://curl.haxx.se/bug/?i=1530 + [55] = https://curl.haxx.se/bug/?i=1529 + [56] = https://curl.haxx.se/bug/?i=1532 + [57] = https://curl.haxx.se/bug/?i=1536 + [58] = https://curl.haxx.se/bug/?i=1539 + [59] = https://curl.haxx.se/bug/?i=1543 + [60] = https://curl.haxx.se/bug/?i=1550 + [61] = https://curl.haxx.se/mail/lib-2017-06/0017.html + [62] = https://curl.haxx.se/bug/?i=1557 + [63] = https://curl.haxx.se/bug/?i=1541 + [64] = https://curl.haxx.se/bug/?i=1561 + [65] = https://curl.haxx.se/docs/adv_20170614.html + [66] = https://curl.haxx.se/mail/lib-2017-06/0038.html diff --git a/deps-win32/curl-7.54.1/acinclude.m4 b/deps-win32/curl-7.54.1/acinclude.m4 new file mode 100644 index 0000000..2abae8d --- /dev/null +++ b/deps-win32/curl-7.54.1/acinclude.m4 @@ -0,0 +1,3245 @@ +#*************************************************************************** +# _ _ ____ _ +# Project ___| | | | _ \| | +# / __| | | | |_) | | +# | (__| |_| | _ <| |___ +# \___|\___/|_| \_\_____| +# +# Copyright (C) 1998 - 2016, Daniel Stenberg, , et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. 
The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +#*************************************************************************** + +dnl CURL_CHECK_DEF (SYMBOL, [INCLUDES], [SILENT]) +dnl ------------------------------------------------- +dnl Use the C preprocessor to find out if the given object-style symbol +dnl is defined and get its expansion. This macro will not use default +dnl includes even if no INCLUDES argument is given. This macro will run +dnl silently when invoked with three arguments. If the expansion would +dnl result in a set of double-quoted strings the returned expansion will +dnl actually be a single double-quoted string concatenating all them. + +AC_DEFUN([CURL_CHECK_DEF], [ + AC_REQUIRE([CURL_CPP_P])dnl + OLDCPPFLAGS=$CPPFLAGS + # CPPPFLAG comes from CURL_CPP_P + CPPFLAGS="$CPPFLAGS $CPPPFLAG" + AS_VAR_PUSHDEF([ac_HaveDef], [curl_cv_have_def_$1])dnl + AS_VAR_PUSHDEF([ac_Def], [curl_cv_def_$1])dnl + if test -z "$SED"; then + AC_MSG_ERROR([SED not set. Cannot continue without SED being set.]) + fi + if test -z "$GREP"; then + AC_MSG_ERROR([GREP not set. Cannot continue without GREP being set.]) + fi + ifelse($3,,[AC_MSG_CHECKING([for preprocessor definition of $1])]) + tmp_exp="" + AC_PREPROC_IFELSE([ + AC_LANG_SOURCE( +ifelse($2,,,[$2])[[ +#ifdef $1 +CURL_DEF_TOKEN $1 +#endif + ]]) + ],[ + tmp_exp=`eval "$ac_cpp conftest.$ac_ext" 2>/dev/null | \ + "$GREP" CURL_DEF_TOKEN 2>/dev/null | \ + "$SED" 's/.*CURL_DEF_TOKEN[[ ]][[ ]]*//' 2>/dev/null | \ + "$SED" 's/[["]][[ ]]*[["]]//g' 2>/dev/null` + if test -z "$tmp_exp" || test "$tmp_exp" = "$1"; then + tmp_exp="" + fi + ]) + if test -z "$tmp_exp"; then + AS_VAR_SET(ac_HaveDef, no) + ifelse($3,,[AC_MSG_RESULT([no])]) + else + AS_VAR_SET(ac_HaveDef, yes) + AS_VAR_SET(ac_Def, $tmp_exp) + ifelse($3,,[AC_MSG_RESULT([$tmp_exp])]) + fi + AS_VAR_POPDEF([ac_Def])dnl + AS_VAR_POPDEF([ac_HaveDef])dnl + CPPFLAGS=$OLDCPPFLAGS +]) + + +dnl CURL_CHECK_DEF_CC (SYMBOL, [INCLUDES], [SILENT]) +dnl ------------------------------------------------- +dnl Use the C compiler to find out only if the given symbol is defined +dnl or not, this can not find out its expansion. This macro will not use +dnl default includes even if no INCLUDES argument is given. This macro +dnl will run silently when invoked with three arguments. + +AC_DEFUN([CURL_CHECK_DEF_CC], [ + AS_VAR_PUSHDEF([ac_HaveDef], [curl_cv_have_def_$1])dnl + ifelse($3,,[AC_MSG_CHECKING([for compiler definition of $1])]) + AC_COMPILE_IFELSE([ + AC_LANG_SOURCE( +ifelse($2,,,[$2])[[ +int main (void) +{ +#ifdef $1 + return 0; +#else + force compilation error +#endif +} + ]]) + ],[ + tst_symbol_defined="yes" + ],[ + tst_symbol_defined="no" + ]) + if test "$tst_symbol_defined" = "yes"; then + AS_VAR_SET(ac_HaveDef, yes) + ifelse($3,,[AC_MSG_RESULT([yes])]) + else + AS_VAR_SET(ac_HaveDef, no) + ifelse($3,,[AC_MSG_RESULT([no])]) + fi + AS_VAR_POPDEF([ac_HaveDef])dnl +]) + + +dnl CURL_CHECK_LIB_XNET +dnl ------------------------------------------------- +dnl Verify if X/Open network library is required. 
+ +AC_DEFUN([CURL_CHECK_LIB_XNET], [ + AC_MSG_CHECKING([if X/Open network library is required]) + tst_lib_xnet_required="no" + AC_COMPILE_IFELSE([ + AC_LANG_SOURCE([[ +int main (void) +{ +#if defined(__hpux) && defined(_XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600) + return 0; +#elif defined(__hpux) && defined(_XOPEN_SOURCE_EXTENDED) + return 0; +#else + force compilation error +#endif +} + ]]) + ],[ + tst_lib_xnet_required="yes" + LIBS="-lxnet $LIBS" + ]) + AC_MSG_RESULT([$tst_lib_xnet_required]) +]) + + +dnl CURL_CHECK_AIX_ALL_SOURCE +dnl ------------------------------------------------- +dnl Provides a replacement of traditional AC_AIX with +dnl an uniform behaviour across all autoconf versions, +dnl and with our own placement rules. + +AC_DEFUN([CURL_CHECK_AIX_ALL_SOURCE], [ + AH_VERBATIM([_ALL_SOURCE], + [/* Define to 1 if OS is AIX. */ +#ifndef _ALL_SOURCE +# undef _ALL_SOURCE +#endif]) + AC_BEFORE([$0], [AC_SYS_LARGEFILE])dnl + AC_BEFORE([$0], [CURL_CONFIGURE_REENTRANT])dnl + AC_BEFORE([$0], [CURL_CONFIGURE_PULL_SYS_POLL])dnl + AC_MSG_CHECKING([if OS is AIX (to define _ALL_SOURCE)]) + AC_EGREP_CPP([yes_this_is_aix],[ +#ifdef _AIX + yes_this_is_aix +#endif + ],[ + AC_MSG_RESULT([yes]) + AC_DEFINE(_ALL_SOURCE) + ],[ + AC_MSG_RESULT([no]) + ]) +]) + + +dnl CURL_CHECK_HEADER_WINDOWS +dnl ------------------------------------------------- +dnl Check for compilable and valid windows.h header + +AC_DEFUN([CURL_CHECK_HEADER_WINDOWS], [ + AC_CACHE_CHECK([for windows.h], [curl_cv_header_windows_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include + ]],[[ +#if defined(__CYGWIN__) || defined(__CEGCC__) + HAVE_WINDOWS_H shall not be defined. +#else + int dummy=2*WINVER; +#endif + ]]) + ],[ + curl_cv_header_windows_h="yes" + ],[ + curl_cv_header_windows_h="no" + ]) + ]) + case "$curl_cv_header_windows_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_WINDOWS_H, 1, + [Define to 1 if you have the windows.h header file.]) + AC_DEFINE_UNQUOTED(WIN32_LEAN_AND_MEAN, 1, + [Define to avoid automatic inclusion of winsock.h]) + ;; + esac +]) + + +dnl CURL_CHECK_NATIVE_WINDOWS +dnl ------------------------------------------------- +dnl Check if building a native Windows target + +AC_DEFUN([CURL_CHECK_NATIVE_WINDOWS], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINDOWS])dnl + AC_CACHE_CHECK([whether build target is a native Windows one], [curl_cv_native_windows], [ + if test "$curl_cv_header_windows_h" = "no"; then + curl_cv_native_windows="no" + else + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + ]],[[ +#if defined(__MINGW32__) || defined(__MINGW32CE__) || \ + (defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))) + int dummy=1; +#else + Not a native Windows build target. +#endif + ]]) + ],[ + curl_cv_native_windows="yes" + ],[ + curl_cv_native_windows="no" + ]) + fi + ]) + AM_CONDITIONAL(DOING_NATIVE_WINDOWS, test "x$curl_cv_native_windows" = xyes) +]) + + +dnl CURL_CHECK_HEADER_WINSOCK +dnl ------------------------------------------------- +dnl Check for compilable and valid winsock.h header + +AC_DEFUN([CURL_CHECK_HEADER_WINSOCK], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINDOWS])dnl + AC_CACHE_CHECK([for winsock.h], [curl_cv_header_winsock_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include + ]],[[ +#if defined(__CYGWIN__) || defined(__CEGCC__) + HAVE_WINSOCK_H shall not be defined. 
+#else + int dummy=WSACleanup(); +#endif + ]]) + ],[ + curl_cv_header_winsock_h="yes" + ],[ + curl_cv_header_winsock_h="no" + ]) + ]) + case "$curl_cv_header_winsock_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_WINSOCK_H, 1, + [Define to 1 if you have the winsock.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_HEADER_WINSOCK2 +dnl ------------------------------------------------- +dnl Check for compilable and valid winsock2.h header + +AC_DEFUN([CURL_CHECK_HEADER_WINSOCK2], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINDOWS])dnl + AC_CACHE_CHECK([for winsock2.h], [curl_cv_header_winsock2_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include + ]],[[ +#if defined(__CYGWIN__) || defined(__CEGCC__) || defined(__MINGW32CE__) + HAVE_WINSOCK2_H shall not be defined. +#else + int dummy=2*IPPROTO_ESP; +#endif + ]]) + ],[ + curl_cv_header_winsock2_h="yes" + ],[ + curl_cv_header_winsock2_h="no" + ]) + ]) + case "$curl_cv_header_winsock2_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_WINSOCK2_H, 1, + [Define to 1 if you have the winsock2.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_HEADER_WS2TCPIP +dnl ------------------------------------------------- +dnl Check for compilable and valid ws2tcpip.h header + +AC_DEFUN([CURL_CHECK_HEADER_WS2TCPIP], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINSOCK2])dnl + AC_CACHE_CHECK([for ws2tcpip.h], [curl_cv_header_ws2tcpip_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include +#include + ]],[[ +#if defined(__CYGWIN__) || defined(__CEGCC__) || defined(__MINGW32CE__) + HAVE_WS2TCPIP_H shall not be defined. +#else + int dummy=2*IP_PKTINFO; +#endif + ]]) + ],[ + curl_cv_header_ws2tcpip_h="yes" + ],[ + curl_cv_header_ws2tcpip_h="no" + ]) + ]) + case "$curl_cv_header_ws2tcpip_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_WS2TCPIP_H, 1, + [Define to 1 if you have the ws2tcpip.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_HEADER_WINLDAP +dnl ------------------------------------------------- +dnl Check for compilable and valid winldap.h header + +AC_DEFUN([CURL_CHECK_HEADER_WINLDAP], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINDOWS])dnl + AC_CACHE_CHECK([for winldap.h], [curl_cv_header_winldap_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#endif +#include + ]],[[ +#if defined(__CYGWIN__) || defined(__CEGCC__) + HAVE_WINLDAP_H shall not be defined. +#else + LDAP *ldp = ldap_init("dummy", LDAP_PORT); + ULONG res = ldap_unbind(ldp); +#endif + ]]) + ],[ + curl_cv_header_winldap_h="yes" + ],[ + curl_cv_header_winldap_h="no" + ]) + ]) + case "$curl_cv_header_winldap_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_WINLDAP_H, 1, + [Define to 1 if you have the winldap.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_HEADER_WINBER +dnl ------------------------------------------------- +dnl Check for compilable and valid winber.h header + +AC_DEFUN([CURL_CHECK_HEADER_WINBER], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINLDAP])dnl + AC_CACHE_CHECK([for winber.h], [curl_cv_header_winber_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#endif +#include +#include + ]],[[ +#if defined(__CYGWIN__) || defined(__CEGCC__) + HAVE_WINBER_H shall not be defined. 
+#else + BERVAL *bvp = NULL; + BerElement *bep = ber_init(bvp); + ber_free(bep, 1); +#endif + ]]) + ],[ + curl_cv_header_winber_h="yes" + ],[ + curl_cv_header_winber_h="no" + ]) + ]) + case "$curl_cv_header_winber_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_WINBER_H, 1, + [Define to 1 if you have the winber.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_HEADER_LBER +dnl ------------------------------------------------- +dnl Check for compilable and valid lber.h header, +dnl and check if it is needed even with ldap.h + +AC_DEFUN([CURL_CHECK_HEADER_LBER], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINDOWS])dnl + AC_CACHE_CHECK([for lber.h], [curl_cv_header_lber_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#endif +#ifndef NULL +#define NULL (void *)0 +#endif +#include + ]],[[ + BerValue *bvp = NULL; + BerElement *bep = ber_init(bvp); + ber_free(bep, 1); + ]]) + ],[ + curl_cv_header_lber_h="yes" + ],[ + curl_cv_header_lber_h="no" + ]) + ]) + if test "$curl_cv_header_lber_h" = "yes"; then + AC_DEFINE_UNQUOTED(HAVE_LBER_H, 1, + [Define to 1 if you have the lber.h header file.]) + # + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#endif +#ifndef NULL +#define NULL (void *)0 +#endif +#ifndef LDAP_DEPRECATED +#define LDAP_DEPRECATED 1 +#endif +#include + ]],[[ + BerValue *bvp = NULL; + BerElement *bep = ber_init(bvp); + ber_free(bep, 1); + ]]) + ],[ + curl_cv_need_header_lber_h="no" + ],[ + curl_cv_need_header_lber_h="yes" + ]) + # + case "$curl_cv_need_header_lber_h" in + yes) + AC_DEFINE_UNQUOTED(NEED_LBER_H, 1, + [Define to 1 if you need the lber.h header file even with ldap.h]) + ;; + esac + fi +]) + + +dnl CURL_CHECK_HEADER_LDAP +dnl ------------------------------------------------- +dnl Check for compilable and valid ldap.h header + +AC_DEFUN([CURL_CHECK_HEADER_LDAP], [ + AC_REQUIRE([CURL_CHECK_HEADER_LBER])dnl + AC_CACHE_CHECK([for ldap.h], [curl_cv_header_ldap_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#endif +#ifndef LDAP_DEPRECATED +#define LDAP_DEPRECATED 1 +#endif +#ifdef NEED_LBER_H +#include +#endif +#include + ]],[[ + LDAP *ldp = ldap_init("dummy", LDAP_PORT); + int res = ldap_unbind(ldp); + ]]) + ],[ + curl_cv_header_ldap_h="yes" + ],[ + curl_cv_header_ldap_h="no" + ]) + ]) + case "$curl_cv_header_ldap_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_LDAP_H, 1, + [Define to 1 if you have the ldap.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_HEADER_LDAP_SSL +dnl ------------------------------------------------- +dnl Check for compilable and valid ldap_ssl.h header + +AC_DEFUN([CURL_CHECK_HEADER_LDAP_SSL], [ + AC_REQUIRE([CURL_CHECK_HEADER_LDAP])dnl + AC_CACHE_CHECK([for ldap_ssl.h], [curl_cv_header_ldap_ssl_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#endif +#ifndef LDAP_DEPRECATED +#define LDAP_DEPRECATED 1 +#endif +#ifdef NEED_LBER_H +#include +#endif +#ifdef HAVE_LDAP_H +#include +#endif +#include + ]],[[ + LDAP *ldp = 
ldapssl_init("dummy", LDAPS_PORT, 1); + ]]) + ],[ + curl_cv_header_ldap_ssl_h="yes" + ],[ + curl_cv_header_ldap_ssl_h="no" + ]) + ]) + case "$curl_cv_header_ldap_ssl_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_LDAP_SSL_H, 1, + [Define to 1 if you have the ldap_ssl.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_HEADER_LDAPSSL +dnl ------------------------------------------------- +dnl Check for compilable and valid ldapssl.h header + +AC_DEFUN([CURL_CHECK_HEADER_LDAPSSL], [ + AC_REQUIRE([CURL_CHECK_HEADER_LDAP])dnl + AC_CACHE_CHECK([for ldapssl.h], [curl_cv_header_ldapssl_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#endif +#ifndef NULL +#define NULL (void *)0 +#endif +#ifndef LDAP_DEPRECATED +#define LDAP_DEPRECATED 1 +#endif +#ifdef NEED_LBER_H +#include +#endif +#ifdef HAVE_LDAP_H +#include +#endif +#include + ]],[[ + char *cert_label = NULL; + LDAP *ldp = ldap_ssl_init("dummy", LDAPS_PORT, cert_label); + ]]) + ],[ + curl_cv_header_ldapssl_h="yes" + ],[ + curl_cv_header_ldapssl_h="no" + ]) + ]) + case "$curl_cv_header_ldapssl_h" in + yes) + AC_DEFINE_UNQUOTED(HAVE_LDAPSSL_H, 1, + [Define to 1 if you have the ldapssl.h header file.]) + ;; + esac +]) + + +dnl CURL_CHECK_LIBS_WINLDAP +dnl ------------------------------------------------- +dnl Check for libraries needed for WINLDAP support, +dnl and prepended to LIBS any needed libraries. +dnl This macro can take an optional parameter with a +dnl white space separated list of libraries to check +dnl before the WINLDAP default ones. + +AC_DEFUN([CURL_CHECK_LIBS_WINLDAP], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINBER])dnl + # + AC_MSG_CHECKING([for WINLDAP libraries]) + # + u_libs="" + # + ifelse($1,,,[ + for x_lib in $1; do + case "$x_lib" in + -l*) + l_lib="$x_lib" + ;; + *) + l_lib="-l$x_lib" + ;; + esac + if test -z "$u_libs"; then + u_libs="$l_lib" + else + u_libs="$u_libs $l_lib" + fi + done + ]) + # + curl_cv_save_LIBS="$LIBS" + curl_cv_ldap_LIBS="unknown" + # + for x_nlibs in '' "$u_libs" \ + '-lwldap32' ; do + if test "$curl_cv_ldap_LIBS" = "unknown"; then + if test -z "$x_nlibs"; then + LIBS="$curl_cv_save_LIBS" + else + LIBS="$x_nlibs $curl_cv_save_LIBS" + fi + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINLDAP_H +#include +#endif +#ifdef HAVE_WINBER_H +#include +#endif +#endif + ]],[[ + BERVAL *bvp = NULL; + BerElement *bep = ber_init(bvp); + LDAP *ldp = ldap_init("dummy", LDAP_PORT); + ULONG res = ldap_unbind(ldp); + ber_free(bep, 1); + ]]) + ],[ + curl_cv_ldap_LIBS="$x_nlibs" + ]) + fi + done + # + LIBS="$curl_cv_save_LIBS" + # + case X-"$curl_cv_ldap_LIBS" in + X-unknown) + AC_MSG_RESULT([cannot find WINLDAP libraries]) + ;; + X-) + AC_MSG_RESULT([no additional lib required]) + ;; + *) + if test -z "$curl_cv_save_LIBS"; then + LIBS="$curl_cv_ldap_LIBS" + else + LIBS="$curl_cv_ldap_LIBS $curl_cv_save_LIBS" + fi + AC_MSG_RESULT([$curl_cv_ldap_LIBS]) + ;; + esac + # +]) + + +dnl CURL_CHECK_LIBS_LDAP +dnl ------------------------------------------------- +dnl Check for libraries needed for LDAP support, +dnl and prepended to LIBS any needed libraries. +dnl This macro can take an optional parameter with a +dnl white space separated list of libraries to check +dnl before the default ones. 
+ +AC_DEFUN([CURL_CHECK_LIBS_LDAP], [ + AC_REQUIRE([CURL_CHECK_HEADER_LDAP])dnl + # + AC_MSG_CHECKING([for LDAP libraries]) + # + u_libs="" + # + ifelse($1,,,[ + for x_lib in $1; do + case "$x_lib" in + -l*) + l_lib="$x_lib" + ;; + *) + l_lib="-l$x_lib" + ;; + esac + if test -z "$u_libs"; then + u_libs="$l_lib" + else + u_libs="$u_libs $l_lib" + fi + done + ]) + # + curl_cv_save_LIBS="$LIBS" + curl_cv_ldap_LIBS="unknown" + # + for x_nlibs in '' "$u_libs" \ + '-lldap' \ + '-llber -lldap' \ + '-lldap -llber' \ + '-lldapssl -lldapx -lldapsdk' \ + '-lldapsdk -lldapx -lldapssl' ; do + if test "$curl_cv_ldap_LIBS" = "unknown"; then + if test -z "$x_nlibs"; then + LIBS="$curl_cv_save_LIBS" + else + LIBS="$x_nlibs $curl_cv_save_LIBS" + fi + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#endif +#ifndef NULL +#define NULL (void *)0 +#endif +#ifndef LDAP_DEPRECATED +#define LDAP_DEPRECATED 1 +#endif +#ifdef NEED_LBER_H +#include +#endif +#ifdef HAVE_LDAP_H +#include +#endif + ]],[[ + BerValue *bvp = NULL; + BerElement *bep = ber_init(bvp); + LDAP *ldp = ldap_init("dummy", LDAP_PORT); + int res = ldap_unbind(ldp); + ber_free(bep, 1); + ]]) + ],[ + curl_cv_ldap_LIBS="$x_nlibs" + ]) + fi + done + # + LIBS="$curl_cv_save_LIBS" + # + case X-"$curl_cv_ldap_LIBS" in + X-unknown) + AC_MSG_RESULT([cannot find LDAP libraries]) + ;; + X-) + AC_MSG_RESULT([no additional lib required]) + ;; + *) + if test -z "$curl_cv_save_LIBS"; then + LIBS="$curl_cv_ldap_LIBS" + else + LIBS="$curl_cv_ldap_LIBS $curl_cv_save_LIBS" + fi + AC_MSG_RESULT([$curl_cv_ldap_LIBS]) + ;; + esac + # +]) + + +dnl CURL_CHECK_HEADER_MALLOC +dnl ------------------------------------------------- +dnl Check for compilable and valid malloc.h header, +dnl and check if it is needed even with stdlib.h + +AC_DEFUN([CURL_CHECK_HEADER_MALLOC], [ + AC_CACHE_CHECK([for malloc.h], [curl_cv_header_malloc_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#include + ]],[[ + void *p = malloc(10); + void *q = calloc(10,10); + free(p); + free(q); + ]]) + ],[ + curl_cv_header_malloc_h="yes" + ],[ + curl_cv_header_malloc_h="no" + ]) + ]) + if test "$curl_cv_header_malloc_h" = "yes"; then + AC_DEFINE_UNQUOTED(HAVE_MALLOC_H, 1, + [Define to 1 if you have the malloc.h header file.]) + # + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#include + ]],[[ + void *p = malloc(10); + void *q = calloc(10,10); + free(p); + free(q); + ]]) + ],[ + curl_cv_need_header_malloc_h="no" + ],[ + curl_cv_need_header_malloc_h="yes" + ]) + # + case "$curl_cv_need_header_malloc_h" in + yes) + AC_DEFINE_UNQUOTED(NEED_MALLOC_H, 1, + [Define to 1 if you need the malloc.h header file even with stdlib.h]) + ;; + esac + fi +]) + + +dnl CURL_CHECK_HEADER_MEMORY +dnl ------------------------------------------------- +dnl Check for compilable and valid memory.h header, +dnl and check if it is needed even with stdlib.h for +dnl memory related functions. 
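Editorial note (not part of the vendored file): CURL_CHECK_HEADER_MALLOC above and CURL_CHECK_HEADER_MEMORY below each probe whether the header compiles and whether it is still required next to stdlib.h, defining HAVE_MALLOC_H/NEED_MALLOC_H and HAVE_MEMORY_H/NEED_MEMORY_H. A minimal C sketch of the include pattern those defines are meant to drive; the exact guard layout in curl's own sources may differ:

    #include <stdlib.h>
    #ifdef NEED_MALLOC_H
    #include <malloc.h>    /* some platforms need this even with stdlib.h */
    #endif
    #ifdef NEED_MEMORY_H
    #include <memory.h>    /* likewise for the memory-related prototypes */
    #endif

    int main(void)
    {
      void *p = malloc(10);
      void *q = calloc(10, 10);
      free(p);
      free(q);
      return 0;
    }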
+ +AC_DEFUN([CURL_CHECK_HEADER_MEMORY], [ + AC_CACHE_CHECK([for memory.h], [curl_cv_header_memory_h], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#include + ]],[[ + void *p = malloc(10); + void *q = calloc(10,10); + free(p); + free(q); + ]]) + ],[ + curl_cv_header_memory_h="yes" + ],[ + curl_cv_header_memory_h="no" + ]) + ]) + if test "$curl_cv_header_memory_h" = "yes"; then + AC_DEFINE_UNQUOTED(HAVE_MEMORY_H, 1, + [Define to 1 if you have the memory.h header file.]) + # + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#include + ]],[[ + void *p = malloc(10); + void *q = calloc(10,10); + free(p); + free(q); + ]]) + ],[ + curl_cv_need_header_memory_h="no" + ],[ + curl_cv_need_header_memory_h="yes" + ]) + # + case "$curl_cv_need_header_memory_h" in + yes) + AC_DEFINE_UNQUOTED(NEED_MEMORY_H, 1, + [Define to 1 if you need the memory.h header file even with stdlib.h]) + ;; + esac + fi +]) + + +dnl CURL_CHECK_FUNC_GETNAMEINFO +dnl ------------------------------------------------- +dnl Test if the getnameinfo function is available, +dnl and check the types of five of its arguments. +dnl If the function succeeds HAVE_GETNAMEINFO will be +dnl defined, defining the types of the arguments in +dnl GETNAMEINFO_TYPE_ARG1, GETNAMEINFO_TYPE_ARG2, +dnl GETNAMEINFO_TYPE_ARG46 and GETNAMEINFO_TYPE_ARG7, +dnl and also defining the type qualifier of first +dnl argument in GETNAMEINFO_QUAL_ARG1. + +AC_DEFUN([CURL_CHECK_FUNC_GETNAMEINFO], [ + AC_REQUIRE([CURL_CHECK_HEADER_WS2TCPIP])dnl + AC_CHECK_HEADERS(sys/types.h sys/socket.h netdb.h) + # + AC_MSG_CHECKING([for getnameinfo]) + AC_LINK_IFELSE([ + AC_LANG_FUNC_LINK_TRY([getnameinfo]) + ],[ + AC_MSG_RESULT([yes]) + curl_cv_getnameinfo="yes" + ],[ + AC_MSG_RESULT([no]) + curl_cv_getnameinfo="no" + ]) + # + if test "$curl_cv_getnameinfo" != "yes"; then + AC_MSG_CHECKING([deeper for getnameinfo]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ + ]],[[ + getnameinfo(); + ]]) + ],[ + AC_MSG_RESULT([yes]) + curl_cv_getnameinfo="yes" + ],[ + AC_MSG_RESULT([but still no]) + curl_cv_getnameinfo="no" + ]) + fi + # + if test "$curl_cv_getnameinfo" != "yes"; then + AC_MSG_CHECKING([deeper and deeper for getnameinfo]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#ifdef HAVE_WS2TCPIP_H +#include +#endif +#endif +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETDB_H +#include +#endif +#endif + ]],[[ + getnameinfo(0, 0, 0, 0, 0, 0, 0); + ]]) + ],[ + AC_MSG_RESULT([yes]) + curl_cv_getnameinfo="yes" + ],[ + AC_MSG_RESULT([but still no]) + curl_cv_getnameinfo="no" + ]) + fi + # + if test "$curl_cv_getnameinfo" = "yes"; then + AC_CACHE_CHECK([types of arguments for getnameinfo], + [curl_cv_func_getnameinfo_args], [ + curl_cv_func_getnameinfo_args="unknown" + for gni_arg1 in 'struct sockaddr *' 'const struct sockaddr *' 'void *'; do + for gni_arg2 in 'socklen_t' 'size_t' 'int'; do + for gni_arg46 in 'size_t' 'int' 'socklen_t' 'unsigned int' 'DWORD'; do + for gni_arg7 in 'int' 'unsigned int'; do + if test "$curl_cv_func_getnameinfo_args" = "unknown"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#if (!defined(_WIN32_WINNT)) || (_WIN32_WINNT < 0x0501) +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0501 +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#ifdef HAVE_WS2TCPIP_H +#include 
+#endif +#endif +#define GNICALLCONV WSAAPI +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETDB_H +#include +#endif +#define GNICALLCONV +#endif + extern int GNICALLCONV getnameinfo($gni_arg1, $gni_arg2, + char *, $gni_arg46, + char *, $gni_arg46, + $gni_arg7); + ]],[[ + $gni_arg2 salen=0; + $gni_arg46 hostlen=0; + $gni_arg46 servlen=0; + $gni_arg7 flags=0; + int res = getnameinfo(0, salen, 0, hostlen, 0, servlen, flags); + ]]) + ],[ + curl_cv_func_getnameinfo_args="$gni_arg1,$gni_arg2,$gni_arg46,$gni_arg7" + ]) + fi + done + done + done + done + ]) # AC-CACHE-CHECK + if test "$curl_cv_func_getnameinfo_args" = "unknown"; then + AC_MSG_WARN([Cannot find proper types to use for getnameinfo args]) + AC_MSG_WARN([HAVE_GETNAMEINFO will not be defined]) + else + gni_prev_IFS=$IFS; IFS=',' + set dummy `echo "$curl_cv_func_getnameinfo_args" | sed 's/\*/\*/g'` + IFS=$gni_prev_IFS + shift + # + gni_qual_type_arg1=$[1] + # + AC_DEFINE_UNQUOTED(GETNAMEINFO_TYPE_ARG2, $[2], + [Define to the type of arg 2 for getnameinfo.]) + AC_DEFINE_UNQUOTED(GETNAMEINFO_TYPE_ARG46, $[3], + [Define to the type of args 4 and 6 for getnameinfo.]) + AC_DEFINE_UNQUOTED(GETNAMEINFO_TYPE_ARG7, $[4], + [Define to the type of arg 7 for getnameinfo.]) + # + prev_sh_opts=$- + # + case $prev_sh_opts in + *f*) + ;; + *) + set -f + ;; + esac + # + case "$gni_qual_type_arg1" in + const*) + gni_qual_arg1=const + gni_type_arg1=`echo $gni_qual_type_arg1 | sed 's/^const //'` + ;; + *) + gni_qual_arg1= + gni_type_arg1=$gni_qual_type_arg1 + ;; + esac + # + AC_DEFINE_UNQUOTED(GETNAMEINFO_QUAL_ARG1, $gni_qual_arg1, + [Define to the type qualifier of arg 1 for getnameinfo.]) + AC_DEFINE_UNQUOTED(GETNAMEINFO_TYPE_ARG1, $gni_type_arg1, + [Define to the type of arg 1 for getnameinfo.]) + # + case $prev_sh_opts in + *f*) + ;; + *) + set +f + ;; + esac + # + AC_DEFINE_UNQUOTED(HAVE_GETNAMEINFO, 1, + [Define to 1 if you have the getnameinfo function.]) + curl_cv_func_getnameinfo="yes" + fi + fi +]) + + +dnl TYPE_SOCKADDR_STORAGE +dnl ------------------------------------------------- +dnl Check for struct sockaddr_storage. Most IPv6-enabled +dnl hosts have it, but AIX 4.3 is one known exception. 
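Editorial note (not part of the vendored file): TYPE_SOCKADDR_STORAGE below only defines HAVE_STRUCT_SOCKADDR_STORAGE. A minimal C sketch, assuming a POSIX host, of the guarded-buffer pattern that define enables; it is the same fallback the NI_WITHSCOPEID probe further down uses:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    int main(void)
    {
    #ifdef HAVE_STRUCT_SOCKADDR_STORAGE
      struct sockaddr_storage sa;   /* large and aligned enough for any address family */
    #else
      unsigned char sa[256];        /* crude fallback, e.g. for AIX 4.3 */
    #endif
      printf("address buffer size: %u bytes\n", (unsigned)sizeof(sa));
      return 0;
    }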
+ +AC_DEFUN([TYPE_SOCKADDR_STORAGE], +[ + AC_CHECK_TYPE([struct sockaddr_storage], + AC_DEFINE(HAVE_STRUCT_SOCKADDR_STORAGE, 1, + [if struct sockaddr_storage is defined]), , + [ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#endif +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_ARPA_INET_H +#include +#endif +#endif + ]) +]) + + +dnl CURL_CHECK_NI_WITHSCOPEID +dnl ------------------------------------------------- +dnl Check for working NI_WITHSCOPEID in getnameinfo() + +AC_DEFUN([CURL_CHECK_NI_WITHSCOPEID], [ + AC_REQUIRE([CURL_CHECK_FUNC_GETNAMEINFO])dnl + AC_REQUIRE([TYPE_SOCKADDR_STORAGE])dnl + AC_CHECK_HEADERS(stdio.h sys/types.h sys/socket.h \ + netdb.h netinet/in.h arpa/inet.h) + # + AC_CACHE_CHECK([for working NI_WITHSCOPEID], + [curl_cv_working_ni_withscopeid], [ + AC_RUN_IFELSE([ + AC_LANG_PROGRAM([[ +#ifdef HAVE_STDLIB_H +#include +#endif +#ifdef HAVE_STDIO_H +#include +#endif +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETDB_H +#include +#endif +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_ARPA_INET_H +#include +#endif + ]],[[ +#if defined(NI_WITHSCOPEID) && defined(HAVE_GETNAMEINFO) +#ifdef HAVE_STRUCT_SOCKADDR_STORAGE + struct sockaddr_storage sa; +#else + unsigned char sa[256]; +#endif + char hostbuf[NI_MAXHOST]; + int rc; + GETNAMEINFO_TYPE_ARG2 salen = (GETNAMEINFO_TYPE_ARG2)sizeof(sa); + GETNAMEINFO_TYPE_ARG46 hostlen = (GETNAMEINFO_TYPE_ARG46)sizeof(hostbuf); + GETNAMEINFO_TYPE_ARG7 flags = NI_NUMERICHOST | NI_NUMERICSERV | NI_WITHSCOPEID; + int fd = socket(AF_INET6, SOCK_STREAM, 0); + if(fd < 0) { + perror("socket()"); + return 1; /* Error creating socket */ + } + rc = getsockname(fd, (GETNAMEINFO_TYPE_ARG1)&sa, &salen); + if(rc) { + perror("getsockname()"); + return 2; /* Error retrieving socket name */ + } + rc = getnameinfo((GETNAMEINFO_TYPE_ARG1)&sa, salen, hostbuf, hostlen, NULL, 0, flags); + if(rc) { + printf("rc = %s\n", gai_strerror(rc)); + return 3; /* Error translating socket address */ + } + return 0; /* Ok, NI_WITHSCOPEID works */ +#else + return 4; /* Error, NI_WITHSCOPEID not defined or no getnameinfo() */ +#endif + ]]) # AC-LANG-PROGRAM + ],[ + # Exit code == 0. Program worked. + curl_cv_working_ni_withscopeid="yes" + ],[ + # Exit code != 0. Program failed. + curl_cv_working_ni_withscopeid="no" + ],[ + # Program is not run when cross-compiling. So we assume + # NI_WITHSCOPEID will work if we are able to compile it. + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#include +#include +#include + ]],[[ + unsigned int dummy= NI_NUMERICHOST | NI_NUMERICSERV | NI_WITHSCOPEID; + ]]) + ],[ + curl_cv_working_ni_withscopeid="yes" + ],[ + curl_cv_working_ni_withscopeid="no" + ]) # AC-COMPILE-IFELSE + ]) # AC-RUN-IFELSE + ]) # AC-CACHE-CHECK + case "$curl_cv_working_ni_withscopeid" in + yes) + AC_DEFINE(HAVE_NI_WITHSCOPEID, 1, + [Define to 1 if NI_WITHSCOPEID exists and works.]) + ;; + esac +]) + + +dnl CURL_CHECK_FUNC_RECV +dnl ------------------------------------------------- +dnl Test if the socket recv() function is available, +dnl and check its return type and the types of its +dnl arguments. 
If the function succeeds HAVE_RECV +dnl will be defined, defining the types of the arguments +dnl in RECV_TYPE_ARG1, RECV_TYPE_ARG2, RECV_TYPE_ARG3 +dnl and RECV_TYPE_ARG4, defining the type of the function +dnl return value in RECV_TYPE_RETV. + +AC_DEFUN([CURL_CHECK_FUNC_RECV], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINSOCK])dnl + AC_REQUIRE([CURL_CHECK_HEADER_WINSOCK2])dnl + AC_CHECK_HEADERS(sys/types.h sys/socket.h) + # + AC_MSG_CHECKING([for recv]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#endif + ]],[[ + recv(0, 0, 0, 0); + ]]) + ],[ + AC_MSG_RESULT([yes]) + curl_cv_recv="yes" + ],[ + AC_MSG_RESULT([no]) + curl_cv_recv="no" + ]) + # + if test "$curl_cv_recv" = "yes"; then + AC_CACHE_CHECK([types of args and return type for recv], + [curl_cv_func_recv_args], [ + curl_cv_func_recv_args="unknown" + for recv_retv in 'int' 'ssize_t'; do + for recv_arg1 in 'int' 'ssize_t' 'SOCKET'; do + for recv_arg2 in 'char *' 'void *'; do + for recv_arg3 in 'size_t' 'int' 'socklen_t' 'unsigned int'; do + for recv_arg4 in 'int' 'unsigned int'; do + if test "$curl_cv_func_recv_args" = "unknown"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#define RECVCALLCONV PASCAL +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#define RECVCALLCONV +#endif + extern $recv_retv RECVCALLCONV + recv($recv_arg1, $recv_arg2, $recv_arg3, $recv_arg4); + ]],[[ + $recv_arg1 s=0; + $recv_arg2 buf=0; + $recv_arg3 len=0; + $recv_arg4 flags=0; + $recv_retv res = recv(s, buf, len, flags); + ]]) + ],[ + curl_cv_func_recv_args="$recv_arg1,$recv_arg2,$recv_arg3,$recv_arg4,$recv_retv" + ]) + fi + done + done + done + done + done + ]) # AC-CACHE-CHECK + if test "$curl_cv_func_recv_args" = "unknown"; then + AC_MSG_ERROR([Cannot find proper types to use for recv args]) + else + recv_prev_IFS=$IFS; IFS=',' + set dummy `echo "$curl_cv_func_recv_args" | sed 's/\*/\*/g'` + IFS=$recv_prev_IFS + shift + # + AC_DEFINE_UNQUOTED(RECV_TYPE_ARG1, $[1], + [Define to the type of arg 1 for recv.]) + AC_DEFINE_UNQUOTED(RECV_TYPE_ARG2, $[2], + [Define to the type of arg 2 for recv.]) + AC_DEFINE_UNQUOTED(RECV_TYPE_ARG3, $[3], + [Define to the type of arg 3 for recv.]) + AC_DEFINE_UNQUOTED(RECV_TYPE_ARG4, $[4], + [Define to the type of arg 4 for recv.]) + AC_DEFINE_UNQUOTED(RECV_TYPE_RETV, $[5], + [Define to the function return type for recv.]) + # + AC_DEFINE_UNQUOTED(HAVE_RECV, 1, + [Define to 1 if you have the recv function.]) + curl_cv_func_recv="yes" + fi + else + AC_MSG_ERROR([Unable to link function recv]) + fi +]) + + +dnl CURL_CHECK_FUNC_SEND +dnl ------------------------------------------------- +dnl Test if the socket send() function is available, +dnl and check its return type and the types of its +dnl arguments. 
If the function succeeds HAVE_SEND +dnl will be defined, defining the types of the arguments +dnl in SEND_TYPE_ARG1, SEND_TYPE_ARG2, SEND_TYPE_ARG3 +dnl and SEND_TYPE_ARG4, defining the type of the function +dnl return value in SEND_TYPE_RETV, and also defining the +dnl type qualifier of second argument in SEND_QUAL_ARG2. + +AC_DEFUN([CURL_CHECK_FUNC_SEND], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINSOCK])dnl + AC_REQUIRE([CURL_CHECK_HEADER_WINSOCK2])dnl + AC_CHECK_HEADERS(sys/types.h sys/socket.h) + # + AC_MSG_CHECKING([for send]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#endif + ]],[[ + send(0, 0, 0, 0); + ]]) + ],[ + AC_MSG_RESULT([yes]) + curl_cv_send="yes" + ],[ + AC_MSG_RESULT([no]) + curl_cv_send="no" + ]) + # + if test "$curl_cv_send" = "yes"; then + AC_CACHE_CHECK([types of args and return type for send], + [curl_cv_func_send_args], [ + curl_cv_func_send_args="unknown" + for send_retv in 'int' 'ssize_t'; do + for send_arg1 in 'int' 'ssize_t' 'SOCKET'; do + for send_arg2 in 'char *' 'void *' 'const char *' 'const void *'; do + for send_arg3 in 'size_t' 'int' 'socklen_t' 'unsigned int'; do + for send_arg4 in 'int' 'unsigned int'; do + if test "$curl_cv_func_send_args" = "unknown"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#define SENDCALLCONV PASCAL +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#define SENDCALLCONV +#endif + extern $send_retv SENDCALLCONV + send($send_arg1, $send_arg2, $send_arg3, $send_arg4); + ]],[[ + $send_arg1 s=0; + $send_arg3 len=0; + $send_arg4 flags=0; + $send_retv res = send(s, 0, len, flags); + ]]) + ],[ + curl_cv_func_send_args="$send_arg1,$send_arg2,$send_arg3,$send_arg4,$send_retv" + ]) + fi + done + done + done + done + done + ]) # AC-CACHE-CHECK + if test "$curl_cv_func_send_args" = "unknown"; then + AC_MSG_ERROR([Cannot find proper types to use for send args]) + else + send_prev_IFS=$IFS; IFS=',' + set dummy `echo "$curl_cv_func_send_args" | sed 's/\*/\*/g'` + IFS=$send_prev_IFS + shift + # + send_qual_type_arg2=$[2] + # + AC_DEFINE_UNQUOTED(SEND_TYPE_ARG1, $[1], + [Define to the type of arg 1 for send.]) + AC_DEFINE_UNQUOTED(SEND_TYPE_ARG3, $[3], + [Define to the type of arg 3 for send.]) + AC_DEFINE_UNQUOTED(SEND_TYPE_ARG4, $[4], + [Define to the type of arg 4 for send.]) + AC_DEFINE_UNQUOTED(SEND_TYPE_RETV, $[5], + [Define to the function return type for send.]) + # + prev_sh_opts=$- + # + case $prev_sh_opts in + *f*) + ;; + *) + set -f + ;; + esac + # + case "$send_qual_type_arg2" in + const*) + send_qual_arg2=const + send_type_arg2=`echo $send_qual_type_arg2 | sed 's/^const //'` + ;; + *) + send_qual_arg2= + send_type_arg2=$send_qual_type_arg2 + ;; + esac + # + AC_DEFINE_UNQUOTED(SEND_QUAL_ARG2, $send_qual_arg2, + [Define to the type qualifier of arg 2 for send.]) + AC_DEFINE_UNQUOTED(SEND_TYPE_ARG2, $send_type_arg2, + [Define to the type of arg 2 for send.]) + # + case $prev_sh_opts in + *f*) + ;; + *) + set +f + ;; + esac + # + AC_DEFINE_UNQUOTED(HAVE_SEND, 1, + [Define to 1 if you have the send 
function.]) + curl_cv_func_send="yes" + fi + else + AC_MSG_ERROR([Unable to link function send]) + fi +]) + +dnl CURL_CHECK_MSG_NOSIGNAL +dnl ------------------------------------------------- +dnl Check for MSG_NOSIGNAL + +AC_DEFUN([CURL_CHECK_MSG_NOSIGNAL], [ + AC_CHECK_HEADERS(sys/types.h sys/socket.h) + AC_CACHE_CHECK([for MSG_NOSIGNAL], [curl_cv_msg_nosignal], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#endif + ]],[[ + int flag=MSG_NOSIGNAL; + ]]) + ],[ + curl_cv_msg_nosignal="yes" + ],[ + curl_cv_msg_nosignal="no" + ]) + ]) + case "$curl_cv_msg_nosignal" in + yes) + AC_DEFINE_UNQUOTED(HAVE_MSG_NOSIGNAL, 1, + [Define to 1 if you have the MSG_NOSIGNAL flag.]) + ;; + esac +]) + + +dnl CURL_CHECK_STRUCT_TIMEVAL +dnl ------------------------------------------------- +dnl Check for timeval struct + +AC_DEFUN([CURL_CHECK_STRUCT_TIMEVAL], [ + AC_REQUIRE([AC_HEADER_TIME])dnl + AC_REQUIRE([CURL_CHECK_HEADER_WINSOCK])dnl + AC_REQUIRE([CURL_CHECK_HEADER_WINSOCK2])dnl + AC_CHECK_HEADERS(sys/types.h sys/time.h time.h sys/socket.h) + AC_CACHE_CHECK([for struct timeval], [curl_cv_struct_timeval], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#endif +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_TIME_H +#include +#ifdef TIME_WITH_SYS_TIME +#include +#endif +#else +#ifdef HAVE_TIME_H +#include +#endif +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif + ]],[[ + struct timeval ts; + ts.tv_sec = 0; + ts.tv_usec = 0; + ]]) + ],[ + curl_cv_struct_timeval="yes" + ],[ + curl_cv_struct_timeval="no" + ]) + ]) + case "$curl_cv_struct_timeval" in + yes) + AC_DEFINE_UNQUOTED(HAVE_STRUCT_TIMEVAL, 1, + [Define to 1 if you have the timeval struct.]) + ;; + esac +]) + + +dnl TYPE_SIG_ATOMIC_T +dnl ------------------------------------------------- +dnl Check if the sig_atomic_t type is available, and +dnl verify if it is already defined as volatile. + +AC_DEFUN([TYPE_SIG_ATOMIC_T], [ + AC_CHECK_HEADERS(signal.h) + AC_CHECK_TYPE([sig_atomic_t],[ + AC_DEFINE(HAVE_SIG_ATOMIC_T, 1, + [Define to 1 if sig_atomic_t is an available typedef.]) + ], ,[ +#ifdef HAVE_SIGNAL_H +#include +#endif + ]) + case "$ac_cv_type_sig_atomic_t" in + yes) + # + AC_MSG_CHECKING([if sig_atomic_t is already defined as volatile]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#ifdef HAVE_SIGNAL_H +#include +#endif + ]],[[ + static volatile sig_atomic_t dummy = 0; + ]]) + ],[ + AC_MSG_RESULT([no]) + curl_cv_sig_atomic_t_volatile="no" + ],[ + AC_MSG_RESULT([yes]) + curl_cv_sig_atomic_t_volatile="yes" + ]) + # + if test "$curl_cv_sig_atomic_t_volatile" = "yes"; then + AC_DEFINE(HAVE_SIG_ATOMIC_T_VOLATILE, 1, + [Define to 1 if sig_atomic_t is already defined as volatile.]) + fi + ;; + esac +]) + + +dnl TYPE_IN_ADDR_T +dnl ------------------------------------------------- +dnl Check for in_addr_t: it is used to receive the return code of inet_addr() +dnl and a few other things. 
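Editorial note (not part of the vendored file): TYPE_IN_ADDR_T below either finds in_addr_t in the system headers or #defines it to an equivalent unsigned integer type discovered by the probe. A minimal C sketch of the usage the probe emulates, assuming a POSIX host:

    #include <stdio.h>
    #include <netinet/in.h>   /* in_addr_t on POSIX systems */
    #include <arpa/inet.h>    /* inet_addr() */

    int main(void)
    {
      /* if the typedef is missing, configure supplies an equivalent type */
      in_addr_t addr = inet_addr("1.2.3.4");
      printf("network-order value: 0x%08lx\n", (unsigned long)addr);
      return 0;
    }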
+ +AC_DEFUN([TYPE_IN_ADDR_T], [ + AC_CHECK_TYPE([in_addr_t], ,[ + dnl in_addr_t not available + AC_CACHE_CHECK([for in_addr_t equivalent], + [curl_cv_in_addr_t_equiv], [ + curl_cv_in_addr_t_equiv="unknown" + for t in "unsigned long" int size_t unsigned long; do + if test "$curl_cv_in_addr_t_equiv" = "unknown"; then + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_ARPA_INET_H +#include +#endif +#endif + ]],[[ + $t data = inet_addr ("1.2.3.4"); + ]]) + ],[ + curl_cv_in_addr_t_equiv="$t" + ]) + fi + done + ]) + case "$curl_cv_in_addr_t_equiv" in + unknown) + AC_MSG_ERROR([Cannot find a type to use in place of in_addr_t]) + ;; + *) + AC_DEFINE_UNQUOTED(in_addr_t, $curl_cv_in_addr_t_equiv, + [Type to use in place of in_addr_t when system does not provide it.]) + ;; + esac + ],[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#else +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_ARPA_INET_H +#include +#endif +#endif + ]) +]) + + +dnl CURL_CHECK_FUNC_CLOCK_GETTIME_MONOTONIC +dnl ------------------------------------------------- +dnl Check if monotonic clock_gettime is available. + +AC_DEFUN([CURL_CHECK_FUNC_CLOCK_GETTIME_MONOTONIC], [ + AC_REQUIRE([AC_HEADER_TIME])dnl + AC_CHECK_HEADERS(sys/types.h sys/time.h time.h) + AC_MSG_CHECKING([for monotonic clock_gettime]) + # + if test "x$dontwant_rt" = "xno" ; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_TIME_H +#include +#ifdef TIME_WITH_SYS_TIME +#include +#endif +#else +#ifdef HAVE_TIME_H +#include +#endif +#endif + ]],[[ + struct timespec ts; + (void)clock_gettime(CLOCK_MONOTONIC, &ts); + ]]) + ],[ + AC_MSG_RESULT([yes]) + curl_func_clock_gettime="yes" + ],[ + AC_MSG_RESULT([no]) + curl_func_clock_gettime="no" + ]) + fi + dnl Definition of HAVE_CLOCK_GETTIME_MONOTONIC is intentionally postponed + dnl until library linking and run-time checks for clock_gettime succeed. +]) + + +dnl CURL_CHECK_LIBS_CLOCK_GETTIME_MONOTONIC +dnl ------------------------------------------------- +dnl If monotonic clock_gettime is available then, +dnl check and prepended to LIBS any needed libraries. 
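Editorial note (not part of the vendored file): the macro below retries the link with '', -lrt and -lposix4 and then run-tests the call before defining HAVE_CLOCK_GETTIME_MONOTONIC. A minimal C sketch of the call being probed, assuming a POSIX host; on older glibc or Solaris it only links with the extra library the macro prepends to LIBS:

    #include <stdio.h>
    #include <time.h>   /* clock_gettime(), CLOCK_MONOTONIC; may need -lrt */

    int main(void)
    {
      struct timespec ts;
      if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
        perror("clock_gettime");
        return 1;
      }
      printf("monotonic clock: %ld.%09ld s\n", (long)ts.tv_sec, (long)ts.tv_nsec);
      return 0;
    }

Compile with plain cc, adding -lrt only where the macro found it necessary.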
+ +AC_DEFUN([CURL_CHECK_LIBS_CLOCK_GETTIME_MONOTONIC], [ + AC_REQUIRE([CURL_CHECK_FUNC_CLOCK_GETTIME_MONOTONIC])dnl + # + if test "$curl_func_clock_gettime" = "yes"; then + # + AC_MSG_CHECKING([for clock_gettime in libraries]) + # + curl_cv_save_LIBS="$LIBS" + curl_cv_gclk_LIBS="unknown" + # + for x_xlibs in '' '-lrt' '-lposix4' ; do + if test "$curl_cv_gclk_LIBS" = "unknown"; then + if test -z "$x_xlibs"; then + LIBS="$curl_cv_save_LIBS" + else + LIBS="$x_xlibs $curl_cv_save_LIBS" + fi + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_TIME_H +#include +#ifdef TIME_WITH_SYS_TIME +#include +#endif +#else +#ifdef HAVE_TIME_H +#include +#endif +#endif + ]],[[ + struct timespec ts; + (void)clock_gettime(CLOCK_MONOTONIC, &ts); + ]]) + ],[ + curl_cv_gclk_LIBS="$x_xlibs" + ]) + fi + done + # + LIBS="$curl_cv_save_LIBS" + # + case X-"$curl_cv_gclk_LIBS" in + X-unknown) + AC_MSG_RESULT([cannot find clock_gettime]) + AC_MSG_WARN([HAVE_CLOCK_GETTIME_MONOTONIC will not be defined]) + curl_func_clock_gettime="no" + ;; + X-) + AC_MSG_RESULT([no additional lib required]) + curl_func_clock_gettime="yes" + ;; + *) + if test -z "$curl_cv_save_LIBS"; then + LIBS="$curl_cv_gclk_LIBS" + else + LIBS="$curl_cv_gclk_LIBS $curl_cv_save_LIBS" + fi + AC_MSG_RESULT([$curl_cv_gclk_LIBS]) + curl_func_clock_gettime="yes" + ;; + esac + # + dnl only do runtime verification when not cross-compiling + if test "x$cross_compiling" != "xyes" && + test "$curl_func_clock_gettime" = "yes"; then + AC_MSG_CHECKING([if monotonic clock_gettime works]) + AC_RUN_IFELSE([ + AC_LANG_PROGRAM([[ +#ifdef HAVE_STDLIB_H +#include +#endif +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_TIME_H +#include +#ifdef TIME_WITH_SYS_TIME +#include +#endif +#else +#ifdef HAVE_TIME_H +#include +#endif +#endif + ]],[[ + struct timespec ts; + if (0 == clock_gettime(CLOCK_MONOTONIC, &ts)) + exit(0); + else + exit(1); + ]]) + ],[ + AC_MSG_RESULT([yes]) + ],[ + AC_MSG_RESULT([no]) + AC_MSG_WARN([HAVE_CLOCK_GETTIME_MONOTONIC will not be defined]) + curl_func_clock_gettime="no" + LIBS="$curl_cv_save_LIBS" + ]) + fi + # + case "$curl_func_clock_gettime" in + yes) + AC_DEFINE_UNQUOTED(HAVE_CLOCK_GETTIME_MONOTONIC, 1, + [Define to 1 if you have the clock_gettime function and monotonic timer.]) + ;; + esac + # + fi + # +]) + + +dnl CURL_CHECK_LIBS_CONNECT +dnl ------------------------------------------------- +dnl Verify if network connect function is already available +dnl using current libraries or if another one is required. 
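Editorial note (not part of the vendored file): CURL_CHECK_LIBS_CONNECT below tries the current libraries first and then -lsocket (needed on e.g. Solaris), erroring out if connect() never links. A minimal C sketch of the kind of call that has to link, assuming a POSIX host; the loopback address and port are arbitrary illustration values:

    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>

    int main(void)
    {
      struct sockaddr_in sa;
      int fd = socket(AF_INET, SOCK_STREAM, 0);
      if (fd < 0)
        return 1;
      memset(&sa, 0, sizeof(sa));
      sa.sin_family = AF_INET;
      sa.sin_port = htons(80);                      /* arbitrary port */
      sa.sin_addr.s_addr = inet_addr("127.0.0.1");  /* arbitrary peer */
      /* the macro only cares that connect() resolves at link time */
      (void)connect(fd, (struct sockaddr *)&sa, sizeof(sa));
      close(fd);
      return 0;
    }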
+ +AC_DEFUN([CURL_CHECK_LIBS_CONNECT], [ + AC_REQUIRE([CURL_INCLUDES_WINSOCK2])dnl + AC_MSG_CHECKING([for connect in libraries]) + tst_connect_save_LIBS="$LIBS" + tst_connect_need_LIBS="unknown" + for tst_lib in '' '-lsocket' ; do + if test "$tst_connect_need_LIBS" = "unknown"; then + LIBS="$tst_lib $tst_connect_save_LIBS" + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ + $curl_includes_winsock2 + #ifndef HAVE_WINDOWS_H + int connect(int, void*, int); + #endif + ]],[[ + if(0 != connect(0, 0, 0)) + return 1; + ]]) + ],[ + tst_connect_need_LIBS="$tst_lib" + ]) + fi + done + LIBS="$tst_connect_save_LIBS" + # + case X-"$tst_connect_need_LIBS" in + X-unknown) + AC_MSG_RESULT([cannot find connect]) + AC_MSG_ERROR([cannot find connect function in libraries.]) + ;; + X-) + AC_MSG_RESULT([yes]) + ;; + *) + AC_MSG_RESULT([$tst_connect_need_LIBS]) + LIBS="$tst_connect_need_LIBS $tst_connect_save_LIBS" + ;; + esac +]) + + +dnl CURL_DEFINE_UNQUOTED (VARIABLE, [VALUE]) +dnl ------------------------------------------------- +dnl Like AC_DEFINE_UNQUOTED this macro will define a C preprocessor +dnl symbol that can be further used in custom template configuration +dnl files. This macro, unlike AC_DEFINE_UNQUOTED, does not use a third +dnl argument for the description. Symbol definitions done with this +dnl macro are intended to be exclusively used in handcrafted *.h.in +dnl template files. Contrary to what AC_DEFINE_UNQUOTED does, this one +dnl prevents autoheader generation and insertion of symbol template +dnl stub and definition into the first configuration header file. Do +dnl not use this macro as a replacement for AC_DEFINE_UNQUOTED, each +dnl one serves different functional needs. + +AC_DEFUN([CURL_DEFINE_UNQUOTED], [ +cat >>confdefs.h <<_EOF +[@%:@define] $1 ifelse($#, 2, [$2], 1) +_EOF +]) + + +dnl CURL_CONFIGURE_LONG +dnl ------------------------------------------------- +dnl Find out the size of long as reported by sizeof() and define +dnl CURL_SIZEOF_LONG as appropriate to be used in template file +dnl include/curl/curlbuild.h.in to properly configure the library. +dnl The size of long is a build time characteristic and as such +dnl must be recorded in curlbuild.h + +AC_DEFUN([CURL_CONFIGURE_LONG], [ + if test -z "$ac_cv_sizeof_long" || + test "$ac_cv_sizeof_long" -eq "0"; then + AC_MSG_ERROR([cannot find out size of long.]) + fi + CURL_DEFINE_UNQUOTED([CURL_SIZEOF_LONG], [$ac_cv_sizeof_long]) +]) + + +dnl CURL_CONFIGURE_CURL_SOCKLEN_T +dnl ------------------------------------------------- +dnl Find out suitable curl_socklen_t data type definition and size, making +dnl appropriate definitions for template file include/curl/curlbuild.h.in +dnl to properly configure and use the library. +dnl +dnl The need for the curl_socklen_t definition arises mainly to properly +dnl interface HP-UX systems which on one hand have a typedef'ed socklen_t +dnl data type which is 32 or 64-Bit wide depending on the data model being +dnl used, and that on the other hand is only actually used when interfacing +dnl the X/Open sockets provided in the xnet library. 
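Editorial note (not part of the vendored file): the macro below brute-forces getpeername() prototypes over int/SOCKET, struct sockaddr/void and a list of length types, then records the winning type and its size in the generated curlbuild.h. A minimal C sketch of the call whose third-argument type is being detected; socklen_t is only the usual outcome on modern POSIX hosts, not a guarantee:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>

    typedef socklen_t curl_socklen_t;  /* assumption: the usual detected type */

    int main(void)
    {
      struct sockaddr_storage ss;
      curl_socklen_t len = (curl_socklen_t)sizeof(ss);
      int fd = socket(AF_INET, SOCK_STREAM, 0);

      /* getpeername() fails on an unconnected socket, which is fine:
         the configure probe only needs the prototype to match */
      if (fd < 0 || getpeername(fd, (struct sockaddr *)&ss, &len) != 0)
        printf("no peer (expected for this sketch)\n");
      if (fd >= 0)
        close(fd);
      return 0;
    }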
+ +AC_DEFUN([CURL_CONFIGURE_CURL_SOCKLEN_T], [ + AC_REQUIRE([CURL_INCLUDES_WS2TCPIP])dnl + AC_REQUIRE([CURL_INCLUDES_SYS_SOCKET])dnl + AC_REQUIRE([CURL_PREPROCESS_CALLCONV])dnl + # + AC_BEFORE([$0], [CURL_CONFIGURE_PULL_SYS_POLL])dnl + # + AC_MSG_CHECKING([for curl_socklen_t data type]) + curl_typeof_curl_socklen_t="unknown" + for arg1 in int SOCKET; do + for arg2 in 'struct sockaddr' void; do + for t in socklen_t int size_t 'unsigned int' long 'unsigned long' void; do + if test "$curl_typeof_curl_socklen_t" = "unknown"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + $curl_includes_ws2tcpip + $curl_includes_sys_socket + $curl_preprocess_callconv + extern int FUNCALLCONV getpeername($arg1, $arg2 *, $t *); + ]],[[ + $t *lenptr = 0; + if(0 != getpeername(0, 0, lenptr)) + return 1; + ]]) + ],[ + curl_typeof_curl_socklen_t="$t" + ]) + fi + done + done + done + for t in socklen_t int; do + if test "$curl_typeof_curl_socklen_t" = "void"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + $curl_includes_sys_socket + typedef $t curl_socklen_t; + ]],[[ + curl_socklen_t dummy; + ]]) + ],[ + curl_typeof_curl_socklen_t="$t" + ]) + fi + done + AC_MSG_RESULT([$curl_typeof_curl_socklen_t]) + if test "$curl_typeof_curl_socklen_t" = "void" || + test "$curl_typeof_curl_socklen_t" = "unknown"; then + AC_MSG_ERROR([cannot find data type for curl_socklen_t.]) + fi + # + AC_MSG_CHECKING([size of curl_socklen_t]) + curl_sizeof_curl_socklen_t="unknown" + curl_pull_headers_socklen_t="unknown" + if test "$curl_cv_header_ws2tcpip_h" = "yes"; then + tst_pull_header_checks='none ws2tcpip' + tst_size_checks='4' + else + tst_pull_header_checks='none systypes syssocket' + tst_size_checks='4 8 2' + fi + for tst_size in $tst_size_checks; do + for tst_pull_headers in $tst_pull_header_checks; do + if test "$curl_sizeof_curl_socklen_t" = "unknown"; then + case $tst_pull_headers in + ws2tcpip) + tmp_includes="$curl_includes_ws2tcpip" + ;; + systypes) + tmp_includes="$curl_includes_sys_types" + ;; + syssocket) + tmp_includes="$curl_includes_sys_socket" + ;; + *) + tmp_includes="" + ;; + esac + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + $tmp_includes + typedef $curl_typeof_curl_socklen_t curl_socklen_t; + typedef char dummy_arr[sizeof(curl_socklen_t) == $tst_size ? 1 : -1]; + ]],[[ + curl_socklen_t dummy; + ]]) + ],[ + curl_sizeof_curl_socklen_t="$tst_size" + curl_pull_headers_socklen_t="$tst_pull_headers" + ]) + fi + done + done + AC_MSG_RESULT([$curl_sizeof_curl_socklen_t]) + if test "$curl_sizeof_curl_socklen_t" = "unknown"; then + AC_MSG_ERROR([cannot find out size of curl_socklen_t.]) + fi + # + case $curl_pull_headers_socklen_t in + ws2tcpip) + CURL_DEFINE_UNQUOTED([CURL_PULL_WS2TCPIP_H]) + ;; + systypes) + CURL_DEFINE_UNQUOTED([CURL_PULL_SYS_TYPES_H]) + ;; + syssocket) + CURL_DEFINE_UNQUOTED([CURL_PULL_SYS_TYPES_H]) + CURL_DEFINE_UNQUOTED([CURL_PULL_SYS_SOCKET_H]) + ;; + esac + CURL_DEFINE_UNQUOTED([CURL_TYPEOF_CURL_SOCKLEN_T], [$curl_typeof_curl_socklen_t]) + CURL_DEFINE_UNQUOTED([CURL_SIZEOF_CURL_SOCKLEN_T], [$curl_sizeof_curl_socklen_t]) +]) + + +dnl CURL_CONFIGURE_PULL_SYS_POLL +dnl ------------------------------------------------- +dnl Find out if system header file sys/poll.h must be included by the +dnl external interface, making appropriate definitions for template file +dnl include/curl/curlbuild.h.in to properly configure and use the library. +dnl +dnl The need for the sys/poll.h inclusion arises mainly to properly +dnl interface AIX systems which define macros 'events' and 'revents'. 
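Editorial note (not part of the vendored file): on AIX, sys/poll.h defines 'events' and 'revents' as macros, so structure members with those names get rewritten only where that header happens to be included; my reading of the comment above is that the CURL_PULL_SYS_POLL_H define produced by the macro below makes the public headers pull sys/poll.h in consistently. A hedged C sketch of the conditional include that define is meant to drive:

    /* sketch only: mirrors the intended effect of CURL_PULL_SYS_POLL_H,
       so any 'events'/'revents' macros are in force consistently
       wherever the public structures are seen */
    #ifdef CURL_PULL_SYS_POLL_H
    #include <sys/poll.h>
    #endif

    #include <poll.h>
    #include <stdio.h>

    int main(void)
    {
      struct pollfd pfd;
      pfd.fd = 0;            /* stdin */
      pfd.events = POLLIN;   /* compiles whether or not 'events' is a macro */
      pfd.revents = 0;
      if (poll(&pfd, 1, 0) < 0) {
        perror("poll");
        return 1;
      }
      printf("revents = 0x%x\n", (unsigned)pfd.revents);
      return 0;
    }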
+ +AC_DEFUN([CURL_CONFIGURE_PULL_SYS_POLL], [ + AC_REQUIRE([CURL_INCLUDES_POLL])dnl + # + tst_poll_events_macro_defined="unknown" + # + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + $curl_includes_poll + ]],[[ +#if defined(events) || defined(revents) + return 0; +#else + force compilation error +#endif + ]]) + ],[ + tst_poll_events_macro_defined="yes" + ],[ + tst_poll_events_macro_defined="no" + ]) + # + if test "$tst_poll_events_macro_defined" = "yes"; then + if test "x$ac_cv_header_sys_poll_h" = "xyes"; then + CURL_DEFINE_UNQUOTED([CURL_PULL_SYS_POLL_H]) + fi + fi + # +]) + + +dnl CURL_CHECK_FUNC_SELECT +dnl ------------------------------------------------- +dnl Test if the socket select() function is available, +dnl and check its return type and the types of its +dnl arguments. If the function succeeds HAVE_SELECT +dnl will be defined, defining the types of the +dnl arguments in SELECT_TYPE_ARG1, SELECT_TYPE_ARG234 +dnl and SELECT_TYPE_ARG5, defining the type of the +dnl function return value in SELECT_TYPE_RETV, and +dnl also defining the type qualifier of fifth argument +dnl in SELECT_QUAL_ARG5. + +AC_DEFUN([CURL_CHECK_FUNC_SELECT], [ + AC_REQUIRE([CURL_CHECK_STRUCT_TIMEVAL])dnl + AC_CHECK_HEADERS(sys/select.h sys/socket.h) + # + AC_MSG_CHECKING([for select]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#endif +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_TIME_H +#include +#ifdef TIME_WITH_SYS_TIME +#include +#endif +#else +#ifdef HAVE_TIME_H +#include +#endif +#endif +#ifndef HAVE_WINDOWS_H +#ifdef HAVE_SYS_SELECT_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#endif + ]],[[ + select(0, 0, 0, 0, 0); + ]]) + ],[ + AC_MSG_RESULT([yes]) + curl_cv_select="yes" + ],[ + AC_MSG_RESULT([no]) + curl_cv_select="no" + ]) + # + if test "$curl_cv_select" = "yes"; then + AC_CACHE_CHECK([types of args and return type for select], + [curl_cv_func_select_args], [ + curl_cv_func_select_args="unknown" + for sel_retv in 'int' 'ssize_t'; do + for sel_arg1 in 'int' 'ssize_t' 'size_t' 'unsigned long int' 'unsigned int'; do + for sel_arg234 in 'fd_set *' 'int *' 'void *'; do + for sel_arg5 in 'struct timeval *' 'const struct timeval *'; do + if test "$curl_cv_func_select_args" = "unknown"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#undef inline +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#define SELECTCALLCONV PASCAL +#endif +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_TIME_H +#include +#ifdef TIME_WITH_SYS_TIME +#include +#endif +#else +#ifdef HAVE_TIME_H +#include +#endif +#endif +#ifndef HAVE_WINDOWS_H +#ifdef HAVE_SYS_SELECT_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#define SELECTCALLCONV +#endif +#ifndef HAVE_STRUCT_TIMEVAL + struct timeval { + long tv_sec; + long tv_usec; + }; +#endif + extern $sel_retv SELECTCALLCONV select($sel_arg1, + $sel_arg234, + $sel_arg234, + $sel_arg234, + $sel_arg5); + ]],[[ + $sel_arg1 nfds=0; + $sel_arg234 rfds=0; + $sel_arg234 wfds=0; + $sel_arg234 efds=0; + $sel_retv res = select(nfds, rfds, wfds, efds, 0); + ]]) + ],[ + curl_cv_func_select_args="$sel_arg1,$sel_arg234,$sel_arg5,$sel_retv" + ]) + fi + done + done + done + done + ]) # AC-CACHE-CHECK + if test 
"$curl_cv_func_select_args" = "unknown"; then + AC_MSG_WARN([Cannot find proper types to use for select args]) + AC_MSG_WARN([HAVE_SELECT will not be defined]) + else + select_prev_IFS=$IFS; IFS=',' + set dummy `echo "$curl_cv_func_select_args" | sed 's/\*/\*/g'` + IFS=$select_prev_IFS + shift + # + sel_qual_type_arg5=$[3] + # + AC_DEFINE_UNQUOTED(SELECT_TYPE_ARG1, $[1], + [Define to the type of arg 1 for select.]) + AC_DEFINE_UNQUOTED(SELECT_TYPE_ARG234, $[2], + [Define to the type of args 2, 3 and 4 for select.]) + AC_DEFINE_UNQUOTED(SELECT_TYPE_RETV, $[4], + [Define to the function return type for select.]) + # + prev_sh_opts=$- + # + case $prev_sh_opts in + *f*) + ;; + *) + set -f + ;; + esac + # + case "$sel_qual_type_arg5" in + const*) + sel_qual_arg5=const + sel_type_arg5=`echo $sel_qual_type_arg5 | sed 's/^const //'` + ;; + *) + sel_qual_arg5= + sel_type_arg5=$sel_qual_type_arg5 + ;; + esac + # + AC_DEFINE_UNQUOTED(SELECT_QUAL_ARG5, $sel_qual_arg5, + [Define to the type qualifier of arg 5 for select.]) + AC_DEFINE_UNQUOTED(SELECT_TYPE_ARG5, $sel_type_arg5, + [Define to the type of arg 5 for select.]) + # + case $prev_sh_opts in + *f*) + ;; + *) + set +f + ;; + esac + # + AC_DEFINE_UNQUOTED(HAVE_SELECT, 1, + [Define to 1 if you have the select function.]) + curl_cv_func_select="yes" + fi + fi +]) + + +dnl CURL_VERIFY_RUNTIMELIBS +dnl ------------------------------------------------- +dnl Verify that the shared libs found so far can be used when running +dnl programs, since otherwise the situation will create odd configure errors +dnl that are misleading people. +dnl +dnl Make sure this test is run BEFORE the first test in the script that +dnl runs anything, which at the time of this writing is the AC_CHECK_SIZEOF +dnl macro. It must also run AFTER all lib-checking macros are complete. + +AC_DEFUN([CURL_VERIFY_RUNTIMELIBS], [ + + dnl this test is of course not sensible if we are cross-compiling! + if test "x$cross_compiling" != xyes; then + + dnl just run a program to verify that the libs checked for previous to this + dnl point also is available run-time! + AC_MSG_CHECKING([run-time libs availability]) + AC_TRY_RUN([ +main() +{ + return 0; +} +], + AC_MSG_RESULT([fine]), + AC_MSG_RESULT([failed]) + AC_MSG_ERROR([one or more libs available at link-time are not available run-time. Libs used at link-time: $LIBS]) + ) + + dnl if this test fails, configure has already stopped + fi +]) + + +dnl CURL_CHECK_VARIADIC_MACROS +dnl ------------------------------------------------- +dnl Check compiler support of variadic macros + +AC_DEFUN([CURL_CHECK_VARIADIC_MACROS], [ + AC_CACHE_CHECK([for compiler support of C99 variadic macro style], + [curl_cv_variadic_macros_c99], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#define c99_vmacro3(first, ...) fun3(first, __VA_ARGS__) +#define c99_vmacro2(first, ...) 
fun2(first, __VA_ARGS__) + int fun3(int arg1, int arg2, int arg3); + int fun2(int arg1, int arg2); + int fun3(int arg1, int arg2, int arg3) + { return arg1 + arg2 + arg3; } + int fun2(int arg1, int arg2) + { return arg1 + arg2; } + ]],[[ + int res3 = c99_vmacro3(1, 2, 3); + int res2 = c99_vmacro2(1, 2); + ]]) + ],[ + curl_cv_variadic_macros_c99="yes" + ],[ + curl_cv_variadic_macros_c99="no" + ]) + ]) + case "$curl_cv_variadic_macros_c99" in + yes) + AC_DEFINE_UNQUOTED(HAVE_VARIADIC_MACROS_C99, 1, + [Define to 1 if compiler supports C99 variadic macro style.]) + ;; + esac + AC_CACHE_CHECK([for compiler support of old gcc variadic macro style], + [curl_cv_variadic_macros_gcc], [ + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#define gcc_vmacro3(first, args...) fun3(first, args) +#define gcc_vmacro2(first, args...) fun2(first, args) + int fun3(int arg1, int arg2, int arg3); + int fun2(int arg1, int arg2); + int fun3(int arg1, int arg2, int arg3) + { return arg1 + arg2 + arg3; } + int fun2(int arg1, int arg2) + { return arg1 + arg2; } + ]],[[ + int res3 = gcc_vmacro3(1, 2, 3); + int res2 = gcc_vmacro2(1, 2); + ]]) + ],[ + curl_cv_variadic_macros_gcc="yes" + ],[ + curl_cv_variadic_macros_gcc="no" + ]) + ]) + case "$curl_cv_variadic_macros_gcc" in + yes) + AC_DEFINE_UNQUOTED(HAVE_VARIADIC_MACROS_GCC, 1, + [Define to 1 if compiler supports old gcc variadic macro style.]) + ;; + esac +]) + + +dnl CURL_CHECK_CA_BUNDLE +dnl ------------------------------------------------- +dnl Check if a default ca-bundle should be used +dnl +dnl regarding the paths this will scan: +dnl /etc/ssl/certs/ca-certificates.crt Debian systems +dnl /etc/pki/tls/certs/ca-bundle.crt Redhat and Mandriva +dnl /usr/share/ssl/certs/ca-bundle.crt old(er) Redhat +dnl /usr/local/share/certs/ca-root-nss.crt FreeBSD +dnl /etc/ssl/cert.pem OpenBSD, FreeBSD (symlink) +dnl /etc/ssl/certs/ (ca path) SUSE + +AC_DEFUN([CURL_CHECK_CA_BUNDLE], [ + + AC_MSG_CHECKING([default CA cert bundle/path]) + + AC_ARG_WITH(ca-bundle, +AC_HELP_STRING([--with-ca-bundle=FILE], +[Path to a file containing CA certificates (example: /etc/ca-bundle.crt)]) +AC_HELP_STRING([--without-ca-bundle], [Don't use a default CA bundle]), + [ + want_ca="$withval" + if test "x$want_ca" = "xyes"; then + AC_MSG_ERROR([--with-ca-bundle=FILE requires a path to the CA bundle]) + fi + ], + [ want_ca="unset" ]) + AC_ARG_WITH(ca-path, +AC_HELP_STRING([--with-ca-path=DIRECTORY], +[Path to a directory containing CA certificates stored individually, with \ +their filenames in a hash format. This option can be used with OpenSSL, \ +GnuTLS and PolarSSL backends. Refer to OpenSSL c_rehash for details. 
\ +(example: /etc/certificates)]) +AC_HELP_STRING([--without-ca-path], [Don't use a default CA path]), + [ + want_capath="$withval" + if test "x$want_capath" = "xyes"; then + AC_MSG_ERROR([--with-ca-path=DIRECTORY requires a path to the CA path directory]) + fi + ], + [ want_capath="unset"]) + + ca_warning=" (warning: certs not found)" + capath_warning=" (warning: certs not found)" + check_capath="" + + if test "x$want_ca" != "xno" -a "x$want_ca" != "xunset" -a \ + "x$want_capath" != "xno" -a "x$want_capath" != "xunset"; then + dnl both given + ca="$want_ca" + capath="$want_capath" + elif test "x$want_ca" != "xno" -a "x$want_ca" != "xunset"; then + dnl --with-ca-bundle given + ca="$want_ca" + capath="no" + elif test "x$want_capath" != "xno" -a "x$want_capath" != "xunset"; then + dnl --with-ca-path given + if test "x$OPENSSL_ENABLED" != "x1" -a "x$GNUTLS_ENABLED" != "x1" -a "x$POLARSSL_ENABLED" != "x1"; then + AC_MSG_ERROR([--with-ca-path only works with OpenSSL, GnuTLS or PolarSSL]) + fi + capath="$want_capath" + ca="no" + else + dnl first try autodetecting a CA bundle , then a CA path + dnl both autodetections can be skipped by --without-ca-* + ca="no" + capath="no" + if test "x$cross_compiling" != "xyes"; then + dnl NOT cross-compiling and... + dnl neither of the --with-ca-* options are provided + if test "x$want_ca" = "xunset"; then + dnl the path we previously would have installed the curl ca bundle + dnl to, and thus we now check for an already existing cert in that + dnl place in case we find no other + if test "x$prefix" != xNONE; then + cac="${prefix}/share/curl/curl-ca-bundle.crt" + else + cac="$ac_default_prefix/share/curl/curl-ca-bundle.crt" + fi + + for a in /etc/ssl/certs/ca-certificates.crt \ + /etc/pki/tls/certs/ca-bundle.crt \ + /usr/share/ssl/certs/ca-bundle.crt \ + /usr/local/share/certs/ca-root-nss.crt \ + /etc/ssl/cert.pem \ + "$cac"; do + if test -f "$a"; then + ca="$a" + break + fi + done + fi + if test "x$want_capath" = "xunset" -a "x$ca" = "xno" -a \ + "x$OPENSSL_ENABLED" = "x1"; then + check_capath="/etc/ssl/certs/" + fi + else + dnl no option given and cross-compiling + AC_MSG_WARN([skipped the ca-cert path detection when cross-compiling]) + fi + fi + + if test "x$ca" = "xno" || test -f "$ca"; then + ca_warning="" + fi + + if test "x$capath" != "xno"; then + check_capath="$capath" + fi + + if test ! 
-z "$check_capath"; then + for a in "$check_capath"; do + if test -d "$a" && ls "$a"/[[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]].0 >/dev/null 2>/dev/null; then + if test "x$capath" = "xno"; then + capath="$a" + fi + capath_warning="" + break + fi + done + fi + + if test "x$capath" = "xno"; then + capath_warning="" + fi + + if test "x$ca" != "xno"; then + CURL_CA_BUNDLE='"'$ca'"' + AC_DEFINE_UNQUOTED(CURL_CA_BUNDLE, "$ca", [Location of default ca bundle]) + AC_SUBST(CURL_CA_BUNDLE) + AC_MSG_RESULT([$ca]) + fi + if test "x$capath" != "xno"; then + CURL_CA_PATH="\"$capath\"" + AC_DEFINE_UNQUOTED(CURL_CA_PATH, "$capath", [Location of default ca path]) + AC_MSG_RESULT([$capath (capath)]) + fi + if test "x$ca" = "xno" && test "x$capath" = "xno"; then + AC_MSG_RESULT([no]) + fi + + AC_MSG_CHECKING([whether to use builtin CA store of SSL library]) + AC_ARG_WITH(ca-fallback, +AC_HELP_STRING([--with-ca-fallback], [Use the built in CA store of the SSL library]) +AC_HELP_STRING([--without-ca-fallback], [Don't use the built in CA store of the SSL library]), + [ + if test "x$with_ca_fallback" != "xyes" -a "x$with_ca_fallback" != "xno"; then + AC_MSG_ERROR([--with-ca-fallback only allows yes or no as parameter]) + fi + ], + [ with_ca_fallback="no"]) + AC_MSG_RESULT([$with_ca_fallback]) + if test "x$with_ca_fallback" = "xyes"; then + if test "x$OPENSSL_ENABLED" != "x1" -a "x$GNUTLS_ENABLED" != "x1"; then + AC_MSG_ERROR([--with-ca-fallback only works with OpenSSL or GnuTLS]) + fi + AC_DEFINE_UNQUOTED(CURL_CA_FALLBACK, 1, [define "1" to use built in CA store of SSL library ]) + fi +]) + + +dnl DO_CURL_OFF_T_CHECK (TYPE, SIZE) +dnl ------------------------------------------------- +dnl Internal macro for CURL_CONFIGURE_CURL_OFF_T + +AC_DEFUN([DO_CURL_OFF_T_CHECK], [ + AC_REQUIRE([CURL_INCLUDES_INTTYPES])dnl + if test "$curl_typeof_curl_off_t" = "unknown" && test ! -z "$1"; then + tmp_includes="" + tmp_source="" + tmp_fmt="" + case XC_SH_TR_SH([$1]) in + int64_t) + tmp_includes="$curl_includes_inttypes" + tmp_source="char f@<:@@:>@ = PRId64;" + tmp_fmt="PRId64" + ;; + int32_t) + tmp_includes="$curl_includes_inttypes" + tmp_source="char f@<:@@:>@ = PRId32;" + tmp_fmt="PRId32" + ;; + int16_t) + tmp_includes="$curl_includes_inttypes" + tmp_source="char f@<:@@:>@ = PRId16;" + tmp_fmt="PRId16" + ;; + esac + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + $tmp_includes + typedef $1 curl_off_t; + typedef char dummy_arr[sizeof(curl_off_t) == $2 ? 
1 : -1]; + ]],[[ + $tmp_source + curl_off_t dummy; + ]]) + ],[ + if test -z "$tmp_fmt"; then + curl_typeof_curl_off_t="$1" + curl_sizeof_curl_off_t="$2" + else + CURL_CHECK_DEF([$tmp_fmt], [$curl_includes_inttypes], [silent]) + AS_VAR_PUSHDEF([tmp_HaveFmtDef], [curl_cv_have_def_$tmp_fmt])dnl + AS_VAR_PUSHDEF([tmp_FmtDef], [curl_cv_def_$tmp_fmt])dnl + if test AS_VAR_GET(tmp_HaveFmtDef) = "yes"; then + curl_format_curl_off_t=AS_VAR_GET(tmp_FmtDef) + curl_typeof_curl_off_t="$1" + curl_sizeof_curl_off_t="$2" + fi + AS_VAR_POPDEF([tmp_FmtDef])dnl + AS_VAR_POPDEF([tmp_HaveFmtDef])dnl + fi + ]) + fi +]) + + +dnl DO_CURL_OFF_T_SUFFIX_CHECK (TYPE) +dnl ------------------------------------------------- +dnl Internal macro for CURL_CONFIGURE_CURL_OFF_T + +AC_DEFUN([DO_CURL_OFF_T_SUFFIX_CHECK], [ + AC_REQUIRE([CURL_INCLUDES_INTTYPES])dnl + AC_MSG_CHECKING([constant suffix string for curl_off_t]) + # + curl_suffix_curl_off_t="unknown" + curl_suffix_curl_off_tu="unknown" + # + case XC_SH_TR_SH([$1]) in + long_long | __longlong | __longlong_t) + tst_suffixes="LL::" + ;; + long) + tst_suffixes="L::" + ;; + int) + tst_suffixes="::" + ;; + __int64 | int64_t) + tst_suffixes="LL:i64::" + ;; + __int32 | int32_t) + tst_suffixes="L:i32::" + ;; + __int16 | int16_t) + tst_suffixes="L:i16::" + ;; + *) + AC_MSG_ERROR([unexpected data type $1]) + ;; + esac + # + old_IFS=$IFS; IFS=':' + for tmp_ssuf in $tst_suffixes ; do + IFS=$old_IFS + if test "x$curl_suffix_curl_off_t" = "xunknown"; then + case $tmp_ssuf in + i64 | i32 | i16) + tmp_usuf="u$tmp_ssuf" + ;; + LL | L) + tmp_usuf="U$tmp_ssuf" + ;; + *) + tmp_usuf="" + ;; + esac + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + $curl_includes_inttypes + typedef $1 new_t; + ]],[[ + new_t s1; + new_t s2; + s1 = -10$tmp_ssuf ; + s2 = 20$tmp_ssuf ; + if(s1 > s2) + return 1; + ]]) + ],[ + curl_suffix_curl_off_t="$tmp_ssuf" + curl_suffix_curl_off_tu="$tmp_usuf" + ]) + fi + done + IFS=$old_IFS + # + if test "x$curl_suffix_curl_off_t" = "xunknown"; then + AC_MSG_ERROR([cannot find constant suffix string for curl_off_t.]) + else + AC_MSG_RESULT([$curl_suffix_curl_off_t]) + AC_MSG_CHECKING([constant suffix string for unsigned curl_off_t]) + AC_MSG_RESULT([$curl_suffix_curl_off_tu]) + fi + # +]) + + +dnl CURL_CONFIGURE_CURL_OFF_T +dnl ------------------------------------------------- +dnl Find out suitable curl_off_t data type definition and associated +dnl items, and make the appropriate definitions used in template file +dnl include/curl/curlbuild.h.in to properly configure the library. + +AC_DEFUN([CURL_CONFIGURE_CURL_OFF_T], [ + AC_REQUIRE([CURL_INCLUDES_INTTYPES])dnl + # + AC_BEFORE([$0],[AC_SYS_LARGEFILE])dnl + AC_BEFORE([$0],[CURL_CONFIGURE_REENTRANT])dnl + AC_BEFORE([$0],[CURL_CHECK_AIX_ALL_SOURCE])dnl + # + if test -z "$SED"; then + AC_MSG_ERROR([SED not set. 
Cannot continue without SED being set.]) + fi + # + AC_CHECK_SIZEOF(long) + AC_CHECK_SIZEOF(void*) + # + if test -z "$ac_cv_sizeof_long" || + test "$ac_cv_sizeof_long" -eq "0"; then + AC_MSG_ERROR([cannot find out size of long.]) + fi + if test -z "$ac_cv_sizeof_voidp" || + test "$ac_cv_sizeof_voidp" -eq "0"; then + AC_MSG_ERROR([cannot find out size of void*.]) + fi + # + x_LP64_long="" + x_LP32_long="" + # + if test "$ac_cv_sizeof_long" -eq "8" && + test "$ac_cv_sizeof_voidp" -ge "8"; then + x_LP64_long="long" + elif test "$ac_cv_sizeof_long" -eq "4" && + test "$ac_cv_sizeof_voidp" -ge "4"; then + x_LP32_long="long" + fi + # + dnl DO_CURL_OFF_T_CHECK results are stored in next 3 vars + # + curl_typeof_curl_off_t="unknown" + curl_sizeof_curl_off_t="unknown" + curl_format_curl_off_t="unknown" + curl_format_curl_off_tu="unknown" + # + if test "$curl_typeof_curl_off_t" = "unknown"; then + AC_MSG_CHECKING([for 64-bit curl_off_t data type]) + for t8 in \ + "$x_LP64_long" \ + 'int64_t' \ + '__int64' \ + 'long long' \ + '__longlong' \ + '__longlong_t' ; do + DO_CURL_OFF_T_CHECK([$t8], [8]) + done + AC_MSG_RESULT([$curl_typeof_curl_off_t]) + fi + if test "$curl_typeof_curl_off_t" = "unknown"; then + AC_MSG_CHECKING([for 32-bit curl_off_t data type]) + for t4 in \ + "$x_LP32_long" \ + 'int32_t' \ + '__int32' \ + 'int' ; do + DO_CURL_OFF_T_CHECK([$t4], [4]) + done + AC_MSG_RESULT([$curl_typeof_curl_off_t]) + fi + if test "$curl_typeof_curl_off_t" = "unknown"; then + AC_MSG_ERROR([cannot find data type for curl_off_t.]) + fi + # + AC_MSG_CHECKING([size of curl_off_t]) + AC_MSG_RESULT([$curl_sizeof_curl_off_t]) + # + AC_MSG_CHECKING([formatting string directive for curl_off_t]) + if test "$curl_format_curl_off_t" != "unknown"; then + x_pull_headers="yes" + curl_format_curl_off_t=`echo "$curl_format_curl_off_t" | "$SED" 's/[["]]//g'` + curl_format_curl_off_tu=`echo "$curl_format_curl_off_t" | "$SED" 's/i$/u/'` + curl_format_curl_off_tu=`echo "$curl_format_curl_off_tu" | "$SED" 's/d$/u/'` + curl_format_curl_off_tu=`echo "$curl_format_curl_off_tu" | "$SED" 's/D$/U/'` + else + x_pull_headers="no" + case XC_SH_TR_SH([$curl_typeof_curl_off_t]) in + long_long | __longlong | __longlong_t) + curl_format_curl_off_t="lld" + curl_format_curl_off_tu="llu" + ;; + long) + curl_format_curl_off_t="ld" + curl_format_curl_off_tu="lu" + ;; + int) + curl_format_curl_off_t="d" + curl_format_curl_off_tu="u" + ;; + __int64) + curl_format_curl_off_t="I64d" + curl_format_curl_off_tu="I64u" + ;; + __int32) + curl_format_curl_off_t="I32d" + curl_format_curl_off_tu="I32u" + ;; + __int16) + curl_format_curl_off_t="I16d" + curl_format_curl_off_tu="I16u" + ;; + *) + AC_MSG_ERROR([cannot find print format string for curl_off_t.]) + ;; + esac + fi + AC_MSG_RESULT(["$curl_format_curl_off_t"]) + # + AC_MSG_CHECKING([formatting string directive for unsigned curl_off_t]) + AC_MSG_RESULT(["$curl_format_curl_off_tu"]) + # + DO_CURL_OFF_T_SUFFIX_CHECK([$curl_typeof_curl_off_t]) + # + if test "$x_pull_headers" = "yes"; then + if test "x$ac_cv_header_sys_types_h" = "xyes"; then + CURL_DEFINE_UNQUOTED([CURL_PULL_SYS_TYPES_H]) + fi + if test "x$ac_cv_header_stdint_h" = "xyes"; then + CURL_DEFINE_UNQUOTED([CURL_PULL_STDINT_H]) + fi + if test "x$ac_cv_header_inttypes_h" = "xyes"; then + CURL_DEFINE_UNQUOTED([CURL_PULL_INTTYPES_H]) + fi + fi + # + CURL_DEFINE_UNQUOTED([CURL_TYPEOF_CURL_OFF_T], [$curl_typeof_curl_off_t]) + CURL_DEFINE_UNQUOTED([CURL_FORMAT_CURL_OFF_T], ["$curl_format_curl_off_t"]) + CURL_DEFINE_UNQUOTED([CURL_FORMAT_CURL_OFF_TU], 
["$curl_format_curl_off_tu"]) + CURL_DEFINE_UNQUOTED([CURL_FORMAT_OFF_T], ["%$curl_format_curl_off_t"]) + CURL_DEFINE_UNQUOTED([CURL_SIZEOF_CURL_OFF_T], [$curl_sizeof_curl_off_t]) + CURL_DEFINE_UNQUOTED([CURL_SUFFIX_CURL_OFF_T], [$curl_suffix_curl_off_t]) + CURL_DEFINE_UNQUOTED([CURL_SUFFIX_CURL_OFF_TU], [$curl_suffix_curl_off_tu]) + # +]) + + +dnl CURL_CHECK_WIN32_LARGEFILE +dnl ------------------------------------------------- +dnl Check if curl's WIN32 large file will be used + +AC_DEFUN([CURL_CHECK_WIN32_LARGEFILE], [ + AC_REQUIRE([CURL_CHECK_HEADER_WINDOWS])dnl + AC_MSG_CHECKING([whether build target supports WIN32 file API]) + curl_win32_file_api="no" + if test "$curl_cv_header_windows_h" = "yes"; then + if test x"$enable_largefile" != "xno"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + ]],[[ +#if !defined(_WIN32_WCE) && \ + (defined(__MINGW32__) || \ + (defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64)))) + int dummy=1; +#else + WIN32 large file API not supported. +#endif + ]]) + ],[ + curl_win32_file_api="win32_large_files" + ]) + fi + if test "$curl_win32_file_api" = "no"; then + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + ]],[[ +#if defined(_WIN32_WCE) || defined(__MINGW32__) || defined(_MSC_VER) + int dummy=1; +#else + WIN32 small file API not supported. +#endif + ]]) + ],[ + curl_win32_file_api="win32_small_files" + ]) + fi + fi + case "$curl_win32_file_api" in + win32_large_files) + AC_MSG_RESULT([yes (large file enabled)]) + AC_DEFINE_UNQUOTED(USE_WIN32_LARGE_FILES, 1, + [Define to 1 if you are building a Windows target with large file support.]) + ;; + win32_small_files) + AC_MSG_RESULT([yes (large file disabled)]) + AC_DEFINE_UNQUOTED(USE_WIN32_SMALL_FILES, 1, + [Define to 1 if you are building a Windows target without large file support.]) + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +]) + +dnl CURL_EXPORT_PCDIR ($pcdir) +dnl ------------------------ +dnl if $pcdir is not empty, set PKG_CONFIG_LIBDIR to $pcdir and export +dnl +dnl we need this macro since pkg-config distinguishes among empty and unset +dnl variable while checking PKG_CONFIG_LIBDIR +dnl + +AC_DEFUN([CURL_EXPORT_PCDIR], [ + if test -n "$1"; then + PKG_CONFIG_LIBDIR="$1" + export PKG_CONFIG_LIBDIR + fi +]) + +dnl CURL_CHECK_PKGCONFIG ($module, [$pcdir]) +dnl ------------------------ +dnl search for the pkg-config tool. Set the PKGCONFIG variable to hold the +dnl path to it, or 'no' if not found/present. +dnl +dnl If pkg-config is present, check that it has info about the $module or +dnl return "no" anyway! +dnl +dnl Optionally PKG_CONFIG_LIBDIR may be given as $pcdir. +dnl + +AC_DEFUN([CURL_CHECK_PKGCONFIG], [ + if test -n "$PKG_CONFIG"; then + PKGCONFIG="$PKG_CONFIG" + else + AC_PATH_TOOL([PKGCONFIG], [pkg-config], [no], + [$PATH:/usr/bin:/usr/local/bin]) + fi + + if test "x$PKGCONFIG" != "xno"; then + AC_MSG_CHECKING([for $1 options with pkg-config]) + dnl ask pkg-config about $1 + itexists=`CURL_EXPORT_PCDIR([$2]) dnl + $PKGCONFIG --exists $1 >/dev/null 2>&1 && echo 1` + + if test -z "$itexists"; then + dnl pkg-config does not have info about the given module! set the + dnl variable to 'no' + PKGCONFIG="no" + AC_MSG_RESULT([no]) + else + AC_MSG_RESULT([found]) + fi + fi +]) + + +dnl CURL_GENERATE_CONFIGUREHELP_PM +dnl ------------------------------------------------- +dnl Generate test harness configurehelp.pm module, defining and +dnl initializing some perl variables with values which are known +dnl when the configure script runs. 
For portability reasons, test
+dnl harness needs information on how to run the C preprocessor.
+
+AC_DEFUN([CURL_GENERATE_CONFIGUREHELP_PM], [
+  AC_REQUIRE([AC_PROG_CPP])dnl
+  tmp_cpp=`eval echo "$ac_cpp" 2>/dev/null`
+  if test -z "$tmp_cpp"; then
+    tmp_cpp='cpp'
+  fi
+  cat >./tests/configurehelp.pm <<_EOF
+[@%:@] This is a generated file. Do not edit.
+
+package configurehelp;
+
+use strict;
+use warnings;
+use Exporter;
+
+use vars qw(
+    @ISA
+    @EXPORT_OK
+    \$Cpreprocessor
+    );
+
+@ISA = qw(Exporter);
+
+@EXPORT_OK = qw(
+    \$Cpreprocessor
+    );
+
+\$Cpreprocessor = '$tmp_cpp';
+
+1;
+_EOF
+])
+
+dnl CURL_CPP_P
+dnl
+dnl Check if $cpp -P should be used for extract define values due to gcc 5
+dnl splitting up strings and defines between line outputs. gcc by default
+dnl (without -P) will show TEST EINVAL TEST as
+dnl
+dnl # 13 "conftest.c"
+dnl TEST
+dnl # 13 "conftest.c" 3 4
+dnl 22
+dnl # 13 "conftest.c"
+dnl TEST
+
+AC_DEFUN([CURL_CPP_P], [
+  AC_MSG_CHECKING([if cpp -P is needed])
+  AC_EGREP_CPP([TEST.*TEST], [
+ #include <errno.h>
+TEST EINVAL TEST
+  ], [cpp=no], [cpp=yes])
+  AC_MSG_RESULT([$cpp])
+
+  dnl we need cpp -P so check if it works then
+  if test "x$cpp" = "xyes"; then
+    AC_MSG_CHECKING([if cpp -P works])
+    OLDCPPFLAGS=$CPPFLAGS
+    CPPFLAGS="$CPPFLAGS -P"
+    AC_EGREP_CPP([TEST.*TEST], [
+ #include <errno.h>
+TEST EINVAL TEST
+    ], [cpp_p=yes], [cpp_p=no])
+    AC_MSG_RESULT([$cpp_p])
+
+    if test "x$cpp_p" = "xno"; then
+      AC_MSG_WARN([failed to figure out cpp -P alternative])
+      # without -P
+      CPPPFLAG=""
+    else
+      # with -P
+      CPPPFLAG="-P"
+    fi
+    dnl restore CPPFLAGS
+    CPPFLAGS=$OLDCPPFLAGS
+  else
+    # without -P
+    CPPPFLAG=""
+  fi
+])
+
+
+dnl CURL_MAC_CFLAGS
+dnl
+dnl Check if -mmacosx-version-min, -miphoneos-version-min or any
+dnl similar are set manually, otherwise do. And set
+dnl -Werror=partial-availability.
+dnl
+
+AC_DEFUN([CURL_MAC_CFLAGS], [
+
+  tst_cflags="no"
+  case $host_os in
+    darwin*)
+      tst_cflags="yes"
+      ;;
+  esac
+
+  AC_MSG_CHECKING([for good-to-use Mac CFLAGS])
+  AC_MSG_RESULT([$tst_cflags]);
+
+  if test "$tst_cflags" = "yes"; then
+    AC_MSG_CHECKING([for *version-min in CFLAGS])
+    min=""
+    if test -z "$(echo $CFLAGS | grep m.*os.*-version-min)"; then
+      min="-mmacosx-version-min=10.8"
+      CFLAGS="$CFLAGS $min"
+    fi
+    if test -z "$min"; then
+      AC_MSG_RESULT([set by user])
+    else
+      AC_MSG_RESULT([$min set])
+    fi
+
+    old_CFLAGS=$CFLAGS
+    CFLAGS="$CFLAGS -Werror=partial-availability"
+    AC_MSG_CHECKING([whether $CC accepts -Werror=partial-availability])
+    AC_COMPILE_IFELSE([AC_LANG_PROGRAM()],
+      [AC_MSG_RESULT([yes])],
+      [AC_MSG_RESULT([no])
+      CFLAGS=$old_CFLAGS])
+  fi
+
+])
diff --git a/deps-win32/curl-7.54.1/buildconf b/deps-win32/curl-7.54.1/buildconf
new file mode 100644
index 0000000..9405596
--- /dev/null
+++ b/deps-win32/curl-7.54.1/buildconf
@@ -0,0 +1,449 @@
+#!/bin/sh
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.haxx.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +########################################################################### + +#-------------------------------------------------------------------------- +# die prints argument string to stdout and exits this shell script. +# +die(){ + echo "buildconf: $@" + exit 1 +} + +#-------------------------------------------------------------------------- +# findtool works as 'which' but we use a different name to make it more +# obvious we aren't using 'which'! ;-) +# Unlike 'which' does, the current directory is ignored. +# +findtool(){ + file="$1" + + if { echo "$file" | grep "/" >/dev/null 2>&1; } then + # when file is given with a path check it first + if test -f "$file"; then + echo "$file" + return + fi + fi + + old_IFS=$IFS; IFS=':' + for path in $PATH + do + IFS=$old_IFS + # echo "checks for $file in $path" >&2 + if test "$path" -a "$path" != '.' -a -f "$path/$file"; then + echo "$path/$file" + return + fi + done + IFS=$old_IFS +} + +#-------------------------------------------------------------------------- +# removethis() removes all files and subdirectories with the given name, +# inside and below the current subdirectory at invocation time. +# +removethis(){ + if test "$#" = "1"; then + find . -depth -name $1 -print > buildconf.tmp.$$ + while read fdname + do + if test -f "$fdname"; then + rm -f "$fdname" + elif test -d "$fdname"; then + rm -f -r "$fdname" + fi + done < buildconf.tmp.$$ + rm -f buildconf.tmp.$$ + fi +} + +#-------------------------------------------------------------------------- +# Ensure that buildconf runs from the subdirectory where configure.ac lives +# +if test ! -f configure.ac || + test ! -f src/tool_main.c || + test ! -f lib/urldata.h || + test ! -f include/curl/curl.h || + test ! -f m4/curl-functions.m4; then + echo "Can not run buildconf from outside of curl's source subdirectory!" + echo "Change to the subdirectory where buildconf is found, and try again." + exit 1 +fi + +#-------------------------------------------------------------------------- +# autoconf 2.57 or newer. Unpatched version 2.67 does not generate proper +# configure script. Unpatched version 2.68 is simply unusable, we should +# disallow 2.68 usage. +# +need_autoconf="2.57" +ac_version=`${AUTOCONF:-autoconf} --version 2>/dev/null|head -n 1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'` +if test -z "$ac_version"; then + echo "buildconf: autoconf not found." + echo " You need autoconf version $need_autoconf or newer installed." + exit 1 +fi +old_IFS=$IFS; IFS='.'; set $ac_version; IFS=$old_IFS +if test "$1" = "2" -a "$2" -lt "57" || test "$1" -lt "2"; then + echo "buildconf: autoconf version $ac_version found." + echo " You need autoconf version $need_autoconf or newer installed." + echo " If you have a sufficient autoconf installed, but it" + echo " is not named 'autoconf', then try setting the" + echo " AUTOCONF environment variable." + exit 1 +fi + +if test "$1" = "2" -a "$2" -eq "67"; then + echo "buildconf: autoconf version $ac_version (BAD)" + echo " Unpatched version generates broken configure script." +elif test "$1" = "2" -a "$2" -eq "68"; then + echo "buildconf: autoconf version $ac_version (BAD)" + echo " Unpatched version generates unusable configure script." 
+else + echo "buildconf: autoconf version $ac_version (ok)" +fi + +am4te_version=`${AUTOM4TE:-autom4te} --version 2>/dev/null|head -n 1| sed -e 's/autom4te\(.*\)/\1/' -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'` +if test -z "$am4te_version"; then + echo "buildconf: autom4te not found. Weird autoconf installation!" + exit 1 +fi +if test "$am4te_version" = "$ac_version"; then + echo "buildconf: autom4te version $am4te_version (ok)" +else + echo "buildconf: autom4te version $am4te_version (ERROR: does not match autoconf version)" + exit 1 +fi + +#-------------------------------------------------------------------------- +# autoheader 2.50 or newer +# +ah_version=`${AUTOHEADER:-autoheader} --version 2>/dev/null|head -n 1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'` +if test -z "$ah_version"; then + echo "buildconf: autoheader not found." + echo " You need autoheader version 2.50 or newer installed." + exit 1 +fi +old_IFS=$IFS; IFS='.'; set $ah_version; IFS=$old_IFS +if test "$1" = "2" -a "$2" -lt "50" || test "$1" -lt "2"; then + echo "buildconf: autoheader version $ah_version found." + echo " You need autoheader version 2.50 or newer installed." + echo " If you have a sufficient autoheader installed, but it" + echo " is not named 'autoheader', then try setting the" + echo " AUTOHEADER environment variable." + exit 1 +fi + +echo "buildconf: autoheader version $ah_version (ok)" + +#-------------------------------------------------------------------------- +# automake 1.7 or newer +# +need_automake="1.7" +am_version=`${AUTOMAKE:-automake} --version 2>/dev/null|head -n 1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//' -e 's/\(.*\)\(-p.*\)/\1/'` +if test -z "$am_version"; then + echo "buildconf: automake not found." + echo " You need automake version $need_automake or newer installed." + exit 1 +fi +old_IFS=$IFS; IFS='.'; set $am_version; IFS=$old_IFS +if test "$1" = "1" -a "$2" -lt "7" || test "$1" -lt "1"; then + echo "buildconf: automake version $am_version found." + echo " You need automake version $need_automake or newer installed." + echo " If you have a sufficient automake installed, but it" + echo " is not named 'automake', then try setting the" + echo " AUTOMAKE environment variable." + exit 1 +fi + +echo "buildconf: automake version $am_version (ok)" + +acloc_version=`${ACLOCAL:-aclocal} --version 2>/dev/null|head -n 1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//' -e 's/\(.*\)\(-p.*\)/\1/'` +if test -z "$acloc_version"; then + echo "buildconf: aclocal not found. Weird automake installation!" + exit 1 +fi +if test "$acloc_version" = "$am_version"; then + echo "buildconf: aclocal version $acloc_version (ok)" +else + echo "buildconf: aclocal version $acloc_version (ERROR: does not match automake version)" + exit 1 +fi + +#-------------------------------------------------------------------------- +# GNU libtoolize preliminary check +# +want_lt_major=1 +want_lt_minor=4 +want_lt_patch=2 +want_lt_version=1.4.2 + +# This approach that tries 'glibtoolize' first is intended for systems that +# have GNU libtool named as 'glibtoolize' and libtoolize not being GNU's. + +libtoolize=`findtool glibtoolize 2>/dev/null` +if test ! -x "$libtoolize"; then + libtoolize=`findtool ${LIBTOOLIZE:-libtoolize}` +fi +if test -z "$libtoolize"; then + echo "buildconf: libtoolize not found." + echo " You need GNU libtoolize $want_lt_version or newer installed." 
+ exit 1 +fi + +lt_pver=`$libtoolize --version 2>/dev/null|head -n 1` +lt_qver=`echo $lt_pver|sed -e "s/([^)]*)//g" -e "s/^[^0-9]*//g"` +lt_version=`echo $lt_qver|sed -e "s/[- ].*//" -e "s/\([a-z]*\)$//"` +if test -z "$lt_version"; then + echo "buildconf: libtoolize not found." + echo " You need GNU libtoolize $want_lt_version or newer installed." + exit 1 +fi +old_IFS=$IFS; IFS='.'; set $lt_version; IFS=$old_IFS +lt_major=$1 +lt_minor=$2 +lt_patch=$3 + +if test -z "$lt_major"; then + lt_status="bad" +elif test "$lt_major" -gt "$want_lt_major"; then + lt_status="good" +elif test "$lt_major" -lt "$want_lt_major"; then + lt_status="bad" +elif test -z "$lt_minor"; then + lt_status="bad" +elif test "$lt_minor" -gt "$want_lt_minor"; then + lt_status="good" +elif test "$lt_minor" -lt "$want_lt_minor"; then + lt_status="bad" +elif test -z "$lt_patch"; then + lt_status="bad" +elif test "$lt_patch" -gt "$want_lt_patch"; then + lt_status="good" +elif test "$lt_patch" -lt "$want_lt_patch"; then + lt_status="bad" +else + lt_status="good" +fi +if test "$lt_status" != "good"; then + echo "buildconf: libtoolize version $lt_version found." + echo " You need GNU libtoolize $want_lt_version or newer installed." + exit 1 +fi + +echo "buildconf: libtoolize version $lt_version (ok)" + +#-------------------------------------------------------------------------- +# m4 check +# +m4=`(${M4:-m4} --version 0<&- || ${M4:-gm4} --version) 2>/dev/null 0<&- | head -n 1`; +m4_version=`echo $m4 | sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'` + +if { echo $m4 | grep "GNU" >/dev/null 2>&1; } then + echo "buildconf: GNU m4 version $m4_version (ok)" +else + if test -z "$m4"; then + echo "buildconf: m4 version not recognized. You need a GNU m4 installed!" + else + echo "buildconf: m4 version $m4 found. You need a GNU m4 installed!" + fi + exit 1 +fi + +#-------------------------------------------------------------------------- +# perl check +# +PERL=`findtool ${PERL:-perl}` +if test -z "$PERL"; then + echo "buildconf: perl not found" + exit 1 +fi + +#-------------------------------------------------------------------------- +# Remove files generated on previous buildconf/configure run. +# +for fname in .deps \ + .libs \ + *.la \ + *.lo \ + *.a \ + *.o \ + Makefile \ + Makefile.in \ + aclocal.m4 \ + aclocal.m4.bak \ + ares_build.h \ + ares_config.h \ + ares_config.h.in \ + autom4te.cache \ + compile \ + config.guess \ + curl_config.h \ + curl_config.h.in \ + config.log \ + config.lt \ + config.status \ + config.sub \ + configure \ + configurehelp.pm \ + curl-config \ + curlbuild.h \ + depcomp \ + libcares.pc \ + libcurl.pc \ + libtool \ + libtool.m4 \ + libtool.m4.tmp \ + ltmain.sh \ + ltoptions.m4 \ + ltsugar.m4 \ + ltversion.m4 \ + lt~obsolete.m4 \ + missing \ + install-sh \ + stamp-h1 \ + stamp-h2 \ + stamp-h3 ; do + removethis "$fname" +done + +#-------------------------------------------------------------------------- +# run the correct scripts now +# + +echo "buildconf: running libtoolize" +${libtoolize} --copy --force || die "libtoolize command failed" + +# When using libtool 1.5.X (X < 26) we copy libtool.m4 to our local m4 +# subdirectory and this local copy is patched to fix some warnings that +# are triggered when running aclocal and using autoconf 2.62 or later. 
+ +if test "$lt_major" = "1" && test "$lt_minor" = "5"; then + if test -z "$lt_patch" || test "$lt_patch" -lt "26"; then + echo "buildconf: copying libtool.m4 to local m4 subdir" + ac_dir=`${ACLOCAL:-aclocal} --print-ac-dir` + if test -f $ac_dir/libtool.m4; then + cp -f $ac_dir/libtool.m4 m4/libtool.m4 + else + echo "buildconf: $ac_dir/libtool.m4 not found" + fi + if test -f m4/libtool.m4; then + echo "buildconf: renaming some variables in local m4/libtool.m4" + $PERL -i.tmp -pe \ + 's/lt_prog_compiler_pic_works/lt_cv_prog_compiler_pic_works/g; \ + s/lt_prog_compiler_static_works/lt_cv_prog_compiler_static_works/g;' \ + m4/libtool.m4 + rm -f m4/libtool.m4.tmp + fi + fi +fi + +if test -f m4/libtool.m4; then + echo "buildconf: converting all mv to mv -f in local m4/libtool.m4" + $PERL -i.tmp -pe 's/\bmv +([^-\s])/mv -f $1/g' m4/libtool.m4 + rm -f m4/libtool.m4.tmp +fi + +echo "buildconf: running aclocal" +${ACLOCAL:-aclocal} -I m4 $ACLOCAL_FLAGS || die "aclocal command failed" + +echo "buildconf: converting all mv to mv -f in local aclocal.m4" +$PERL -i.bak -pe 's/\bmv +([^-\s])/mv -f $1/g' aclocal.m4 + +echo "buildconf: running autoheader" +${AUTOHEADER:-autoheader} || die "autoheader command failed" + +echo "buildconf: running autoconf" +${AUTOCONF:-autoconf} || die "autoconf command failed" + +if test -d ares; then + cd ares + echo "buildconf: running in ares" + ./buildconf + cd .. +fi + +echo "buildconf: running automake" +${AUTOMAKE:-automake} --add-missing --copy || die "automake command failed" + +#-------------------------------------------------------------------------- +# GNU libtool complementary check +# +# Depending on the libtool and automake versions being used, config.guess +# might not be installed in the subdirectory until automake has finished. +# So we can not attempt to use it until this very last buildconf stage. +# +if test ! -f ./config.guess; then + echo "buildconf: config.guess not found" +else + buildhost=`./config.guess 2>/dev/null|head -n 1` + case $buildhost in + *-*-darwin*) + need_lt_major=1 + need_lt_minor=5 + need_lt_patch=26 + need_lt_check="yes" + ;; + *-*-hpux*) + need_lt_major=1 + need_lt_minor=5 + need_lt_patch=24 + need_lt_check="yes" + ;; + esac + if test ! -z "$need_lt_check"; then + if test -z "$lt_major"; then + lt_status="bad" + elif test "$lt_major" -gt "$need_lt_major"; then + lt_status="good" + elif test "$lt_major" -lt "$need_lt_major"; then + lt_status="bad" + elif test -z "$lt_minor"; then + lt_status="bad" + elif test "$lt_minor" -gt "$need_lt_minor"; then + lt_status="good" + elif test "$lt_minor" -lt "$need_lt_minor"; then + lt_status="bad" + elif test -z "$lt_patch"; then + lt_status="bad" + elif test "$lt_patch" -gt "$need_lt_patch"; then + lt_status="good" + elif test "$lt_patch" -lt "$need_lt_patch"; then + lt_status="bad" + else + lt_status="good" + fi + if test "$lt_status" != "good"; then + need_lt_version="$need_lt_major.$need_lt_minor.$need_lt_patch" + echo "buildconf: libtool version $lt_version found." + echo " $buildhost requires GNU libtool $need_lt_version or newer installed." + rm -f configure + exit 1 + fi + fi +fi + +#-------------------------------------------------------------------------- +# Finished successfully. 
+#
+echo "buildconf: OK"
+exit 0
diff --git a/deps-win32/curl-7.54.1/buildconf.bat b/deps-win32/curl-7.54.1/buildconf.bat
new file mode 100644
index 0000000..ad3fba6
--- /dev/null
+++ b/deps-win32/curl-7.54.1/buildconf.bat
@@ -0,0 +1,350 @@
+@echo off
+rem ***************************************************************************
+rem *                                  _   _ ____  _
+rem *  Project                     ___| | | |  _ \| |
+rem *                             / __| | | | |_) | |
+rem *                            | (__| |_| |  _ <| |___
+rem *                             \___|\___/|_| \_\_____|
+rem *
+rem * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
+rem *
+rem * This software is licensed as described in the file COPYING, which
+rem * you should have received as part of this distribution. The terms
+rem * are also available at https://curl.haxx.se/docs/copyright.html.
+rem *
+rem * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+rem * copies of the Software, and permit persons to whom the Software is
+rem * furnished to do so, under the terms of the COPYING file.
+rem *
+rem * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+rem * KIND, either express or implied.
+rem *
+rem ***************************************************************************
+
+rem NOTES
+rem
+rem This batch file must be used to set up a git tree to build on systems where
+rem there is no autotools support (i.e. DOS and Windows).
+rem
+
+:begin
+  rem Set our variables
+  if "%OS%" == "Windows_NT" setlocal
+  set MODE=GENERATE
+
+  rem Switch to this batch file's directory
+  cd /d "%~0\.." 1>NUL 2>&1
+
+  rem Check we are running from a curl git repository
+  if not exist GIT-INFO goto norepo
+
+  rem Detect programs. HAVE_<PROGNAME>
+  rem When not found the variable is set undefined. The undefined pattern
+  rem allows for statements like "if not defined HAVE_PERL (command)"
+  groff --version <NUL 1>NUL 2>&1
+  if errorlevel 1 (set HAVE_GROFF=) else (set HAVE_GROFF=Y)
+  nroff --version <NUL 1>NUL 2>&1
+  if errorlevel 1 (set HAVE_NROFF=) else (set HAVE_NROFF=Y)
+  perl --version <NUL 1>NUL 2>&1
+  if errorlevel 1 (set HAVE_PERL=) else (set HAVE_PERL=Y)
+  gzip --version <NUL 1>NUL 2>&1
+  if errorlevel 1 (set HAVE_GZIP=) else (set HAVE_GZIP=Y)
+
+:parseArgs
+  if "%~1" == "" goto start
+
+  if /i "%~1" == "-clean" (
+    set MODE=CLEAN
+  ) else if /i "%~1" == "-?" (
+    goto syntax
+  ) else if /i "%~1" == "-h" (
+    goto syntax
+  ) else if /i "%~1" == "-help" (
+    goto syntax
+  ) else (
+    goto unknown
+  )
+
+  shift & goto parseArgs
+
+:start
+  if "%MODE%" == "GENERATE" (
+    echo.
+    echo Generating prerequisite files
+
+    call :generate
+    if errorlevel 4 goto nogencurlbuild
+    if errorlevel 3 goto nogenhugehelp
+    if errorlevel 2 goto nogenmakefile
+    if errorlevel 1 goto warning
+
+  ) else (
+    echo.
+    echo Removing prerequisite files
+
+    call :clean
+    if errorlevel 3 goto nocleancurlbuild
+    if errorlevel 2 goto nocleanhugehelp
+    if errorlevel 1 goto nocleanmakefile
+  )
+
+  goto success
+
+rem Main generate function.
+rem +rem Returns: +rem +rem 0 - success +rem 1 - success with simplified tool_hugehelp.c +rem 2 - failed to generate Makefile +rem 3 - failed to generate tool_hugehelp.c +rem 4 - failed to generate curlbuild.h +rem +:generate + if "%OS%" == "Windows_NT" setlocal + set BASIC_HUGEHELP=0 + + rem Create Makefile + echo * %CD%\Makefile + if exist Makefile.dist ( + copy /Y Makefile.dist Makefile 1>NUL 2>&1 + if errorlevel 1 ( + if "%OS%" == "Windows_NT" endlocal + exit /B 2 + ) + ) + + rem Create tool_hugehelp.c + echo * %CD%\src\tool_hugehelp.c + call :genHugeHelp + if errorlevel 2 ( + if "%OS%" == "Windows_NT" endlocal + exit /B 3 + ) + if errorlevel 1 ( + set BASIC_HUGEHELP=1 + ) + cmd /c exit 0 + + rem Create curlbuild.h + echo * %CD%\include\curl\curlbuild.h + if exist include\curl\curlbuild.h.dist ( + copy /Y include\curl\curlbuild.h.dist include\curl\curlbuild.h 1>NUL 2>&1 + if errorlevel 1 ( + if "%OS%" == "Windows_NT" endlocal + exit /B 4 + ) + ) + + rem Setup c-ares git tree + if exist ares\buildconf.bat ( + echo. + echo Configuring c-ares build environment + cd ares + call buildconf.bat + cd .. + ) + + if "%BASIC_HUGEHELP%" == "1" ( + if "%OS%" == "Windows_NT" endlocal + exit /B 1 + ) + + if "%OS%" == "Windows_NT" endlocal + exit /B 0 + +rem Main clean function. +rem +rem Returns: +rem +rem 0 - success +rem 1 - failed to clean Makefile +rem 2 - failed to clean tool_hugehelp.c +rem 3 - failed to clean curlbuild.h +rem +:clean + rem Remove Makefile + echo * %CD%\Makefile + if exist Makefile ( + del Makefile 2>NUL + if exist Makefile ( + exit /B 1 + ) + ) + + rem Remove tool_hugehelp.c + echo * %CD%\src\tool_hugehelp.c + if exist src\tool_hugehelp.c ( + del src\tool_hugehelp.c 2>NUL + if exist src\tool_hugehelp.c ( + exit /B 2 + ) + ) + + rem Remove curlbuild.h + echo * %CD%\include\curl\curlbuild.h + if exist include\curl\curlbuild.h ( + del include\curl\curlbuild.h 2>NUL + if exist include\curl\curlbuild.h ( + exit /B 3 + ) + ) + + exit /B + +rem Function to generate src\tool_hugehelp.c +rem +rem Returns: +rem +rem 0 - full tool_hugehelp.c generated +rem 1 - simplified tool_hugehelp.c +rem 2 - failure +rem +:genHugeHelp + if "%OS%" == "Windows_NT" setlocal + set LC_ALL=C + set ROFFCMD= + set BASIC=1 + + if defined HAVE_PERL ( + if defined HAVE_GROFF ( + set ROFFCMD=groff -mtty-char -Tascii -P-c -man + ) else if defined HAVE_NROFF ( + set ROFFCMD=nroff -c -Tascii -man + ) + ) + + if defined ROFFCMD ( + echo #include "tool_setup.h"> src\tool_hugehelp.c + echo #include "tool_hugehelp.h">> src\tool_hugehelp.c + + if defined HAVE_GZIP ( + echo #ifndef HAVE_LIBZ>> src\tool_hugehelp.c + ) + + %ROFFCMD% docs\curl.1 2>NUL | perl src\mkhelp.pl docs\MANUAL >> src\tool_hugehelp.c + if defined HAVE_GZIP ( + echo #else>> src\tool_hugehelp.c + %ROFFCMD% docs\curl.1 2>NUL | perl src\mkhelp.pl -c docs\MANUAL >> src\tool_hugehelp.c + echo #endif /^* HAVE_LIBZ ^*/>> src\tool_hugehelp.c + ) + + set BASIC=0 + ) else ( + if exist src\tool_hugehelp.c.cvs ( + copy /Y src\tool_hugehelp.c.cvs src\tool_hugehelp.c 1>NUL 2>&1 + ) else ( + echo #include "tool_setup.h"> src\tool_hugehelp.c + echo #include "tool_hugehelp.hd">> src\tool_hugehelp.c + echo.>> src\tool_hugehelp.c + echo void hugehelp(void^)>> src\tool_hugehelp.c + echo {>> src\tool_hugehelp.c + echo #ifdef USE_MANUAL>> src\tool_hugehelp.c + echo fputs("Built-in manual not included\n", stdout^);>> src\tool_hugehelp.c + echo #endif>> src\tool_hugehelp.c + echo }>> src\tool_hugehelp.c + ) + ) + + findstr "/C:void hugehelp(void)" src\tool_hugehelp.c 1>NUL 
2>&1 + if errorlevel 1 ( + if "%OS%" == "Windows_NT" endlocal + exit /B 2 + ) + + if "%BASIC%" == "1" ( + if "%OS%" == "Windows_NT" endlocal + exit /B 1 + ) + + if "%OS%" == "Windows_NT" endlocal + exit /B 0 + +rem Function to clean-up local variables under DOS, Windows 3.x and +rem Windows 9x as setlocal isn't available until Windows NT +rem +:dosCleanup + set MODE= + set HAVE_GROFF= + set HAVE_NROFF= + set HAVE_PERL= + set HAVE_GZIP= + set BASIC_HUGEHELP= + set LC_ALL + set ROFFCMD= + set BASIC= + + exit /B + +:syntax + rem Display the help + echo. + echo Usage: buildconf [-clean] + echo. + echo -clean - Removes the files + goto error + +:unknown + echo. + echo Error: Unknown argument '%1' + goto error + +:norepo + echo. + echo Error: This batch file should only be used with a curl git repository + goto error + +:nogenmakefile + echo. + echo Error: Unable to generate Makefile + goto error + +:nogenhugehelp + echo. + echo Error: Unable to generate src\tool_hugehelp.c + goto error + +:nogencurlbuild + echo. + echo Error: Unable to generate include\curl\curlbuild.h + goto error + +:nocleanmakefile + echo. + echo Error: Unable to clean Makefile + goto error + +:nocleanhugehelp + echo. + echo Error: Unable to clean src\tool_hugehelp.c + goto error + +:nocleancurlbuild + echo. + echo Error: Unable to clean include\curl\curlbuild.h + goto error + +:warning + echo. + echo Warning: The curl manual could not be integrated in the source. This means when + echo you build curl the manual will not be available (curl --man^). Integration of + echo the manual is not required and a summary of the options will still be available + echo (curl --help^). To integrate the manual your PATH is required to have + echo groff/nroff, perl and optionally gzip for compression. + goto success + +:error + if "%OS%" == "Windows_NT" ( + endlocal + ) else ( + call :dosCleanup + ) + exit /B 1 + +:success + if "%OS%" == "Windows_NT" ( + endlocal + ) else ( + call :dosCleanup + ) + exit /B 0 diff --git a/deps-win32/curl-7.54.1/configure.ac b/deps-win32/curl-7.54.1/configure.ac new file mode 100644 index 0000000..d275751 --- /dev/null +++ b/deps-win32/curl-7.54.1/configure.ac @@ -0,0 +1,3964 @@ +#*************************************************************************** +# _ _ ____ _ +# Project ___| | | | _ \| | +# / __| | | | |_) | | +# | (__| |_| | _ <| |___ +# \___|\___/|_| \_\_____| +# +# Copyright (C) 1998 - 2017, Daniel Stenberg, , et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +#*************************************************************************** +dnl Process this file with autoconf to produce a configure script. 
+ +AC_PREREQ(2.57) + +dnl We don't know the version number "statically" so we use a dash here +AC_INIT([curl], [-], [a suitable curl mailing list: https://curl.haxx.se/mail/]) + +XC_OVR_ZZ50 +XC_OVR_ZZ60 +CURL_OVERRIDE_AUTOCONF + +dnl configure script copyright +AC_COPYRIGHT([Copyright (c) 1998 - 2017 Daniel Stenberg, +This configure script may be copied, distributed and modified under the +terms of the curl license; see COPYING for more details]) + +AC_CONFIG_SRCDIR([lib/urldata.h]) +AC_CONFIG_HEADERS(lib/curl_config.h include/curl/curlbuild.h) +AC_CONFIG_MACRO_DIR([m4]) +AM_MAINTAINER_MODE +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) + +CURL_CHECK_OPTION_DEBUG +CURL_CHECK_OPTION_OPTIMIZE +CURL_CHECK_OPTION_WARNINGS +CURL_CHECK_OPTION_WERROR +CURL_CHECK_OPTION_CURLDEBUG +CURL_CHECK_OPTION_SYMBOL_HIDING +CURL_CHECK_OPTION_ARES +CURL_CHECK_OPTION_RT + +XC_CHECK_PATH_SEPARATOR +AX_CODE_COVERAGE + +# +# save the configure arguments +# +CONFIGURE_OPTIONS="\"$ac_configure_args\"" +AC_SUBST(CONFIGURE_OPTIONS) + +CURL_CFLAG_EXTRAS="" +if test X"$want_werror" = Xyes; then + CURL_CFLAG_EXTRAS="-Werror" +fi +AC_SUBST(CURL_CFLAG_EXTRAS) + +dnl SED is mandatory for configure process and libtool. +dnl Set it now, allowing it to be changed later. +if test -z "$SED"; then + dnl allow it to be overridden + AC_PATH_PROG([SED], [sed], [not_found], + [$PATH:/usr/bin:/usr/local/bin]) + if test -z "$SED" || test "$SED" = "not_found"; then + AC_MSG_ERROR([sed not found in PATH. Cannot continue without sed.]) + fi +fi +AC_SUBST([SED]) + +dnl GREP is mandatory for configure process and libtool. +dnl Set it now, allowing it to be changed later. +if test -z "$GREP"; then + dnl allow it to be overridden + AC_PATH_PROG([GREP], [grep], [not_found], + [$PATH:/usr/bin:/usr/local/bin]) + if test -z "$GREP" || test "$GREP" = "not_found"; then + AC_MSG_ERROR([grep not found in PATH. Cannot continue without grep.]) + fi +fi +AC_SUBST([GREP]) + +dnl EGREP is mandatory for configure process and libtool. +dnl Set it now, allowing it to be changed later. +if test -z "$EGREP"; then + dnl allow it to be overridden + if echo a | ($GREP -E '(a|b)') >/dev/null 2>&1; then + AC_MSG_CHECKING([for egrep]) + EGREP="$GREP -E" + AC_MSG_RESULT([$EGREP]) + else + AC_PATH_PROG([EGREP], [egrep], [not_found], + [$PATH:/usr/bin:/usr/local/bin]) + fi +fi +if test -z "$EGREP" || test "$EGREP" = "not_found"; then + AC_MSG_ERROR([egrep not found in PATH. Cannot continue without egrep.]) +fi +AC_SUBST([EGREP]) + +dnl AR is mandatory for configure process and libtool. +dnl This is target dependent, so check it as a tool. +if test -z "$AR"; then + dnl allow it to be overridden + AC_PATH_TOOL([AR], [ar], [not_found], + [$PATH:/usr/bin:/usr/local/bin]) + if test -z "$AR" || test "$AR" = "not_found"; then + AC_MSG_ERROR([ar not found in PATH. 
Cannot continue without ar.]) + fi +fi +AC_SUBST([AR]) + +AC_SUBST(libext) + +dnl Remove non-configure distributed curlbuild.h +if test -f ${srcdir}/include/curl/curlbuild.h; then + rm -f ${srcdir}/include/curl/curlbuild.h +fi + +dnl figure out the libcurl version +CURLVERSION=`$SED -ne 's/^#define LIBCURL_VERSION "\(.*\)".*/\1/p' ${srcdir}/include/curl/curlver.h` +XC_CHECK_PROG_CC +XC_AUTOMAKE +AC_MSG_CHECKING([curl version]) +AC_MSG_RESULT($CURLVERSION) + +AC_SUBST(CURLVERSION) + +dnl +dnl we extract the numerical version for curl-config only +VERSIONNUM=`$SED -ne 's/^#define LIBCURL_VERSION_NUM 0x\([0-9A-Fa-f]*\).*/\1/p' ${srcdir}/include/curl/curlver.h` +AC_SUBST(VERSIONNUM) + +dnl Solaris pkgadd support definitions +PKGADD_PKG="HAXXcurl" +PKGADD_NAME="curl - a client that groks URLs" +PKGADD_VENDOR="curl.haxx.se" +AC_SUBST(PKGADD_PKG) +AC_SUBST(PKGADD_NAME) +AC_SUBST(PKGADD_VENDOR) + +dnl +dnl initialize all the info variables + curl_ssl_msg="no (--with-{ssl,gnutls,nss,polarssl,mbedtls,cyassl,axtls,winssl,darwinssl} )" + curl_ssh_msg="no (--with-libssh2)" + curl_zlib_msg="no (--with-zlib)" + curl_gss_msg="no (--with-gssapi)" +curl_tls_srp_msg="no (--enable-tls-srp)" + curl_res_msg="default (--enable-ares / --enable-threaded-resolver)" + curl_ipv6_msg="no (--enable-ipv6)" +curl_unix_sockets_msg="no (--enable-unix-sockets)" + curl_idn_msg="no (--with-{libidn2,winidn})" + curl_manual_msg="no (--enable-manual)" +curl_libcurl_msg="enabled (--disable-libcurl-option)" +curl_verbose_msg="enabled (--disable-verbose)" + curl_sspi_msg="no (--enable-sspi)" + curl_ldap_msg="no (--enable-ldap / --with-ldap-lib / --with-lber-lib)" + curl_ldaps_msg="no (--enable-ldaps)" + curl_rtsp_msg="no (--enable-rtsp)" + curl_rtmp_msg="no (--with-librtmp)" + curl_mtlnk_msg="no (--with-libmetalink)" + curl_psl_msg="no (--with-libpsl)" + + init_ssl_msg=${curl_ssl_msg} + +dnl +dnl Save some initial values the user might have provided +dnl +INITIAL_LDFLAGS=$LDFLAGS +INITIAL_LIBS=$LIBS + +dnl +dnl Detect the canonical host and target build environment +dnl + +AC_CANONICAL_HOST +dnl Get system canonical name +AC_DEFINE_UNQUOTED(OS, "${host}", [cpu-machine-OS]) + +dnl Checks for programs. + +dnl Our curl_off_t internal and external configure settings +CURL_CONFIGURE_CURL_OFF_T + +dnl This defines _ALL_SOURCE for AIX +CURL_CHECK_AIX_ALL_SOURCE + +dnl Our configure and build reentrant settings +CURL_CONFIGURE_THREAD_SAFE +CURL_CONFIGURE_REENTRANT + +dnl check for how to do large files +AC_SYS_LARGEFILE + +XC_LIBTOOL + +# +# Automake conditionals based on libtool related checks +# + +AM_CONDITIONAL([CURL_LT_SHLIB_USE_VERSION_INFO], + [test "x$xc_lt_shlib_use_version_info" = 'xyes']) +AM_CONDITIONAL([CURL_LT_SHLIB_USE_NO_UNDEFINED], + [test "x$xc_lt_shlib_use_no_undefined" = 'xyes']) +AM_CONDITIONAL([CURL_LT_SHLIB_USE_MIMPURE_TEXT], + [test "x$xc_lt_shlib_use_mimpure_text" = 'xyes']) + +# +# Due to libtool and automake machinery limitations of not allowing +# specifying separate CPPFLAGS or CFLAGS when compiling objects for +# inclusion of these in shared or static libraries, we are forced to +# build using separate configure runs for shared and static libraries +# on systems where different CPPFLAGS or CFLAGS are mandatory in order +# to compile objects for each kind of library. Notice that relying on +# the '-DPIC' CFLAG that libtool provides is not valid given that the +# user might for example choose to build static libraries with PIC. 
+# + +# +# Make our Makefile.am files use the staticlib CPPFLAG only when strictly +# targeting a static library and not building its shared counterpart. +# + +AM_CONDITIONAL([USE_CPPFLAG_CURL_STATICLIB], + [test "x$xc_lt_build_static_only" = 'xyes']) + +# +# Make staticlib CPPFLAG variable and its definition visible in output +# files unconditionally, providing an empty definition unless strictly +# targeting a static library and not building its shared counterpart. +# + +CPPFLAG_CURL_STATICLIB= +if test "x$xc_lt_build_static_only" = 'xyes'; then + CPPFLAG_CURL_STATICLIB='-DCURL_STATICLIB' +fi +AC_SUBST([CPPFLAG_CURL_STATICLIB]) + + +# Determine whether all dependent libraries must be specified when linking +if test "X$enable_shared" = "Xyes" -a "X$link_all_deplibs" = "Xno" +then + REQUIRE_LIB_DEPS=no +else + REQUIRE_LIB_DEPS=yes +fi +AC_SUBST(REQUIRE_LIB_DEPS) +AM_CONDITIONAL(USE_EXPLICIT_LIB_DEPS, test x$REQUIRE_LIB_DEPS = xyes) + +dnl check if there's a way to force code inline +AC_C_INLINE + +dnl ********************************************************************** +dnl platform/compiler/architecture specific checks/flags +dnl ********************************************************************** + +CURL_CHECK_COMPILER +CURL_SET_COMPILER_BASIC_OPTS +CURL_SET_COMPILER_DEBUG_OPTS +CURL_SET_COMPILER_OPTIMIZE_OPTS +CURL_SET_COMPILER_WARNING_OPTS + +if test "$compiler_id" = "INTEL_UNIX_C"; then + # + if test "$compiler_num" -ge "1000"; then + dnl icc 10.X or later + CFLAGS="$CFLAGS -shared-intel" + elif test "$compiler_num" -ge "900"; then + dnl icc 9.X specific + CFLAGS="$CFLAGS -i-dynamic" + fi + # +fi + +CURL_CHECK_COMPILER_HALT_ON_ERROR +CURL_CHECK_COMPILER_ARRAY_SIZE_NEGATIVE +CURL_CHECK_COMPILER_PROTOTYPE_MISMATCH +CURL_CHECK_COMPILER_SYMBOL_HIDING + +CURL_CHECK_CURLDEBUG +AM_CONDITIONAL(CURLDEBUG, test x$want_curldebug = xyes) + +supports_unittests=yes +# cross-compilation of unit tests static library/programs fails when +# libcurl shared library is built. This might be due to a libtool or +# automake issue. In this case we disable unit tests. +if test "x$cross_compiling" != "xno" && + test "x$enable_shared" != "xno"; then + supports_unittests=no +fi + +# IRIX 6.5.24 gcc 3.3 autobuilds fail unittests library compilation due to +# a problem related with OpenSSL headers and library versions not matching. +# Disable unit tests while time to further investigate this is found. +case $host in + mips-sgi-irix6.5) + if test "$compiler_id" = "GNU_C"; then + supports_unittests=no + fi + ;; +esac + +# All AIX autobuilds fails unit tests linking against unittests library +# due to unittests library being built with no symbols or members. Libtool ? +# Disable unit tests while time to further investigate this is found. +case $host_os in + aix*) + supports_unittests=no + ;; +esac + +dnl Build unit tests when option --enable-debug is given. +if test "x$want_debug" = "xyes" && + test "x$supports_unittests" = "xyes"; then + want_unittests=yes +else + want_unittests=no +fi +AM_CONDITIONAL(BUILD_UNITTESTS, test x$want_unittests = xyes) + +dnl ********************************************************************** +dnl Compilation based checks should not be done before this point. 
+dnl ********************************************************************** + +dnl ********************************************************************** +dnl Make sure that our checks for headers windows.h winsock.h winsock2.h +dnl and ws2tcpip.h take precedence over any other further checks which +dnl could be done later using AC_CHECK_HEADER or AC_CHECK_HEADERS for +dnl this specific header files. And do them before its results are used. +dnl ********************************************************************** + +CURL_CHECK_HEADER_WINDOWS +CURL_CHECK_NATIVE_WINDOWS +case X-"$curl_cv_native_windows" in + X-yes) + CURL_CHECK_HEADER_WINSOCK + CURL_CHECK_HEADER_WINSOCK2 + CURL_CHECK_HEADER_WS2TCPIP + CURL_CHECK_HEADER_WINLDAP + CURL_CHECK_HEADER_WINBER + ;; + *) + curl_cv_header_winsock_h="no" + curl_cv_header_winsock2_h="no" + curl_cv_header_ws2tcpip_h="no" + curl_cv_header_winldap_h="no" + curl_cv_header_winber_h="no" + ;; +esac +CURL_CHECK_WIN32_LARGEFILE + +CURL_MAC_CFLAGS + +dnl ************************************************************ +dnl switch off particular protocols +dnl +AC_MSG_CHECKING([whether to support http]) +AC_ARG_ENABLE(http, +AC_HELP_STRING([--enable-http],[Enable HTTP support]) +AC_HELP_STRING([--disable-http],[Disable HTTP support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_HTTP, 1, [to disable HTTP]) + AC_MSG_WARN([disable HTTP disables FTP over proxy and RTSP]) + AC_SUBST(CURL_DISABLE_HTTP, [1]) + AC_DEFINE(CURL_DISABLE_RTSP, 1, [to disable RTSP]) + AC_SUBST(CURL_DISABLE_RTSP, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) +AC_MSG_CHECKING([whether to support ftp]) +AC_ARG_ENABLE(ftp, +AC_HELP_STRING([--enable-ftp],[Enable FTP support]) +AC_HELP_STRING([--disable-ftp],[Disable FTP support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_FTP, 1, [to disable FTP]) + AC_SUBST(CURL_DISABLE_FTP, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) +AC_MSG_CHECKING([whether to support file]) +AC_ARG_ENABLE(file, +AC_HELP_STRING([--enable-file],[Enable FILE support]) +AC_HELP_STRING([--disable-file],[Disable FILE support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_FILE, 1, [to disable FILE]) + AC_SUBST(CURL_DISABLE_FILE, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) +AC_MSG_CHECKING([whether to support ldap]) +AC_ARG_ENABLE(ldap, +AC_HELP_STRING([--enable-ldap],[Enable LDAP support]) +AC_HELP_STRING([--disable-ldap],[Disable LDAP support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_LDAP, 1, [to disable LDAP]) + AC_SUBST(CURL_DISABLE_LDAP, [1]) + ;; + *) + AC_MSG_RESULT(yes) + ;; + esac ],[ + AC_MSG_RESULT(yes) ] +) +AC_MSG_CHECKING([whether to support ldaps]) +AC_ARG_ENABLE(ldaps, +AC_HELP_STRING([--enable-ldaps],[Enable LDAPS support]) +AC_HELP_STRING([--disable-ldaps],[Disable LDAPS support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_LDAPS, 1, [to disable LDAPS]) + AC_SUBST(CURL_DISABLE_LDAPS, [1]) + ;; + *) if test "x$CURL_DISABLE_LDAP" = "x1" ; then + AC_MSG_RESULT(LDAP needs to be enabled to support LDAPS) + AC_DEFINE(CURL_DISABLE_LDAPS, 1, [to disable LDAPS]) + AC_SUBST(CURL_DISABLE_LDAPS, [1]) + else + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_LDAP_SSL, 1, [Use LDAPS implementation]) + AC_SUBST(HAVE_LDAP_SSL, [1]) + fi + ;; + esac ],[ + if test "x$CURL_DISABLE_LDAP" = "x1" ; then + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_LDAPS, 1, 
[to disable LDAPS]) + AC_SUBST(CURL_DISABLE_LDAPS, [1]) + else + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_LDAP_SSL, 1, [Use LDAPS implementation]) + AC_SUBST(HAVE_LDAP_SSL, [1]) + fi ] +) + +AC_MSG_CHECKING([whether to support rtsp]) +AC_ARG_ENABLE(rtsp, +AC_HELP_STRING([--enable-rtsp],[Enable RTSP support]) +AC_HELP_STRING([--disable-rtsp],[Disable RTSP support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_RTSP, 1, [to disable RTSP]) + AC_SUBST(CURL_DISABLE_RTSP, [1]) + ;; + *) if test x$CURL_DISABLE_HTTP = x1 ; then + AC_MSG_ERROR(HTTP support needs to be enabled in order to enable RTSP support!) + else + AC_MSG_RESULT(yes) + curl_rtsp_msg="enabled" + fi + ;; + esac ], + if test "x$CURL_DISABLE_HTTP" != "x1"; then + AC_MSG_RESULT(yes) + curl_rtsp_msg="enabled" + else + AC_MSG_RESULT(no) + fi +) + +AC_MSG_CHECKING([whether to support proxies]) +AC_ARG_ENABLE(proxy, +AC_HELP_STRING([--enable-proxy],[Enable proxy support]) +AC_HELP_STRING([--disable-proxy],[Disable proxy support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_PROXY, 1, [to disable proxies]) + AC_SUBST(CURL_DISABLE_PROXY, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + +AC_MSG_CHECKING([whether to support dict]) +AC_ARG_ENABLE(dict, +AC_HELP_STRING([--enable-dict],[Enable DICT support]) +AC_HELP_STRING([--disable-dict],[Disable DICT support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_DICT, 1, [to disable DICT]) + AC_SUBST(CURL_DISABLE_DICT, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) +AC_MSG_CHECKING([whether to support telnet]) +AC_ARG_ENABLE(telnet, +AC_HELP_STRING([--enable-telnet],[Enable TELNET support]) +AC_HELP_STRING([--disable-telnet],[Disable TELNET support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_TELNET, 1, [to disable TELNET]) + AC_SUBST(CURL_DISABLE_TELNET, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) +AC_MSG_CHECKING([whether to support tftp]) +AC_ARG_ENABLE(tftp, +AC_HELP_STRING([--enable-tftp],[Enable TFTP support]) +AC_HELP_STRING([--disable-tftp],[Disable TFTP support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_TFTP, 1, [to disable TFTP]) + AC_SUBST(CURL_DISABLE_TFTP, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + +AC_MSG_CHECKING([whether to support pop3]) +AC_ARG_ENABLE(pop3, +AC_HELP_STRING([--enable-pop3],[Enable POP3 support]) +AC_HELP_STRING([--disable-pop3],[Disable POP3 support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_POP3, 1, [to disable POP3]) + AC_SUBST(CURL_DISABLE_POP3, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + + +AC_MSG_CHECKING([whether to support imap]) +AC_ARG_ENABLE(imap, +AC_HELP_STRING([--enable-imap],[Enable IMAP support]) +AC_HELP_STRING([--disable-imap],[Disable IMAP support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_IMAP, 1, [to disable IMAP]) + AC_SUBST(CURL_DISABLE_IMAP, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + + +AC_MSG_CHECKING([whether to support smb]) +AC_ARG_ENABLE(smb, +AC_HELP_STRING([--enable-smb],[Enable SMB/CIFS support]) +AC_HELP_STRING([--disable-smb],[Disable SMB/CIFS support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_SMB, 1, [to disable SMB/CIFS]) + AC_SUBST(CURL_DISABLE_SMB, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + 
AC_MSG_RESULT(yes) +) + +AC_MSG_CHECKING([whether to support smtp]) +AC_ARG_ENABLE(smtp, +AC_HELP_STRING([--enable-smtp],[Enable SMTP support]) +AC_HELP_STRING([--disable-smtp],[Disable SMTP support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_SMTP, 1, [to disable SMTP]) + AC_SUBST(CURL_DISABLE_SMTP, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + +AC_MSG_CHECKING([whether to support gopher]) +AC_ARG_ENABLE(gopher, +AC_HELP_STRING([--enable-gopher],[Enable Gopher support]) +AC_HELP_STRING([--disable-gopher],[Disable Gopher support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_GOPHER, 1, [to disable Gopher]) + AC_SUBST(CURL_DISABLE_GOPHER, [1]) + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + + +dnl ********************************************************************** +dnl Check for built-in manual +dnl ********************************************************************** + +AC_MSG_CHECKING([whether to provide built-in manual]) +AC_ARG_ENABLE(manual, +AC_HELP_STRING([--enable-manual],[Enable built-in manual]) +AC_HELP_STRING([--disable-manual],[Disable built-in manual]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + ;; + *) AC_MSG_RESULT(yes) + USE_MANUAL="1" + ;; + esac ], + AC_MSG_RESULT(yes) + USE_MANUAL="1" +) +dnl The actual use of the USE_MANUAL variable is done much later in this +dnl script to allow other actions to disable it as well. + +dnl ************************************************************ +dnl disable C code generation support +dnl +AC_MSG_CHECKING([whether to enable generation of C code]) +AC_ARG_ENABLE(libcurl_option, +AC_HELP_STRING([--enable-libcurl-option],[Enable --libcurl C code generation support]) +AC_HELP_STRING([--disable-libcurl-option],[Disable --libcurl C code generation support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_LIBCURL_OPTION, 1, [to disable --libcurl C code generation option]) + curl_libcurl_msg="no" + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + +dnl ********************************************************************** +dnl Checks for libraries. +dnl ********************************************************************** + +AC_MSG_CHECKING([whether to use libgcc]) +AC_ARG_ENABLE(libgcc, +AC_HELP_STRING([--enable-libgcc],[use libgcc when linking]), +[ case "$enableval" in + yes) + LIBS="-lgcc $LIBS" + AC_MSG_RESULT(yes) + ;; + *) AC_MSG_RESULT(no) + ;; + esac ], + AC_MSG_RESULT(no) +) + +CURL_CHECK_LIB_XNET + +dnl gethostbyname without lib or in the nsl lib? +AC_CHECK_FUNC(gethostbyname, + [HAVE_GETHOSTBYNAME="1" + ], + [ AC_CHECK_LIB(nsl, gethostbyname, + [HAVE_GETHOSTBYNAME="1" + LIBS="-lnsl $LIBS" + ]) + ]) + +if test "$HAVE_GETHOSTBYNAME" != "1" +then + dnl gethostbyname in the socket lib? + AC_CHECK_LIB(socket, gethostbyname, + [HAVE_GETHOSTBYNAME="1" + LIBS="-lsocket $LIBS" + ]) +fi + +if test "$HAVE_GETHOSTBYNAME" != "1" +then + dnl gethostbyname in the watt lib? + AC_CHECK_LIB(watt, gethostbyname, + [HAVE_GETHOSTBYNAME="1" + CPPFLAGS="-I/dev/env/WATT_ROOT/inc" + LDFLAGS="-L/dev/env/WATT_ROOT/lib" + LIBS="-lwatt $LIBS" + ]) +fi + +dnl At least one system has been identified to require BOTH nsl and socket +dnl libs at the same time to link properly. 
+if test "$HAVE_GETHOSTBYNAME" != "1" +then + AC_MSG_CHECKING([for gethostbyname with both nsl and socket libs]) + my_ac_save_LIBS=$LIBS + LIBS="-lnsl -lsocket $LIBS" + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ + ]],[[ + gethostbyname(); + ]]) + ],[ + AC_MSG_RESULT([yes]) + HAVE_GETHOSTBYNAME="1" + ],[ + AC_MSG_RESULT([no]) + LIBS=$my_ac_save_LIBS + ]) +fi + +if test "$HAVE_GETHOSTBYNAME" != "1" +then + dnl This is for winsock systems + if test "$curl_cv_header_windows_h" = "yes"; then + if test "$curl_cv_header_winsock_h" = "yes"; then + case $host in + *-*-mingw32ce*) + winsock_LIB="-lwinsock" + ;; + *) + winsock_LIB="-lwsock32" + ;; + esac + fi + if test "$curl_cv_header_winsock2_h" = "yes"; then + winsock_LIB="-lws2_32" + fi + if test ! -z "$winsock_LIB"; then + my_ac_save_LIBS=$LIBS + LIBS="$winsock_LIB $LIBS" + AC_MSG_CHECKING([for gethostbyname in $winsock_LIB]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#ifdef HAVE_WINDOWS_H +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#ifdef HAVE_WINSOCK_H +#include +#endif +#endif +#endif + ]],[[ + gethostbyname("www.dummysite.com"); + ]]) + ],[ + AC_MSG_RESULT([yes]) + HAVE_GETHOSTBYNAME="1" + ],[ + AC_MSG_RESULT([no]) + winsock_LIB="" + LIBS=$my_ac_save_LIBS + ]) + fi + fi +fi + +if test "$HAVE_GETHOSTBYNAME" != "1" +then + dnl This is for Minix 3.1 + AC_MSG_CHECKING([for gethostbyname for Minix 3]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +/* Older Minix versions may need here instead */ +#include + ]],[[ + gethostbyname("www.dummysite.com"); + ]]) + ],[ + AC_MSG_RESULT([yes]) + HAVE_GETHOSTBYNAME="1" + ],[ + AC_MSG_RESULT([no]) + ]) +fi + +if test "$HAVE_GETHOSTBYNAME" != "1" +then + dnl This is for eCos with a stubbed DNS implementation + AC_MSG_CHECKING([for gethostbyname for eCos]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +#include +#include + ]],[[ + gethostbyname("www.dummysite.com"); + ]]) + ],[ + AC_MSG_RESULT([yes]) + HAVE_GETHOSTBYNAME="1" + ],[ + AC_MSG_RESULT([no]) + ]) +fi + +if test "$HAVE_GETHOSTBYNAME" != "1" +then + dnl gethostbyname in the network lib - for Haiku OS + AC_CHECK_LIB(network, gethostbyname, + [HAVE_GETHOSTBYNAME="1" + LIBS="-lnetwork $LIBS" + ]) +fi + +if test "$HAVE_GETHOSTBYNAME" != "1" +then + dnl gethostbyname in the net lib - for BeOS + AC_CHECK_LIB(net, gethostbyname, + [HAVE_GETHOSTBYNAME="1" + LIBS="-lnet $LIBS" + ]) +fi + + +if test "$HAVE_GETHOSTBYNAME" != "1"; then + AC_MSG_ERROR([couldn't find libraries for gethostbyname()]) +fi + +CURL_CHECK_LIBS_CONNECT + +CURL_NETWORK_LIBS=$LIBS + +dnl ********************************************************************** +dnl In case that function clock_gettime with monotonic timer is available, +dnl check for additional required libraries. +dnl ********************************************************************** +CURL_CHECK_LIBS_CLOCK_GETTIME_MONOTONIC + +dnl ********************************************************************** +dnl The preceding library checks are all potentially useful for test +dnl servers and libtest cases which require networking and clock_gettime +dnl support. Save the list of required libraries at this point for use +dnl while linking those test servers and programs. 
+dnl ********************************************************************** +CURL_NETWORK_AND_TIME_LIBS=$LIBS + +dnl ********************************************************************** +dnl Check for the presence of ZLIB libraries and headers +dnl ********************************************************************** + +dnl Check for & handle argument to --with-zlib. + +clean_CPPFLAGS=$CPPFLAGS +clean_LDFLAGS=$LDFLAGS +clean_LIBS=$LIBS +ZLIB_LIBS="" +AC_ARG_WITH(zlib, +AC_HELP_STRING([--with-zlib=PATH],[search for zlib in PATH]) +AC_HELP_STRING([--without-zlib],[disable use of zlib]), + [OPT_ZLIB="$withval"]) + +if test "$OPT_ZLIB" = "no" ; then + AC_MSG_WARN([zlib disabled]) +else + if test "$OPT_ZLIB" = "yes" ; then + OPT_ZLIB="" + fi + + if test -z "$OPT_ZLIB" ; then + CURL_CHECK_PKGCONFIG(zlib) + + if test "$PKGCONFIG" != "no" ; then + LIBS="`$PKGCONFIG --libs-only-l zlib` $LIBS" + LDFLAGS="$LDFLAGS `$PKGCONFIG --libs-only-L zlib`" + CPPFLAGS="$CPPFLAGS `$PKGCONFIG --cflags-only-I zlib`" + OPT_ZLIB="" + HAVE_LIBZ="1" + fi + + if test -z "$HAVE_LIBZ"; then + + dnl Check for the lib without setting any new path, since many + dnl people have it in the default path + + AC_CHECK_LIB(z, inflateEnd, + dnl libz found, set the variable + [HAVE_LIBZ="1" + LIBS="-lz $LIBS"], + dnl if no lib found, try /usr/local + [OPT_ZLIB="/usr/local"]) + fi + fi + + dnl Add a nonempty path to the compiler flags + if test -n "$OPT_ZLIB"; then + CPPFLAGS="$CPPFLAGS -I$OPT_ZLIB/include" + LDFLAGS="$LDFLAGS -L$OPT_ZLIB/lib$libsuff" + fi + + AC_CHECK_HEADER(zlib.h, + [ + dnl zlib.h was found + HAVE_ZLIB_H="1" + dnl if the lib wasn't found already, try again with the new paths + if test "$HAVE_LIBZ" != "1"; then + AC_CHECK_LIB(z, gzread, + [ + dnl the lib was found! + HAVE_LIBZ="1" + LIBS="-lz $LIBS" + ], + [ CPPFLAGS=$clean_CPPFLAGS + LDFLAGS=$clean_LDFLAGS]) + fi + ], + [ + dnl zlib.h was not found, restore the flags + CPPFLAGS=$clean_CPPFLAGS + LDFLAGS=$clean_LDFLAGS] + ) + + if test "$HAVE_LIBZ" = "1" && test "$HAVE_ZLIB_H" != "1" + then + AC_MSG_WARN([configure found only the libz lib, not the header file!]) + HAVE_LIBZ="" + CPPFLAGS=$clean_CPPFLAGS + LDFLAGS=$clean_LDFLAGS + LIBS=$clean_LIBS + elif test "$HAVE_LIBZ" != "1" && test "$HAVE_ZLIB_H" = "1" + then + AC_MSG_WARN([configure found only the libz header file, not the lib!]) + CPPFLAGS=$clean_CPPFLAGS + LDFLAGS=$clean_LDFLAGS + LIBS=$clean_LIBS + elif test "$HAVE_LIBZ" = "1" && test "$HAVE_ZLIB_H" = "1" + then + dnl both header and lib were found! 
+ AC_SUBST(HAVE_LIBZ) + AC_DEFINE(HAVE_ZLIB_H, 1, [if you have the zlib.h header file]) + AC_DEFINE(HAVE_LIBZ, 1, [if zlib is available]) + + ZLIB_LIBS="-lz" + LIBS="-lz $clean_LIBS" + + dnl replace 'HAVE_LIBZ' in the automake makefile.ams + AMFIXLIB="1" + AC_MSG_NOTICE([found both libz and libz.h header]) + curl_zlib_msg="enabled" + fi +fi + +dnl set variable for use in automakefile(s) +AM_CONDITIONAL(HAVE_LIBZ, test x"$AMFIXLIB" = x1) +AC_SUBST(ZLIB_LIBS) + +dnl ********************************************************************** +dnl Check for LDAP +dnl ********************************************************************** + +LDAPLIBNAME="" +AC_ARG_WITH(ldap-lib, +AC_HELP_STRING([--with-ldap-lib=libname],[Specify name of ldap lib file]), + [LDAPLIBNAME="$withval"]) + +LBERLIBNAME="" +AC_ARG_WITH(lber-lib, +AC_HELP_STRING([--with-lber-lib=libname],[Specify name of lber lib file]), + [LBERLIBNAME="$withval"]) + +if test x$CURL_DISABLE_LDAP != x1 ; then + + CURL_CHECK_HEADER_LBER + CURL_CHECK_HEADER_LDAP + CURL_CHECK_HEADER_LDAPSSL + CURL_CHECK_HEADER_LDAP_SSL + + if test -z "$LDAPLIBNAME" ; then + if test "$curl_cv_native_windows" = "yes"; then + dnl Windows uses a single and unique LDAP library name + LDAPLIBNAME="wldap32" + LBERLIBNAME="no" + fi + fi + + if test "$LDAPLIBNAME" ; then + AC_CHECK_LIB("$LDAPLIBNAME", ldap_init,, [ + AC_MSG_WARN(["$LDAPLIBNAME" is not an LDAP library: LDAP disabled]) + AC_DEFINE(CURL_DISABLE_LDAP, 1, [to disable LDAP]) + AC_SUBST(CURL_DISABLE_LDAP, [1]) + AC_DEFINE(CURL_DISABLE_LDAPS, 1, [to disable LDAPS]) + AC_SUBST(CURL_DISABLE_LDAPS, [1])]) + else + dnl Try to find the right ldap libraries for this system + CURL_CHECK_LIBS_LDAP + case X-"$curl_cv_ldap_LIBS" in + X-unknown) + AC_MSG_WARN([Cannot find libraries for LDAP support: LDAP disabled]) + AC_DEFINE(CURL_DISABLE_LDAP, 1, [to disable LDAP]) + AC_SUBST(CURL_DISABLE_LDAP, [1]) + AC_DEFINE(CURL_DISABLE_LDAPS, 1, [to disable LDAPS]) + AC_SUBST(CURL_DISABLE_LDAPS, [1]) + ;; + esac + fi +fi + +if test x$CURL_DISABLE_LDAP != x1 ; then + + if test "$LBERLIBNAME" ; then + dnl If name is "no" then don't define this library at all + dnl (it's only needed if libldap.so's dependencies are broken). 
+ if test "$LBERLIBNAME" != "no" ; then + AC_CHECK_LIB("$LBERLIBNAME", ber_free,, [ + AC_MSG_WARN(["$LBERLIBNAME" is not an LBER library: LDAP disabled]) + AC_DEFINE(CURL_DISABLE_LDAP, 1, [to disable LDAP]) + AC_SUBST(CURL_DISABLE_LDAP, [1]) + AC_DEFINE(CURL_DISABLE_LDAPS, 1, [to disable LDAPS]) + AC_SUBST(CURL_DISABLE_LDAPS, [1])]) + fi + fi +fi + +if test x$CURL_DISABLE_LDAP != x1 ; then + AC_CHECK_FUNCS([ldap_url_parse ldap_init_fd]) + + if test "$LDAPLIBNAME" = "wldap32"; then + curl_ldap_msg="enabled (winldap)" + AC_DEFINE(USE_WIN32_LDAP, 1, [Use Windows LDAP implementation]) + else + curl_ldap_msg="enabled (OpenLDAP)" + if test "x$ac_cv_func_ldap_init_fd" = "xyes"; then + AC_DEFINE(USE_OPENLDAP, 1, [Use OpenLDAP-specific code]) + AC_SUBST(USE_OPENLDAP, [1]) + fi + fi +fi + +if test x$CURL_DISABLE_LDAPS != x1 ; then + curl_ldaps_msg="enabled" +fi + +dnl ********************************************************************** +dnl Checks for IPv6 +dnl ********************************************************************** + +AC_MSG_CHECKING([whether to enable IPv6]) +AC_ARG_ENABLE(ipv6, +AC_HELP_STRING([--enable-ipv6],[Enable IPv6 (with IPv4) support]) +AC_HELP_STRING([--disable-ipv6],[Disable IPv6 support]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + ipv6=no + ;; + *) AC_MSG_RESULT(yes) + ipv6=yes + ;; + esac ], + + AC_TRY_RUN([ /* is AF_INET6 available? */ +#include +#ifdef HAVE_WINSOCK2_H +#include +#else +#include +#endif +#include /* for exit() */ +main() +{ + if (socket(AF_INET6, SOCK_STREAM, 0) < 0) + exit(1); + else + exit(0); +} +], + AC_MSG_RESULT(yes) + ipv6=yes, + AC_MSG_RESULT(no) + ipv6=no, + AC_MSG_RESULT(yes) + ipv6=yes +)) + +if test "$ipv6" = "yes"; then + curl_ipv6_msg="enabled" +fi + +# Check if struct sockaddr_in6 have sin6_scope_id member +if test "$ipv6" = yes; then + AC_MSG_CHECKING([if struct sockaddr_in6 has sin6_scope_id member]) + AC_TRY_COMPILE([ +#include +#ifdef HAVE_WINSOCK2_H +#include +#include +#else +#include +#endif] , + struct sockaddr_in6 s; s.sin6_scope_id = 0; , have_sin6_scope_id=yes) + if test "$have_sin6_scope_id" = yes; then + AC_MSG_RESULT([yes]) + AC_DEFINE(HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID, 1, [Define to 1 if struct sockaddr_in6 has the sin6_scope_id member]) + else + AC_MSG_RESULT([no]) + fi +fi + +dnl ********************************************************************** +dnl Check if the operating system allows programs to write to their own argv[] +dnl ********************************************************************** + +AC_MSG_CHECKING([if argv can be written to]) +AC_RUN_IFELSE([ + AC_LANG_SOURCE([[ +int main(int argc, char ** argv) { + argv[0][0] = ' '; + return (argv[0][0] == ' ')?0:1; +} + ]]) +],[ + curl_cv_writable_argv=yes +],[ + curl_cv_writable_argv=no +],[ + curl_cv_writable_argv=cross +]) +case $curl_cv_writable_argv in +yes) + AC_DEFINE(HAVE_WRITABLE_ARGV, 1, [Define this symbol if your OS supports changing the contents of argv]) + AC_MSG_RESULT(yes) + ;; +no) + AC_MSG_RESULT(no) + ;; +*) + AC_MSG_RESULT(no) + AC_MSG_WARN([the previous check could not be made default was used]) + ;; +esac + +dnl ********************************************************************** +dnl Check for GSS-API libraries +dnl ********************************************************************** + +dnl check for GSS-API stuff in the /usr as default + +GSSAPI_ROOT="/usr" +AC_ARG_WITH(gssapi-includes, + AC_HELP_STRING([--with-gssapi-includes=DIR], + [Specify location of GSS-API headers]), + [ GSSAPI_INCS="-I$withval" + want_gss="yes" ] +) 
+ +AC_ARG_WITH(gssapi-libs, + AC_HELP_STRING([--with-gssapi-libs=DIR], + [Specify location of GSS-API libs]), + [ GSSAPI_LIB_DIR="-L$withval" + want_gss="yes" ] +) + +AC_ARG_WITH(gssapi, + AC_HELP_STRING([--with-gssapi=DIR], + [Where to look for GSS-API]), [ + GSSAPI_ROOT="$withval" + if test x"$GSSAPI_ROOT" != xno; then + want_gss="yes" + if test x"$GSSAPI_ROOT" = xyes; then + dnl if yes, then use default root + GSSAPI_ROOT="/usr" + fi + fi +]) + +: ${KRB5CONFIG:="$GSSAPI_ROOT/bin/krb5-config"} + +save_CPPFLAGS="$CPPFLAGS" +AC_MSG_CHECKING([if GSS-API support is requested]) +if test x"$want_gss" = xyes; then + AC_MSG_RESULT(yes) + + if test -z "$GSSAPI_INCS"; then + if test -n "$host_alias" -a -f "$GSSAPI_ROOT/bin/$host_alias-krb5-config"; then + GSSAPI_INCS=`$GSSAPI_ROOT/bin/$host_alias-krb5-config --cflags gssapi` + elif test -f "$KRB5CONFIG"; then + GSSAPI_INCS=`$KRB5CONFIG --cflags gssapi` + elif test "$GSSAPI_ROOT" != "yes"; then + GSSAPI_INCS="-I$GSSAPI_ROOT/include" + fi + fi + + CPPFLAGS="$CPPFLAGS $GSSAPI_INCS" + + AC_CHECK_HEADER(gss.h, + [ + dnl found in the given dirs + AC_DEFINE(HAVE_GSSGNU, 1, [if you have GNU GSS]) + gnu_gss=yes + ], + [ + dnl not found, check Heimdal or MIT + AC_CHECK_HEADERS([gssapi/gssapi.h], [], [not_mit=1]) + AC_CHECK_HEADERS( + [gssapi/gssapi_generic.h gssapi/gssapi_krb5.h], + [], + [not_mit=1], + [ +AC_INCLUDES_DEFAULT +#ifdef HAVE_GSSAPI_GSSAPI_H +#include +#endif + ]) + if test "x$not_mit" = "x1"; then + dnl MIT not found, check for Heimdal + AC_CHECK_HEADER(gssapi.h, + [ + dnl found + AC_DEFINE(HAVE_GSSHEIMDAL, 1, [if you have Heimdal]) + ], + [ + dnl no header found, disabling GSS + want_gss=no + AC_MSG_WARN(disabling GSS-API support since no header files were found) + ] + ) + else + dnl MIT found + AC_DEFINE(HAVE_GSSMIT, 1, [if you have MIT Kerberos]) + dnl check if we have a really old MIT Kerberos version (<= 1.2) + AC_MSG_CHECKING([if GSS-API headers declare GSS_C_NT_HOSTBASED_SERVICE]) + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#include +#include +#include + ]],[[ + gss_import_name( + (OM_uint32 *)0, + (gss_buffer_t)0, + GSS_C_NT_HOSTBASED_SERVICE, + (gss_name_t *)0); + ]]) + ],[ + AC_MSG_RESULT([yes]) + ],[ + AC_MSG_RESULT([no]) + AC_DEFINE(HAVE_OLD_GSSMIT, 1, + [if you have an old MIT Kerberos version, lacking GSS_C_NT_HOSTBASED_SERVICE]) + ]) + fi + ] + ) +else + AC_MSG_RESULT(no) +fi +if test x"$want_gss" = xyes; then + AC_DEFINE(HAVE_GSSAPI, 1, [if you have GSS-API libraries]) + HAVE_GSSAPI=1 + curl_gss_msg="enabled (MIT Kerberos/Heimdal)" + + if test -n "$gnu_gss"; then + curl_gss_msg="enabled (GNU GSS)" + LDFLAGS="$LDFLAGS $GSSAPI_LIB_DIR" + LIBS="-lgss $LIBS" + elif test -z "$GSSAPI_LIB_DIR"; then + case $host in + *-*-darwin*) + LIBS="-lgssapi_krb5 -lresolv $LIBS" + ;; + *) + if test -n "$host_alias" -a -f "$GSSAPI_ROOT/bin/$host_alias-krb5-config"; then + dnl krb5-config doesn't have --libs-only-L or similar, put everything + dnl into LIBS + gss_libs=`$GSSAPI_ROOT/bin/$host_alias-krb5-config --libs gssapi` + LIBS="$gss_libs $LIBS" + elif test -f "$KRB5CONFIG"; then + dnl krb5-config doesn't have --libs-only-L or similar, put everything + dnl into LIBS + gss_libs=`$KRB5CONFIG --libs gssapi` + LIBS="$gss_libs $LIBS" + else + case $host in + *-hp-hpux*) + gss_libname="gss" + ;; + *) + gss_libname="gssapi" + ;; + esac + + if test "$GSSAPI_ROOT" != "yes"; then + LDFLAGS="$LDFLAGS -L$GSSAPI_ROOT/lib$libsuff" + LIBS="-l$gss_libname $LIBS" + else + LIBS="-l$gss_libname $LIBS" + fi + fi + ;; + esac + else + LDFLAGS="$LDFLAGS 
$GSSAPI_LIB_DIR" + case $host in + *-hp-hpux*) + LIBS="-lgss $LIBS" + ;; + *) + LIBS="-lgssapi $LIBS" + ;; + esac + fi +else + CPPFLAGS="$save_CPPFLAGS" +fi + +dnl ------------------------------------------------- +dnl check winssl option before other SSL libraries +dnl ------------------------------------------------- + +OPT_WINSSL=no +AC_ARG_WITH(winssl,dnl +AC_HELP_STRING([--with-winssl],[enable Windows native SSL/TLS]) +AC_HELP_STRING([--without-winssl], [disable Windows native SSL/TLS]), + OPT_WINSSL=$withval) + +AC_MSG_CHECKING([whether to enable Windows native SSL/TLS (Windows native builds only)]) +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + if test "x$OPT_WINSSL" != "xno" && + test "x$curl_cv_native_windows" = "xyes"; then + AC_MSG_RESULT(yes) + AC_DEFINE(USE_SCHANNEL, 1, [to enable Windows native SSL/TLS support]) + AC_SUBST(USE_SCHANNEL, [1]) + curl_ssl_msg="enabled (Windows-native)" + WINSSL_ENABLED=1 + # --with-winssl implies --enable-sspi + AC_DEFINE(USE_WINDOWS_SSPI, 1, [to enable SSPI support]) + AC_SUBST(USE_WINDOWS_SSPI, [1]) + curl_sspi_msg="enabled" + LIBS="-lcrypt32 $LIBS" + else + AC_MSG_RESULT(no) + fi +else + AC_MSG_RESULT(no) +fi + +OPT_DARWINSSL=no +AC_ARG_WITH(darwinssl,dnl +AC_HELP_STRING([--with-darwinssl],[enable Apple OS native SSL/TLS]) +AC_HELP_STRING([--without-darwinssl], [disable Apple OS native SSL/TLS]), + OPT_DARWINSSL=$withval) + +AC_MSG_CHECKING([whether to enable Apple OS native SSL/TLS]) +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + if test "x$OPT_DARWINSSL" != "xno" && + test -d "/System/Library/Frameworks/Security.framework"; then + AC_MSG_RESULT(yes) + AC_DEFINE(USE_DARWINSSL, 1, [to enable Apple OS native SSL/TLS support]) + AC_SUBST(USE_DARWINSSL, [1]) + curl_ssl_msg="enabled (Apple OS-native)" + DARWINSSL_ENABLED=1 + LDFLAGS="$LDFLAGS -framework CoreFoundation -framework Security" + else + AC_MSG_RESULT(no) + fi +else + AC_MSG_RESULT(no) +fi + +dnl ********************************************************************** +dnl Check for the presence of SSL libraries and headers +dnl ********************************************************************** + +dnl Default to compiler & linker defaults for SSL files & libraries. +OPT_SSL=off +dnl Default to no CA bundle +ca="no" +AC_ARG_WITH(ssl,dnl +AC_HELP_STRING([--with-ssl=PATH],[Where to look for OpenSSL, PATH points to the SSL installation (default: /usr/local/ssl); when possible, set the PKG_CONFIG_PATH environment variable instead of using this option]) +AC_HELP_STRING([--without-ssl], [disable OpenSSL]), + OPT_SSL=$withval) + +if test "$curl_ssl_msg" = "$init_ssl_msg" && test X"$OPT_SSL" != Xno; then + dnl backup the pre-ssl variables + CLEANLDFLAGS="$LDFLAGS" + CLEANCPPFLAGS="$CPPFLAGS" + CLEANLIBS="$LIBS" + + dnl This is for Msys/Mingw + case $host in + *-*-msys* | *-*-mingw*) + AC_MSG_CHECKING([for gdi32]) + my_ac_save_LIBS=$LIBS + LIBS="-lgdi32 $LIBS" + AC_TRY_LINK([#include + #include ], + [GdiFlush();], + [ dnl worked! 
+ AC_MSG_RESULT([yes])], + [ dnl failed, restore LIBS + LIBS=$my_ac_save_LIBS + AC_MSG_RESULT(no)] + ) + ;; + esac + + case "$OPT_SSL" in + yes) + dnl --with-ssl (without path) used + if test x$cross_compiling != xyes; then + dnl only do pkg-config magic when not cross-compiling + PKGTEST="yes" + fi + PREFIX_OPENSSL=/usr/local/ssl + LIB_OPENSSL="$PREFIX_OPENSSL/lib$libsuff" + ;; + off) + dnl no --with-ssl option given, just check default places + if test x$cross_compiling != xyes; then + dnl only do pkg-config magic when not cross-compiling + PKGTEST="yes" + fi + PREFIX_OPENSSL= + ;; + *) + dnl check the given --with-ssl spot + PKGTEST="no" + PREFIX_OPENSSL=$OPT_SSL + + dnl Try pkg-config even when cross-compiling. Since we + dnl specify PKG_CONFIG_LIBDIR we're only looking where + dnl the user told us to look + OPENSSL_PCDIR="$OPT_SSL/lib/pkgconfig" + AC_MSG_NOTICE([PKG_CONFIG_LIBDIR will be set to "$OPENSSL_PCDIR"]) + if test -f "$OPENSSL_PCDIR/openssl.pc"; then + PKGTEST="yes" + fi + + dnl in case pkg-config comes up empty, use what we got + dnl via --with-ssl + LIB_OPENSSL="$PREFIX_OPENSSL/lib$libsuff" + if test "$PREFIX_OPENSSL" != "/usr" ; then + SSL_LDFLAGS="-L$LIB_OPENSSL" + SSL_CPPFLAGS="-I$PREFIX_OPENSSL/include" + fi + SSL_CPPFLAGS="$SSL_CPPFLAGS -I$PREFIX_OPENSSL/include/openssl" + ;; + esac + + if test "$PKGTEST" = "yes"; then + + CURL_CHECK_PKGCONFIG(openssl, [$OPENSSL_PCDIR]) + + if test "$PKGCONFIG" != "no" ; then + SSL_LIBS=`CURL_EXPORT_PCDIR([$OPENSSL_PCDIR]) dnl + $PKGCONFIG --libs-only-l openssl 2>/dev/null` + + SSL_LDFLAGS=`CURL_EXPORT_PCDIR([$OPENSSL_PCDIR]) dnl + $PKGCONFIG --libs-only-L openssl 2>/dev/null` + + SSL_CPPFLAGS=`CURL_EXPORT_PCDIR([$OPENSSL_PCDIR]) dnl + $PKGCONFIG --cflags-only-I openssl 2>/dev/null` + + AC_SUBST(SSL_LIBS) + AC_MSG_NOTICE([pkg-config: SSL_LIBS: "$SSL_LIBS"]) + AC_MSG_NOTICE([pkg-config: SSL_LDFLAGS: "$SSL_LDFLAGS"]) + AC_MSG_NOTICE([pkg-config: SSL_CPPFLAGS: "$SSL_CPPFLAGS"]) + + LIB_OPENSSL=`echo $SSL_LDFLAGS | sed -e 's/-L//g'` + + dnl use the values pkg-config reported. This is here + dnl instead of below with CPPFLAGS and LDFLAGS because we only + dnl learn about this via pkg-config. If we only have + dnl the argument to --with-ssl we don't know what + dnl additional libs may be necessary. Hope that we + dnl don't need any. + LIBS="$SSL_LIBS $LIBS" + fi + fi + + dnl finally, set flags to use SSL + CPPFLAGS="$CPPFLAGS $SSL_CPPFLAGS" + LDFLAGS="$LDFLAGS $SSL_LDFLAGS" + + AC_CHECK_LIB(crypto, HMAC_Update,[ + HAVECRYPTO="yes" + LIBS="-lcrypto $LIBS" + ],[ + LDFLAGS="$CLEANLDFLAGS -L$LIB_OPENSSL" + CPPFLAGS="$CLEANCPPFLAGS -I$PREFIX_OPENSSL/include/openssl -I$PREFIX_OPENSSL/include" + AC_CHECK_LIB(crypto, HMAC_Init_ex,[ + HAVECRYPTO="yes" + LIBS="-lcrypto $LIBS"], [ + + dnl still no, but what about with -ldl? + AC_MSG_CHECKING([OpenSSL linking with -ldl]) + LIBS="-ldl $LIBS" + AC_TRY_LINK( + [ + #include + ], + [ + ERR_clear_error(); + ], + [ + AC_MSG_RESULT(yes) + HAVECRYPTO="yes" + ], + [ + AC_MSG_RESULT(no) + dnl ok, so what about bouth -ldl and -lpthread? 
+
+          AC_MSG_CHECKING([OpenSSL linking with -ldl and -lpthread])
+          LIBS="-lpthread $LIBS"
+          AC_TRY_LINK(
+            [
+              #include <openssl/err.h>
+            ],
+            [
+              ERR_clear_error();
+            ],
+            [
+              AC_MSG_RESULT(yes)
+              HAVECRYPTO="yes"
+            ],
+            [
+              AC_MSG_RESULT(no)
+              LDFLAGS="$CLEANLDFLAGS"
+              CPPFLAGS="$CLEANCPPFLAGS"
+              LIBS="$CLEANLIBS"
+
+            ])
+
+          ])
+
+      ])
+  ])
+
+  if test X"$HAVECRYPTO" = X"yes"; then
+    dnl This is only reasonable to do if crypto actually is there: check for
+    dnl SSL libs. NOTE: it is important to do this AFTER the crypto lib
+
+    AC_CHECK_LIB(ssl, SSL_connect)
+
+    if test "$ac_cv_lib_ssl_SSL_connect" != yes; then
+      dnl we didn't find the SSL lib, try the RSAglue/rsaref stuff
+      AC_MSG_CHECKING(for ssl with RSAglue/rsaref libs in use);
+      OLIBS=$LIBS
+      LIBS="-lRSAglue -lrsaref $LIBS"
+      AC_CHECK_LIB(ssl, SSL_connect)
+      if test "$ac_cv_lib_ssl_SSL_connect" != yes; then
+        dnl still no SSL_connect
+        AC_MSG_RESULT(no)
+        LIBS=$OLIBS
+      else
+        AC_MSG_RESULT(yes)
+      fi
+
+    else
+
+      dnl Have the libraries--check for OpenSSL headers
+      AC_CHECK_HEADERS(openssl/x509.h openssl/rsa.h openssl/crypto.h \
+                       openssl/pem.h openssl/ssl.h openssl/err.h,
+        curl_ssl_msg="enabled (OpenSSL)"
+        OPENSSL_ENABLED=1
+        AC_DEFINE(USE_OPENSSL, 1, [if OpenSSL is in use]))
+
+      if test $ac_cv_header_openssl_x509_h = no; then
+        dnl we don't use the "action" part of the AC_CHECK_HEADERS macro
+        dnl since 'err.h' might in fact find a krb4 header with the same
+        dnl name
+        AC_CHECK_HEADERS(x509.h rsa.h crypto.h pem.h ssl.h err.h)
+
+        if test $ac_cv_header_x509_h = yes &&
+           test $ac_cv_header_crypto_h = yes &&
+           test $ac_cv_header_ssl_h = yes; then
+          dnl three matches
+          curl_ssl_msg="enabled (OpenSSL)"
+          OPENSSL_ENABLED=1
+        fi
+      fi
+    fi
+
+    if test X"$OPENSSL_ENABLED" = X"1"; then
+      dnl is there a pkcs12.h header present?
+ AC_CHECK_HEADERS(openssl/pkcs12.h) + else + LIBS="$CLEANLIBS" + fi + + if test X"$OPT_SSL" != Xoff && + test "$OPENSSL_ENABLED" != "1"; then + AC_MSG_ERROR([OpenSSL libs and/or directories were not found where specified!]) + fi + fi + + if test X"$OPENSSL_ENABLED" = X"1"; then + dnl If the ENGINE library seems to be around, check for the OpenSSL engine + dnl stuff, it is kind of "separated" from the main SSL check + AC_CHECK_FUNC(ENGINE_init, + [ + AC_CHECK_HEADERS(openssl/engine.h) + AC_CHECK_FUNCS( ENGINE_load_builtin_engines ) + ]) + + dnl These can only exist if OpenSSL exists + dnl Older versions of Cyassl (some time before 2.9.4) don't have + dnl SSL_get_shutdown (but this check won't actually detect it there + dnl as it's a macro that needs the header files be included) + + AC_CHECK_FUNCS( RAND_egd \ + ENGINE_cleanup \ + SSL_get_shutdown \ + SSLv2_client_method ) + + AC_MSG_CHECKING([for BoringSSL]) + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ + #include + ]],[[ + #ifndef OPENSSL_IS_BORINGSSL + #error not boringssl + #endif + ]]) + ],[ + AC_MSG_RESULT([yes]) + AC_DEFINE_UNQUOTED(HAVE_BORINGSSL, 1, + [Define to 1 if using BoringSSL.]) + curl_ssl_msg="enabled (BoringSSL)" + ],[ + AC_MSG_RESULT([no]) + ]) + + AC_MSG_CHECKING([for libressl]) + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#include + ]],[[ + int dummy = LIBRESSL_VERSION_NUMBER; + ]]) + ],[ + AC_MSG_RESULT([yes]) + AC_DEFINE_UNQUOTED(HAVE_LIBRESSL, 1, + [Define to 1 if using libressl.]) + curl_ssl_msg="enabled (libressl)" + ],[ + AC_MSG_RESULT([no]) + ]) + fi + + if test "$OPENSSL_ENABLED" = "1"; then + if test -n "$LIB_OPENSSL"; then + dnl when the ssl shared libs were found in a path that the run-time + dnl linker doesn't search through, we need to add it to LD_LIBRARY_PATH + dnl to prevent further configure tests to fail due to this + if test "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$LIB_OPENSSL" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $LIB_OPENSSL to LD_LIBRARY_PATH]) + fi + fi + CURL_CHECK_OPENSSL_API + fi + +fi + +dnl ********************************************************************** +dnl Check for the random seed preferences +dnl ********************************************************************** + +if test X"$OPENSSL_ENABLED" = X"1"; then + AC_ARG_WITH(egd-socket, + AC_HELP_STRING([--with-egd-socket=FILE], + [Entropy Gathering Daemon socket pathname]), + [ EGD_SOCKET="$withval" ] + ) + if test -n "$EGD_SOCKET" ; then + AC_DEFINE_UNQUOTED(EGD_SOCKET, "$EGD_SOCKET", + [your Entropy Gathering Daemon socket pathname] ) + fi + + dnl Check for user-specified random device + AC_ARG_WITH(random, + AC_HELP_STRING([--with-random=FILE], + [read randomness from FILE (default=/dev/urandom)]), + [ RANDOM_FILE="$withval" ], + [ + if test x$cross_compiling != xyes; then + dnl Check for random device + AC_CHECK_FILE("/dev/urandom", [ RANDOM_FILE="/dev/urandom"] ) + else + AC_MSG_WARN([skipped the /dev/urandom detection when cross-compiling]) + fi + ] + ) + if test -n "$RANDOM_FILE" && test X"$RANDOM_FILE" != Xno ; then + AC_SUBST(RANDOM_FILE) + AC_DEFINE_UNQUOTED(RANDOM_FILE, "$RANDOM_FILE", + [a suitable file to read random data from]) + fi +fi + +dnl --- +dnl We require OpenSSL with SRP support. 
+dnl --- +if test "$OPENSSL_ENABLED" = "1"; then + AC_CHECK_LIB(crypto, SRP_Calc_client_key, + [ + AC_DEFINE(HAVE_OPENSSL_SRP, 1, [if you have the function SRP_Calc_client_key]) + AC_SUBST(HAVE_OPENSSL_SRP, [1]) + ]) +fi + +dnl ---------------------------------------------------- +dnl check for GnuTLS +dnl ---------------------------------------------------- + +dnl Default to compiler & linker defaults for GnuTLS files & libraries. +OPT_GNUTLS=no + +AC_ARG_WITH(gnutls,dnl +AC_HELP_STRING([--with-gnutls=PATH],[where to look for GnuTLS, PATH points to the installation root]) +AC_HELP_STRING([--without-gnutls], [disable GnuTLS detection]), + OPT_GNUTLS=$withval) + +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + + if test X"$OPT_GNUTLS" != Xno; then + + addld="" + addlib="" + gtlslib="" + version="" + addcflags="" + + if test "x$OPT_GNUTLS" = "xyes"; then + dnl this is with no partiular path given + CURL_CHECK_PKGCONFIG(gnutls) + + if test "$PKGCONFIG" != "no" ; then + addlib=`$PKGCONFIG --libs-only-l gnutls` + addld=`$PKGCONFIG --libs-only-L gnutls` + addcflags=`$PKGCONFIG --cflags-only-I gnutls` + version=`$PKGCONFIG --modversion gnutls` + gtlslib=`echo $addld | $SED -e 's/-L//'` + else + dnl without pkg-config, we try libgnutls-config as that was how it + dnl used to be done + check=`libgnutls-config --version 2>/dev/null` + if test -n "$check"; then + addlib=`libgnutls-config --libs` + addcflags=`libgnutls-config --cflags` + version=`libgnutls-config --version` + gtlslib=`libgnutls-config --prefix`/lib$libsuff + fi + fi + else + dnl this is with a given path, first check if there's a libgnutls-config + dnl there and if not, make an educated guess + cfg=$OPT_GNUTLS/bin/libgnutls-config + check=`$cfg --version 2>/dev/null` + if test -n "$check"; then + addlib=`$cfg --libs` + addcflags=`$cfg --cflags` + version=`$cfg --version` + gtlslib=`$cfg --prefix`/lib$libsuff + else + dnl without pkg-config and libgnutls-config, we guess a lot! 
+ addlib=-lgnutls + addld=-L$OPT_GNUTLS/lib$libsuff + addcflags=-I$OPT_GNUTLS/include + version="" # we just don't know + gtlslib=$OPT_GNUTLS/lib$libsuff + fi + fi + + if test -z "$version"; then + dnl lots of efforts, still no go + version="unknown" + fi + + if test -n "$addlib"; then + + CLEANLIBS="$LIBS" + CLEANCPPFLAGS="$CPPFLAGS" + CLEANLDFLAGS="$LDFLAGS" + + LIBS="$addlib $LIBS" + LDFLAGS="$LDFLAGS $addld" + if test "$addcflags" != "-I/usr/include"; then + CPPFLAGS="$CPPFLAGS $addcflags" + fi + + AC_CHECK_LIB(gnutls, gnutls_check_version, + [ + AC_DEFINE(USE_GNUTLS, 1, [if GnuTLS is enabled]) + AC_SUBST(USE_GNUTLS, [1]) + GNUTLS_ENABLED=1 + USE_GNUTLS="yes" + curl_ssl_msg="enabled (GnuTLS)" + ], + [ + LIBS="$CLEANLIBS" + CPPFLAGS="$CLEANCPPFLAGS" + ]) + + if test "x$USE_GNUTLS" = "xyes"; then + AC_MSG_NOTICE([detected GnuTLS version $version]) + + if test -n "$gtlslib"; then + dnl when shared libs were found in a path that the run-time + dnl linker doesn't search through, we need to add it to + dnl LD_LIBRARY_PATH to prevent further configure tests to fail + dnl due to this + if test "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$gtlslib" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $gtlslib to LD_LIBRARY_PATH]) + fi + fi + AC_CHECK_FUNCS([gnutls_certificate_set_x509_key_file2 gnutls_alpn_set_protocols gnutls_ocsp_req_init]) + fi + + fi + + fi dnl GNUTLS not disabled + +fi + +dnl --- +dnl Check which crypto backend GnuTLS uses +dnl --- + +if test "$GNUTLS_ENABLED" = "1"; then + USE_GNUTLS_NETTLE= + # First check if we can detect either crypto library via transitive linking + AC_CHECK_LIB(gnutls, nettle_MD5Init, [ USE_GNUTLS_NETTLE=1 ]) + if test "$USE_GNUTLS_NETTLE" = ""; then + AC_CHECK_LIB(gnutls, gcry_control, [ USE_GNUTLS_NETTLE=0 ]) + fi + # If not, try linking directly to both of them to see if they are available + if test "$USE_GNUTLS_NETTLE" = ""; then + AC_CHECK_LIB(nettle, nettle_MD5Init, [ USE_GNUTLS_NETTLE=1 ]) + fi + if test "$USE_GNUTLS_NETTLE" = ""; then + AC_CHECK_LIB(gcrypt, gcry_control, [ USE_GNUTLS_NETTLE=0 ]) + fi + if test "$USE_GNUTLS_NETTLE" = ""; then + AC_MSG_ERROR([GnuTLS found, but neither gcrypt nor nettle found]) + fi + if test "$USE_GNUTLS_NETTLE" = "1"; then + AC_DEFINE(USE_GNUTLS_NETTLE, 1, [if GnuTLS uses nettle as crypto backend]) + AC_SUBST(USE_GNUTLS_NETTLE, [1]) + LIBS="-lnettle $LIBS" + else + LIBS="-lgcrypt $LIBS" + fi +fi + +dnl --- +dnl We require GnuTLS with SRP support. +dnl --- +if test "$GNUTLS_ENABLED" = "1"; then + AC_CHECK_LIB(gnutls, gnutls_srp_verifier, + [ + AC_DEFINE(HAVE_GNUTLS_SRP, 1, [if you have the function gnutls_srp_verifier]) + AC_SUBST(HAVE_GNUTLS_SRP, [1]) + ]) +fi + +dnl ---------------------------------------------------- +dnl check for PolarSSL +dnl ---------------------------------------------------- + +dnl Default to compiler & linker defaults for PolarSSL files & libraries. 
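+dnl As with the other TLS backends in this script, the PolarSSL block below
+dnl only runs while no earlier backend has already claimed the build (i.e.
+dnl while $curl_ssl_msg still equals $init_ssl_msg). A hedged, illustrative
+dnl way to steer the choice from the command line (the prefix is only a
+dnl placeholder):
+dnl   ./configure --without-ssl --with-polarssl=/usr/local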
+OPT_POLARSSL=no + +_cppflags=$CPPFLAGS +_ldflags=$LDFLAGS +AC_ARG_WITH(polarssl,dnl +AC_HELP_STRING([--with-polarssl=PATH],[where to look for PolarSSL, PATH points to the installation root]) +AC_HELP_STRING([--without-polarssl], [disable PolarSSL detection]), + OPT_POLARSSL=$withval) + +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + + if test X"$OPT_POLARSSL" != Xno; then + + if test "$OPT_POLARSSL" = "yes"; then + OPT_POLARSSL="" + fi + + if test -z "$OPT_POLARSSL" ; then + dnl check for lib first without setting any new path + + AC_CHECK_LIB(polarssl, havege_init, + dnl libpolarssl found, set the variable + [ + AC_DEFINE(USE_POLARSSL, 1, [if PolarSSL is enabled]) + AC_SUBST(USE_POLARSSL, [1]) + POLARSSL_ENABLED=1 + USE_POLARSSL="yes" + curl_ssl_msg="enabled (PolarSSL)" + ]) + fi + + addld="" + addlib="" + addcflags="" + polarssllib="" + + if test "x$USE_POLARSSL" != "xyes"; then + dnl add the path and test again + addld=-L$OPT_POLARSSL/lib$libsuff + addcflags=-I$OPT_POLARSSL/include + polarssllib=$OPT_POLARSSL/lib$libsuff + + LDFLAGS="$LDFLAGS $addld" + if test "$addcflags" != "-I/usr/include"; then + CPPFLAGS="$CPPFLAGS $addcflags" + fi + + AC_CHECK_LIB(polarssl, ssl_init, + [ + AC_DEFINE(USE_POLARSSL, 1, [if PolarSSL is enabled]) + AC_SUBST(USE_POLARSSL, [1]) + POLARSSL_ENABLED=1 + USE_POLARSSL="yes" + curl_ssl_msg="enabled (PolarSSL)" + ], + [ + CPPFLAGS=$_cppflags + LDFLAGS=$_ldflags + ]) + fi + + if test "x$USE_POLARSSL" = "xyes"; then + AC_MSG_NOTICE([detected PolarSSL]) + + LIBS="-lpolarssl $LIBS" + + if test -n "$polarssllib"; then + dnl when shared libs were found in a path that the run-time + dnl linker doesn't search through, we need to add it to + dnl LD_LIBRARY_PATH to prevent further configure tests to fail + dnl due to this + if test "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$polarssllib" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $polarssllib to LD_LIBRARY_PATH]) + fi + fi + fi + + fi dnl PolarSSL not disabled + +fi + +dnl ---------------------------------------------------- +dnl check for mbedTLS +dnl ---------------------------------------------------- + +OPT_MBEDTLS=no + +_cppflags=$CPPFLAGS +_ldflags=$LDFLAGS +AC_ARG_WITH(mbedtls,dnl +AC_HELP_STRING([--with-mbedtls=PATH],[where to look for mbedTLS, PATH points to the installation root]) +AC_HELP_STRING([--without-mbedtls], [disable mbedTLS detection]), + OPT_MBEDTLS=$withval) + +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + + if test X"$OPT_MBEDTLS" != Xno; then + + if test "$OPT_MBEDTLS" = "yes"; then + OPT_MBEDTLS="" + fi + + if test -z "$OPT_MBEDTLS" ; then + dnl check for lib first without setting any new path + + AC_CHECK_LIB(mbedtls, mbedtls_havege_init, + dnl libmbedtls found, set the variable + [ + AC_DEFINE(USE_MBEDTLS, 1, [if mbedTLS is enabled]) + AC_SUBST(USE_MBEDTLS, [1]) + MBEDTLS_ENABLED=1 + USE_MBEDTLS="yes" + curl_ssl_msg="enabled (mbedTLS)" + ], [], -lmbedx509 -lmbedcrypto) + fi + + addld="" + addlib="" + addcflags="" + mbedtlslib="" + + if test "x$USE_MBEDTLS" != "xyes"; then + dnl add the path and test again + addld=-L$OPT_MBEDTLS/lib$libsuff + addcflags=-I$OPT_MBEDTLS/include + mbedtlslib=$OPT_MBEDTLS/lib$libsuff + + LDFLAGS="$LDFLAGS $addld" + if test "$addcflags" != "-I/usr/include"; then + CPPFLAGS="$CPPFLAGS $addcflags" + fi + + AC_CHECK_LIB(mbedtls, mbedtls_ssl_init, + [ + AC_DEFINE(USE_MBEDTLS, 1, [if mbedTLS is enabled]) + AC_SUBST(USE_MBEDTLS, [1]) + MBEDTLS_ENABLED=1 + USE_MBEDTLS="yes" + curl_ssl_msg="enabled (mbedTLS)" + ], + [ + 
CPPFLAGS=$_cppflags + LDFLAGS=$_ldflags + ], -lmbedx509 -lmbedcrypto) + fi + + if test "x$USE_MBEDTLS" = "xyes"; then + AC_MSG_NOTICE([detected mbedTLS]) + + LIBS="-lmbedtls -lmbedx509 -lmbedcrypto $LIBS" + + if test -n "$mbedtlslib"; then + dnl when shared libs were found in a path that the run-time + dnl linker doesn't search through, we need to add it to + dnl LD_LIBRARY_PATH to prevent further configure tests to fail + dnl due to this + if test "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$mbedtlslib" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $mbedtlslib to LD_LIBRARY_PATH]) + fi + fi + fi + + fi dnl mbedTLS not disabled + +fi + +dnl ---------------------------------------------------- +dnl check for CyaSSL +dnl ---------------------------------------------------- + +dnl Default to compiler & linker defaults for CyaSSL files & libraries. +OPT_CYASSL=no + +_cppflags=$CPPFLAGS +_ldflags=$LDFLAGS +AC_ARG_WITH(cyassl,dnl +AC_HELP_STRING([--with-cyassl=PATH],[where to look for CyaSSL, PATH points to the installation root (default: system lib default)]) +AC_HELP_STRING([--without-cyassl], [disable CyaSSL detection]), + OPT_CYASSL=$withval) + +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + + if test X"$OPT_CYASSL" != Xno; then + + if test "$OPT_CYASSL" = "yes"; then + OPT_CYASSL="" + fi + + dnl This should be reworked to use pkg-config instead + + cyassllibname=cyassl + + if test -z "$OPT_CYASSL" ; then + dnl check for lib in system default first + + AC_CHECK_LIB(cyassl, CyaSSL_Init, + dnl libcyassl found, set the variable + [ + AC_DEFINE(USE_CYASSL, 1, [if CyaSSL is enabled]) + AC_SUBST(USE_CYASSL, [1]) + CYASSL_ENABLED=1 + USE_CYASSL="yes" + curl_ssl_msg="enabled (CyaSSL)" + ]) + fi + + addld="" + addlib="" + addcflags="" + cyassllib="" + + if test "x$USE_CYASSL" != "xyes"; then + dnl add the path and test again + addld=-L$OPT_CYASSL/lib$libsuff + addcflags=-I$OPT_CYASSL/include + cyassllib=$OPT_CYASSL/lib$libsuff + + LDFLAGS="$LDFLAGS $addld" + if test "$addcflags" != "-I/usr/include"; then + CPPFLAGS="$CPPFLAGS $addcflags" + fi + + AC_CHECK_LIB(cyassl, CyaSSL_Init, + [ + AC_DEFINE(USE_CYASSL, 1, [if CyaSSL is enabled]) + AC_SUBST(USE_CYASSL, [1]) + CYASSL_ENABLED=1 + USE_CYASSL="yes" + curl_ssl_msg="enabled (CyaSSL)" + ], + [ + CPPFLAGS=$_cppflags + LDFLAGS=$_ldflags + cyassllib="" + ]) + fi + + addld="" + addlib="" + addcflags="" + + if test "x$USE_CYASSL" != "xyes"; then + dnl libcyassl renamed to libwolfssl as of 3.4.0 + addld=-L$OPT_CYASSL/lib$libsuff + addcflags=-I$OPT_CYASSL/include + cyassllib=$OPT_CYASSL/lib$libsuff + + LDFLAGS="$LDFLAGS $addld" + if test "$addcflags" != "-I/usr/include"; then + CPPFLAGS="$CPPFLAGS $addcflags" + fi + + cyassllibname=wolfssl + my_ac_save_LIBS="$LIBS" + LIBS="-l$cyassllibname -lm $LIBS" + + AC_MSG_CHECKING([for CyaSSL_Init in -lwolfssl]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ +/* These aren't needed for detection and confuse WolfSSL. + They are set up properly later if it is detected. 
*/
+#undef SIZEOF_LONG
+#undef SIZEOF_LONG_LONG
+#include <cyassl/ssl.h>
+      ]],[[
+        return CyaSSL_Init();
+      ]])
+    ],[
+      AC_MSG_RESULT(yes)
+      AC_DEFINE(USE_CYASSL, 1, [if CyaSSL/WolfSSL is enabled])
+      AC_SUBST(USE_CYASSL, [1])
+      CYASSL_ENABLED=1
+      USE_CYASSL="yes"
+      curl_ssl_msg="enabled (WolfSSL)"
+    ],
+    [
+      AC_MSG_RESULT(no)
+      CPPFLAGS=$_cppflags
+      LDFLAGS=$_ldflags
+      cyassllib=""
+    ])
+    LIBS="$my_ac_save_LIBS"
+  fi
+
+  if test "x$USE_CYASSL" = "xyes"; then
+    AC_MSG_NOTICE([detected $cyassllibname])
+
+    dnl cyassl/ctaocrypt/types.h needs SIZEOF_LONG_LONG defined!
+    AC_CHECK_SIZEOF(long long)
+
+    dnl Versions since at least 2.6.0 may have options.h
+    AC_CHECK_HEADERS(cyassl/options.h)
+
+    dnl Versions since at least 2.9.4 renamed error.h to error-ssl.h
+    AC_CHECK_HEADERS(cyassl/error-ssl.h)
+
+    LIBS="-l$cyassllibname -lm $LIBS"
+
+    if test "x$cyassllibname" = "xwolfssl"; then
+      dnl Recent WolfSSL versions build without SSLv3 by default
+      dnl WolfSSL needs configure --enable-opensslextra to have *get_peer*
+      AC_CHECK_FUNCS(wolfSSLv3_client_method \
+                     wolfSSL_CTX_UseSupportedCurve \
+                     wolfSSL_get_peer_certificate \
+                     wolfSSL_UseALPN)
+    else
+      dnl Cyassl needs configure --enable-opensslextra to have *get_peer*
+      AC_CHECK_FUNCS(CyaSSL_CTX_UseSupportedCurve \
+                     CyaSSL_get_peer_certificate)
+    fi
+
+    if test -n "$cyassllib"; then
+      dnl when shared libs were found in a path that the run-time
+      dnl linker doesn't search through, we need to add it to
+      dnl LD_LIBRARY_PATH to prevent further configure tests to fail
+      dnl due to this
+      if test "x$cross_compiling" != "xyes"; then
+        LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$cyassllib"
+        export LD_LIBRARY_PATH
+        AC_MSG_NOTICE([Added $cyassllib to LD_LIBRARY_PATH])
+      fi
+    fi
+
+  fi
+
+  fi dnl CyaSSL not disabled
+
+fi
+
+dnl ----------------------------------------------------
+dnl NSS. Only check if GnuTLS and OpenSSL are not enabled
+dnl ----------------------------------------------------
+
+dnl Default to compiler & linker defaults for NSS files & libraries.
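+dnl Illustrative NSS selection: a bare --with-nss relies on pkg-config
+dnl locating "nss", while a path points at an install root carrying
+dnl lib/pkgconfig/nss.pc. The prefix below is hypothetical:
+dnl   ./configure --with-nss=/opt/nss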
+OPT_NSS=no + +AC_ARG_WITH(nss,dnl +AC_HELP_STRING([--with-nss=PATH],[where to look for NSS, PATH points to the installation root]) +AC_HELP_STRING([--without-nss], [disable NSS detection]), + OPT_NSS=$withval) + +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + + if test X"$OPT_NSS" != Xno; then + + addld="" + addlib="" + addcflags="" + nssprefix="" + version="" + + if test "x$OPT_NSS" = "xyes"; then + + CURL_CHECK_PKGCONFIG(nss) + + if test "$PKGCONFIG" != "no" ; then + addlib=`$PKGCONFIG --libs nss` + addcflags=`$PKGCONFIG --cflags nss` + version=`$PKGCONFIG --modversion nss` + nssprefix=`$PKGCONFIG --variable=prefix nss` + else + dnl Without pkg-config, we check for nss-config + + check=`nss-config --version 2>/dev/null` + if test -n "$check"; then + addlib=`nss-config --libs` + addcflags=`nss-config --cflags` + version=`nss-config --version` + nssprefix=`nss-config --prefix` + else + addlib="-lnss3" + addcflags="" + version="unknown" + fi + fi + else + NSS_PCDIR="$OPT_NSS/lib/pkgconfig" + if test -f "$NSS_PCDIR/nss.pc"; then + CURL_CHECK_PKGCONFIG(nss, [$NSS_PCDIR]) + if test "$PKGCONFIG" != "no" ; then + addld=`CURL_EXPORT_PCDIR([$NSS_PCDIR]) $PKGCONFIG --libs-only-L nss` + addlib=`CURL_EXPORT_PCDIR([$NSS_PCDIR]) $PKGCONFIG --libs-only-l nss` + addcflags=`CURL_EXPORT_PCDIR([$NSS_PCDIR]) $PKGCONFIG --cflags nss` + version=`CURL_EXPORT_PCDIR([$NSS_PCDIR]) $PKGCONFIG --modversion nss` + nssprefix=`CURL_EXPORT_PCDIR([$NSS_PCDIR]) $PKGCONFIG --variable=prefix nss` + fi + fi + fi + + if test -z "$addlib"; then + # Without pkg-config, we'll kludge in some defaults + AC_MSG_WARN([Using hard-wired libraries and compilation flags for NSS.]) + addld="-L$OPT_NSS/lib" + addlib="-lssl3 -lsmime3 -lnss3 -lplds4 -lplc4 -lnspr4" + addcflags="-I$OPT_NSS/include" + version="unknown" + nssprefix=$OPT_NSS + fi + + CLEANLDFLAGS="$LDFLAGS" + CLEANLIBS="$LIBS" + CLEANCPPFLAGS="$CPPFLAGS" + + LDFLAGS="$addld $LDFLAGS" + LIBS="$addlib $LIBS" + if test "$addcflags" != "-I/usr/include"; then + CPPFLAGS="$CPPFLAGS $addcflags" + fi + + dnl The function SSL_VersionRangeSet() is needed to enable TLS > 1.0 + AC_CHECK_LIB(nss3, SSL_VersionRangeSet, + [ + AC_DEFINE(USE_NSS, 1, [if NSS is enabled]) + AC_SUBST(USE_NSS, [1]) + USE_NSS="yes" + NSS_ENABLED=1 + curl_ssl_msg="enabled (NSS)" + ], + [ + LDFLAGS="$CLEANLDFLAGS" + LIBS="$CLEANLIBS" + CPPFLAGS="$CLEANCPPFLAGS" + ]) + + if test "x$USE_NSS" = "xyes"; then + AC_MSG_NOTICE([detected NSS version $version]) + + dnl needed when linking the curl tool without USE_EXPLICIT_LIB_DEPS + NSS_LIBS=$addlib + AC_SUBST([NSS_LIBS]) + + dnl when shared libs were found in a path that the run-time + dnl linker doesn't search through, we need to add it to + dnl LD_LIBRARY_PATH to prevent further configure tests to fail + dnl due to this + if test "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$nssprefix/lib$libsuff" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $nssprefix/lib$libsuff to LD_LIBRARY_PATH]) + fi + + fi dnl NSS found + + fi dnl NSS not disabled + +fi dnl curl_ssl_msg = init_ssl_msg + +OPT_AXTLS=off + +AC_ARG_WITH(axtls,dnl +AC_HELP_STRING([--with-axtls=PATH],[Where to look for axTLS, PATH points to the axTLS installation prefix (default: /usr/local). 
Ignored if another SSL engine is selected.]) +AC_HELP_STRING([--without-axtls], [disable axTLS]), + OPT_AXTLS=$withval) + +if test "$curl_ssl_msg" = "$init_ssl_msg"; then + if test X"$OPT_AXTLS" != Xno; then + dnl backup the pre-axtls variables + CLEANLDFLAGS="$LDFLAGS" + CLEANCPPFLAGS="$CPPFLAGS" + CLEANLIBS="$LIBS" + + case "$OPT_AXTLS" in + yes) + dnl --with-axtls (without path) used + PREFIX_AXTLS=/usr/local + LIB_AXTLS="$PREFIX_AXTLS/lib" + LDFLAGS="$LDFLAGS -L$LIB_AXTLS" + CPPFLAGS="$CPPFLAGS -I$PREFIX_AXTLS/include" + ;; + off) + dnl no --with-axtls option given, just check default places + PREFIX_AXTLS= + ;; + *) + dnl check the given --with-axtls spot + PREFIX_AXTLS=$OPT_AXTLS + LIB_AXTLS="$PREFIX_AXTLS/lib" + LDFLAGS="$LDFLAGS -L$LIB_AXTLS" + CPPFLAGS="$CPPFLAGS -I$PREFIX_AXTLS/include" + ;; + esac + + AC_CHECK_LIB(axtls, ssl_version,[ + LIBS="-laxtls $LIBS" + AC_DEFINE(USE_AXTLS, 1, [if axTLS is enabled]) + AC_SUBST(USE_AXTLS, [1]) + AXTLS_ENABLED=1 + USE_AXTLS="yes" + curl_ssl_msg="enabled (axTLS)" + + if test "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$LIB_AXTLS" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $LIB_AXTLS to LD_LIBRARY_PATH]) + fi + ],[ + LDFLAGS="$CLEANLDFLAGS" + CPPFLAGS="$CLEANCPPFLAGS" + LIBS="$CLEANLIBS" + ]) + fi +fi + +if test "x$OPENSSL_ENABLED$GNUTLS_ENABLED$NSS_ENABLED$POLARSSL_ENABLED$MBEDTLS_ENABLED$AXTLS_ENABLED$CYASSL_ENABLED$WINSSL_ENABLED$DARWINSSL_ENABLED" = "x"; then + AC_MSG_WARN([SSL disabled, you will not be able to use HTTPS, FTPS, NTLM and more.]) + AC_MSG_WARN([Use --with-ssl, --with-gnutls, --with-polarssl, --with-cyassl, --with-nss, --with-axtls, --with-winssl, or --with-darwinssl to address this.]) +else + # SSL is enabled, genericly + AC_SUBST(SSL_ENABLED) + SSL_ENABLED="1" +fi + +dnl ********************************************************************** +dnl Check for the CA bundle +dnl ********************************************************************** + +CURL_CHECK_CA_BUNDLE + +dnl ********************************************************************** +dnl Check for libpsl +dnl ********************************************************************** + +AC_ARG_WITH(libpsl, + AS_HELP_STRING([--without-libpsl], + [disable support for libpsl cookie checking]), + with_libpsl=$withval, + with_libpsl=yes) +if test $with_libpsl != "no"; then + AC_SEARCH_LIBS(psl_builtin, psl, + [curl_psl_msg="yes"; + AC_DEFINE([USE_LIBPSL], [1], [PSL support enabled]) + ], + [curl_psl_msg="no (libpsl not found)"; + AC_MSG_WARN([libpsl was not found]) + ] + ) +fi +AM_CONDITIONAL([USE_LIBPSL], [test "$curl_psl_msg" = "yes"]) + +dnl ********************************************************************** +dnl Check for libmetalink +dnl ********************************************************************** + +OPT_LIBMETALINK=no + +AC_ARG_WITH(libmetalink,dnl +AC_HELP_STRING([--with-libmetalink=PATH],[where to look for libmetalink, PATH points to the installation root]) +AC_HELP_STRING([--without-libmetalink], [disable libmetalink detection]), + OPT_LIBMETALINK=$withval) + +if test X"$OPT_LIBMETALINK" != Xno; then + + addld="" + addlib="" + addcflags="" + version="" + libmetalinklib="" + + PKGTEST="no" + if test "x$OPT_LIBMETALINK" = "xyes"; then + dnl this is with no partiular path given + PKGTEST="yes" + CURL_CHECK_PKGCONFIG(libmetalink) + else + dnl When particular path is given, set PKG_CONFIG_LIBDIR using the path. 
+    LIBMETALINK_PCDIR="$OPT_LIBMETALINK/lib/pkgconfig"
+    AC_MSG_NOTICE([PKG_CONFIG_LIBDIR will be set to "$LIBMETALINK_PCDIR"])
+    if test -f "$LIBMETALINK_PCDIR/libmetalink.pc"; then
+      PKGTEST="yes"
+    fi
+    if test "$PKGTEST" = "yes"; then
+      CURL_CHECK_PKGCONFIG(libmetalink, [$LIBMETALINK_PCDIR])
+    fi
+  fi
+  if test "$PKGTEST" = "yes" && test "$PKGCONFIG" != "no"; then
+    addlib=`CURL_EXPORT_PCDIR([$LIBMETALINK_PCDIR]) dnl
+      $PKGCONFIG --libs-only-l libmetalink`
+    addld=`CURL_EXPORT_PCDIR([$LIBMETALINK_PCDIR]) dnl
+      $PKGCONFIG --libs-only-L libmetalink`
+    addcflags=`CURL_EXPORT_PCDIR([$LIBMETALINK_PCDIR]) dnl
+      $PKGCONFIG --cflags-only-I libmetalink`
+    version=`CURL_EXPORT_PCDIR([$LIBMETALINK_PCDIR]) dnl
+      $PKGCONFIG --modversion libmetalink`
+    libmetalinklib=`echo $addld | $SED -e 's/-L//'`
+  fi
+  if test -n "$addlib"; then
+
+    clean_CPPFLAGS="$CPPFLAGS"
+    clean_LDFLAGS="$LDFLAGS"
+    clean_LIBS="$LIBS"
+    CPPFLAGS="$clean_CPPFLAGS $addcflags"
+    LDFLAGS="$clean_LDFLAGS $addld"
+    LIBS="$addlib $clean_LIBS"
+    AC_MSG_CHECKING([if libmetalink is recent enough])
+    AC_LINK_IFELSE([
+      AC_LANG_PROGRAM([[
+#  include <metalink/metalink.h>
+      ]],[[
+        if(0 != metalink_strerror(0)) /* added in 0.1.0 */
+          return 1;
+      ]])
+    ],[
+      AC_MSG_RESULT([yes ($version)])
+      want_metalink="yes"
+    ],[
+      AC_MSG_RESULT([no ($version)])
+      AC_MSG_NOTICE([libmetalink library defective or too old])
+      want_metalink="no"
+    ])
+    CPPFLAGS="$clean_CPPFLAGS"
+    LDFLAGS="$clean_LDFLAGS"
+    LIBS="$clean_LIBS"
+    if test "$want_metalink" = "yes"; then
+      dnl finally libmetalink will be used
+      AC_DEFINE(USE_METALINK, 1, [Define to enable metalink support])
+      LIBMETALINK_LIBS=$addlib
+      LIBMETALINK_LDFLAGS=$addld
+      LIBMETALINK_CPPFLAGS=$addcflags
+      AC_SUBST([LIBMETALINK_LIBS])
+      AC_SUBST([LIBMETALINK_LDFLAGS])
+      AC_SUBST([LIBMETALINK_CPPFLAGS])
+      curl_mtlnk_msg="enabled"
+    fi
+
+  fi
+fi
+
+dnl **********************************************************************
+dnl Check for the presence of LIBSSH2 libraries and headers
+dnl **********************************************************************
+
+dnl Default to compiler & linker defaults for LIBSSH2 files & libraries.
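+dnl Illustrative libssh2 usage (enables SCP/SFTP URLs in the resulting
+dnl libcurl): a bare --with-libssh2 goes through pkg-config, whereas a path
+dnl sets -L/-I directly. The prefix below is only a placeholder:
+dnl   ./configure --with-libssh2=/usr/local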
+OPT_LIBSSH2=off +AC_ARG_WITH(libssh2,dnl +AC_HELP_STRING([--with-libssh2=PATH],[Where to look for libssh2, PATH points to the LIBSSH2 installation; when possible, set the PKG_CONFIG_PATH environment variable instead of using this option]) +AC_HELP_STRING([--without-libssh2], [disable LIBSSH2]), + OPT_LIBSSH2=$withval) + +if test X"$OPT_LIBSSH2" != Xno; then + dnl backup the pre-libssh2 variables + CLEANLDFLAGS="$LDFLAGS" + CLEANCPPFLAGS="$CPPFLAGS" + CLEANLIBS="$LIBS" + + case "$OPT_LIBSSH2" in + yes) + dnl --with-libssh2 (without path) used + CURL_CHECK_PKGCONFIG(libssh2) + + if test "$PKGCONFIG" != "no" ; then + LIB_SSH2=`$PKGCONFIG --libs-only-l libssh2` + LD_SSH2=`$PKGCONFIG --libs-only-L libssh2` + CPP_SSH2=`$PKGCONFIG --cflags-only-I libssh2` + version=`$PKGCONFIG --modversion libssh2` + DIR_SSH2=`echo $LD_SSH2 | $SED -e 's/-L//'` + fi + + ;; + off) + dnl no --with-libssh2 option given, just check default places + ;; + *) + dnl use the given --with-libssh2 spot + PREFIX_SSH2=$OPT_LIBSSH2 + ;; + esac + + dnl if given with a prefix, we set -L and -I based on that + if test -n "$PREFIX_SSH2"; then + LIB_SSH2="-lssh2" + LD_SSH2=-L${PREFIX_SSH2}/lib$libsuff + CPP_SSH2=-I${PREFIX_SSH2}/include + DIR_SSH2=${PREFIX_SSH2}/lib$libsuff + fi + + LDFLAGS="$LDFLAGS $LD_SSH2" + CPPFLAGS="$CPPFLAGS $CPP_SSH2" + LIBS="$LIB_SSH2 $LIBS" + + AC_CHECK_LIB(ssh2, libssh2_channel_open_ex) + + AC_CHECK_HEADERS(libssh2.h, + curl_ssh_msg="enabled (libSSH2)" + LIBSSH2_ENABLED=1 + AC_DEFINE(USE_LIBSSH2, 1, [if libSSH2 is in use]) + AC_SUBST(USE_LIBSSH2, [1]) + ) + + if test X"$OPT_LIBSSH2" != Xoff && + test "$LIBSSH2_ENABLED" != "1"; then + AC_MSG_ERROR([libSSH2 libs and/or directories were not found where specified!]) + fi + + if test "$LIBSSH2_ENABLED" = "1"; then + if test -n "$DIR_SSH2"; then + dnl when the libssh2 shared libs were found in a path that the run-time + dnl linker doesn't search through, we need to add it to LD_LIBRARY_PATH + dnl to prevent further configure tests to fail due to this + + if test "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$DIR_SSH2" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $DIR_SSH2 to LD_LIBRARY_PATH]) + fi + fi + else + dnl no libssh2, revert back to clean variables + LDFLAGS=$CLEANLDFLAGS + CPPFLAGS=$CLEANCPPFLAGS + LIBS=$CLEANLIBS + fi +fi + +dnl ********************************************************************** +dnl Check for the presence of LIBRTMP libraries and headers +dnl ********************************************************************** + +dnl Default to compiler & linker defaults for LIBRTMP files & libraries. 
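+dnl Illustrative librtmp usage: as the check below enforces, a bare
+dnl --with-librtmp is only accepted when a librtmp pkg-config file can be
+dnl found; otherwise pass an install prefix (hypothetical here):
+dnl   ./configure --with-librtmp=/usr/local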
+OPT_LIBRTMP=off +AC_ARG_WITH(librtmp,dnl +AC_HELP_STRING([--with-librtmp=PATH],[Where to look for librtmp, PATH points to the LIBRTMP installation; when possible, set the PKG_CONFIG_PATH environment variable instead of using this option]) +AC_HELP_STRING([--without-librtmp], [disable LIBRTMP]), + OPT_LIBRTMP=$withval) + +if test X"$OPT_LIBRTMP" != Xno; then + dnl backup the pre-librtmp variables + CLEANLDFLAGS="$LDFLAGS" + CLEANCPPFLAGS="$CPPFLAGS" + CLEANLIBS="$LIBS" + + case "$OPT_LIBRTMP" in + yes) + dnl --with-librtmp (without path) used + CURL_CHECK_PKGCONFIG(librtmp) + + if test "$PKGCONFIG" != "no" ; then + LIB_RTMP=`$PKGCONFIG --libs-only-l librtmp` + LD_RTMP=`$PKGCONFIG --libs-only-L librtmp` + CPP_RTMP=`$PKGCONFIG --cflags-only-I librtmp` + version=`$PKGCONFIG --modversion librtmp` + DIR_RTMP=`echo $LD_RTMP | $SED -e 's/-L//'` + else + dnl To avoid link errors, we do not allow --librtmp without + dnl a pkgconfig file + AC_MSG_ERROR([--librtmp was specified but could not find librtmp pkgconfig file.]) + fi + + ;; + off) + dnl no --with-librtmp option given, just check default places + LIB_RTMP="-lrtmp" + ;; + *) + dnl use the given --with-librtmp spot + LIB_RTMP="-lrtmp" + PREFIX_RTMP=$OPT_LIBRTMP + ;; + esac + + dnl if given with a prefix, we set -L and -I based on that + if test -n "$PREFIX_RTMP"; then + LD_RTMP=-L${PREFIX_RTMP}/lib$libsuff + CPP_RTMP=-I${PREFIX_RTMP}/include + DIR_RTMP=${PREFIX_RTMP}/lib$libsuff + fi + + LDFLAGS="$LDFLAGS $LD_RTMP" + CPPFLAGS="$CPPFLAGS $CPP_RTMP" + LIBS="$LIB_RTMP $LIBS" + + AC_CHECK_LIB(rtmp, RTMP_Init, + [ + AC_CHECK_HEADERS(librtmp/rtmp.h, + curl_rtmp_msg="enabled (librtmp)" + LIBRTMP_ENABLED=1 + AC_DEFINE(USE_LIBRTMP, 1, [if librtmp is in use]) + AC_SUBST(USE_LIBRTMP, [1]) + ) + ], + dnl not found, revert back to clean variables + LDFLAGS=$CLEANLDFLAGS + CPPFLAGS=$CLEANCPPFLAGS + LIBS=$CLEANLIBS + ) + + if test X"$OPT_LIBRTMP" != Xoff && + test "$LIBRTMP_ENABLED" != "1"; then + AC_MSG_ERROR([librtmp libs and/or directories were not found where specified!]) + fi + +fi + +dnl ********************************************************************** +dnl Check for linker switch for versioned symbols +dnl ********************************************************************** + +versioned_symbols_flavour= +AC_MSG_CHECKING([whether versioned symbols are wanted]) +AC_ARG_ENABLE(versioned-symbols, +AC_HELP_STRING([--enable-versioned-symbols], [Enable versioned symbols in shared library]) +AC_HELP_STRING([--disable-versioned-symbols], [Disable versioned symbols in shared library]), +[ case "$enableval" in + yes) AC_MSG_RESULT(yes) + AC_MSG_CHECKING([if libraries can be versioned]) + GLD=`$LD --help < /dev/null 2>/dev/null | grep version-script` + if test -z "$GLD"; then + AC_MSG_RESULT(no) + AC_MSG_WARN([You need an ld version supporting the --version-script option]) + else + AC_MSG_RESULT(yes) + if test "x$OPENSSL_ENABLED" = "x1"; then + versioned_symbols_flavour="OPENSSL_" + elif test "x$GNUTLS_ENABLED" = "x1"; then + versioned_symbols_flavour="GNUTLS_" + elif test "x$NSS_ENABLED" = "x1"; then + versioned_symbols_flavour="NSS_" + elif test "x$POLARSSL_ENABLED" = "x1"; then + versioned_symbols_flavour="POLARSSL_" + elif test "x$CYASSL_ENABLED" = "x1"; then + versioned_symbols_flavour="CYASSL_" + elif test "x$AXTLS_ENABLED" = "x1"; then + versioned_symbols_flavour="AXTLS_" + elif test "x$WINSSL_ENABLED" = "x1"; then + versioned_symbols_flavour="WINSSL_" + elif test "x$DARWINSSL_ENABLED" = "x1"; then + versioned_symbols_flavour="DARWINSSL_" + else + 
versioned_symbols_flavour="" + fi + versioned_symbols="yes" + fi + ;; + + *) AC_MSG_RESULT(no) + ;; + esac +], [ +AC_MSG_RESULT(no) +] +) + +AC_SUBST([CURL_LT_SHLIB_VERSIONED_FLAVOUR], + ["$versioned_symbols_flavour"]) +AM_CONDITIONAL([CURL_LT_SHLIB_USE_VERSIONED_SYMBOLS], + [test "x$versioned_symbols" = 'xyes']) + +dnl ------------------------------------------------- +dnl check winidn option before other IDN libraries +dnl ------------------------------------------------- + +AC_MSG_CHECKING([whether to enable Windows native IDN (Windows native builds only)]) +OPT_WINIDN="default" +AC_ARG_WITH(winidn, +AC_HELP_STRING([--with-winidn=PATH],[enable Windows native IDN]) +AC_HELP_STRING([--without-winidn], [disable Windows native IDN]), + OPT_WINIDN=$withval) +case "$OPT_WINIDN" in + no|default) + dnl --without-winidn option used or configure option not specified + want_winidn="no" + AC_MSG_RESULT([no]) + ;; + yes) + dnl --with-winidn option used without path + want_winidn="yes" + want_winidn_path="default" + AC_MSG_RESULT([yes]) + ;; + *) + dnl --with-winidn option used with path + want_winidn="yes" + want_winidn_path="$withval" + AC_MSG_RESULT([yes ($withval)]) + ;; +esac + +if test "$want_winidn" = "yes"; then + dnl winidn library support has been requested + clean_CPPFLAGS="$CPPFLAGS" + clean_LDFLAGS="$LDFLAGS" + clean_LIBS="$LIBS" + WINIDN_LIBS="-lnormaliz" + # + if test "$want_winidn_path" != "default"; then + dnl path has been specified + dnl pkg-config not available or provides no info + WINIDN_LDFLAGS="-L$want_winidn_path/lib$libsuff" + WINIDN_CPPFLAGS="-I$want_winidn_path/include" + WINIDN_DIR="$want_winidn_path/lib$libsuff" + fi + # + CPPFLAGS="$CPPFLAGS $WINIDN_CPPFLAGS" + LDFLAGS="$LDFLAGS $WINIDN_LDFLAGS" + LIBS="$WINIDN_LIBS $LIBS" + # + AC_MSG_CHECKING([if IdnToUnicode can be linked]) + AC_LINK_IFELSE([ + AC_LANG_FUNC_LINK_TRY([IdnToUnicode]) + ],[ + AC_MSG_RESULT([yes]) + tst_links_winidn="yes" + ],[ + AC_MSG_RESULT([no]) + tst_links_winidn="no" + ]) + # + if test "$tst_links_winidn" = "yes"; then + AC_DEFINE(USE_WIN32_IDN, 1, [Define to 1 if you have the `normaliz' (WinIDN) library (-lnormaliz).]) + AC_DEFINE(WANT_IDN_PROTOTYPES, 1, [Define to 1 to provide own prototypes.]) + AC_SUBST([IDN_ENABLED], [1]) + curl_idn_msg="enabled (Windows-native)" + else + AC_MSG_WARN([Cannot find libraries for IDN support: IDN disabled]) + CPPFLAGS="$clean_CPPFLAGS" + LDFLAGS="$clean_LDFLAGS" + LIBS="$clean_LIBS" + fi +fi + +dnl ********************************************************************** +dnl Check for the presence of IDN libraries and headers +dnl ********************************************************************** + +AC_MSG_CHECKING([whether to build with libidn2]) +OPT_IDN="default" +AC_ARG_WITH(libidn2, +AC_HELP_STRING([--with-libidn2=PATH],[Enable libidn2 usage]) +AC_HELP_STRING([--without-libidn2],[Disable libidn2 usage]), + [OPT_IDN=$withval]) +case "$OPT_IDN" in + no) + dnl --without-libidn2 option used + want_idn="no" + AC_MSG_RESULT([no]) + ;; + default) + dnl configure option not specified + want_idn="yes" + want_idn_path="default" + AC_MSG_RESULT([(assumed) yes]) + ;; + yes) + dnl --with-libidn2 option used without path + want_idn="yes" + want_idn_path="default" + AC_MSG_RESULT([yes]) + ;; + *) + dnl --with-libidn2 option used with path + want_idn="yes" + want_idn_path="$withval" + AC_MSG_RESULT([yes ($withval)]) + ;; +esac + +if test "$want_idn" = "yes"; then + dnl idn library support has been requested + clean_CPPFLAGS="$CPPFLAGS" + clean_LDFLAGS="$LDFLAGS" + 
clean_LIBS="$LIBS" + PKGCONFIG="no" + # + if test "$want_idn_path" != "default"; then + dnl path has been specified + IDN_PCDIR="$want_idn_path/lib$libsuff/pkgconfig" + CURL_CHECK_PKGCONFIG(libidn2, [$IDN_PCDIR]) + if test "$PKGCONFIG" != "no"; then + IDN_LIBS=`CURL_EXPORT_PCDIR([$IDN_PCDIR]) dnl + $PKGCONFIG --libs-only-l libidn2 2>/dev/null` + IDN_LDFLAGS=`CURL_EXPORT_PCDIR([$IDN_PCDIR]) dnl + $PKGCONFIG --libs-only-L libidn2 2>/dev/null` + IDN_CPPFLAGS=`CURL_EXPORT_PCDIR([$IDN_PCDIR]) dnl + $PKGCONFIG --cflags-only-I libidn2 2>/dev/null` + IDN_DIR=`echo $IDN_LDFLAGS | $SED -e 's/-L//'` + else + dnl pkg-config not available or provides no info + IDN_LIBS="-lidn2" + IDN_LDFLAGS="-L$want_idn_path/lib$libsuff" + IDN_CPPFLAGS="-I$want_idn_path/include" + IDN_DIR="$want_idn_path/lib$libsuff" + fi + else + dnl path not specified + CURL_CHECK_PKGCONFIG(libidn2) + if test "$PKGCONFIG" != "no"; then + IDN_LIBS=`$PKGCONFIG --libs-only-l libidn2 2>/dev/null` + IDN_LDFLAGS=`$PKGCONFIG --libs-only-L libidn2 2>/dev/null` + IDN_CPPFLAGS=`$PKGCONFIG --cflags-only-I libidn2 2>/dev/null` + IDN_DIR=`echo $IDN_LDFLAGS | $SED -e 's/-L//'` + else + dnl pkg-config not available or provides no info + IDN_LIBS="-lidn2" + fi + fi + # + if test "$PKGCONFIG" != "no"; then + AC_MSG_NOTICE([pkg-config: IDN_LIBS: "$IDN_LIBS"]) + AC_MSG_NOTICE([pkg-config: IDN_LDFLAGS: "$IDN_LDFLAGS"]) + AC_MSG_NOTICE([pkg-config: IDN_CPPFLAGS: "$IDN_CPPFLAGS"]) + AC_MSG_NOTICE([pkg-config: IDN_DIR: "$IDN_DIR"]) + else + AC_MSG_NOTICE([IDN_LIBS: "$IDN_LIBS"]) + AC_MSG_NOTICE([IDN_LDFLAGS: "$IDN_LDFLAGS"]) + AC_MSG_NOTICE([IDN_CPPFLAGS: "$IDN_CPPFLAGS"]) + AC_MSG_NOTICE([IDN_DIR: "$IDN_DIR"]) + fi + # + CPPFLAGS="$CPPFLAGS $IDN_CPPFLAGS" + LDFLAGS="$LDFLAGS $IDN_LDFLAGS" + LIBS="$IDN_LIBS $LIBS" + # + AC_MSG_CHECKING([if idn2_lookup_ul can be linked]) + AC_LINK_IFELSE([ + AC_LANG_FUNC_LINK_TRY([idn2_lookup_ul]) + ],[ + AC_MSG_RESULT([yes]) + tst_links_libidn="yes" + ],[ + AC_MSG_RESULT([no]) + tst_links_libidn="no" + ]) + # + AC_CHECK_HEADERS( idn2.h ) + + if test "$tst_links_libidn" = "yes"; then + AC_DEFINE(HAVE_LIBIDN2, 1, [Define to 1 if you have the `idn2' library (-lidn2).]) + dnl different versions of libidn have different setups of these: + + AC_SUBST([IDN_ENABLED], [1]) + curl_idn_msg="enabled (libidn2)" + if test -n "$IDN_DIR" -a "x$cross_compiling" != "xyes"; then + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$IDN_DIR" + export LD_LIBRARY_PATH + AC_MSG_NOTICE([Added $IDN_DIR to LD_LIBRARY_PATH]) + fi + else + AC_MSG_WARN([Cannot find libraries for IDN support: IDN disabled]) + CPPFLAGS="$clean_CPPFLAGS" + LDFLAGS="$clean_LDFLAGS" + LIBS="$clean_LIBS" + fi +fi + + +dnl Let's hope this split URL remains working: +dnl http://publibn.boulder.ibm.com/doc_link/en_US/a_doc_lib/aixprggd/ \ +dnl genprogc/thread_quick_ref.htm + + +dnl ********************************************************************** +dnl Check for nghttp2 +dnl ********************************************************************** + +OPT_H2="yes" +AC_ARG_WITH(nghttp2, +AC_HELP_STRING([--with-nghttp2=PATH],[Enable nghttp2 usage]) +AC_HELP_STRING([--without-nghttp2],[Disable nghttp2 usage]), + [OPT_H2=$withval]) +case "$OPT_H2" in + no) + dnl --without-nghttp2 option used + want_h2="no" + ;; + yes) + dnl --with-nghttp2 option used without path + want_h2="default" + want_h2_path="" + ;; + *) + dnl --with-nghttp2 option used with path + want_h2="yes" + want_h2_path="$withval/lib/pkgconfig" + ;; +esac + +curl_h2_msg="disabled (--with-nghttp2)" +if test X"$want_h2" != Xno; then + 
dnl backup the pre-nghttp2 variables + CLEANLDFLAGS="$LDFLAGS" + CLEANCPPFLAGS="$CPPFLAGS" + CLEANLIBS="$LIBS" + + CURL_CHECK_PKGCONFIG(libnghttp2, $want_h2_path) + + if test "$PKGCONFIG" != "no" ; then + LIB_H2=`CURL_EXPORT_PCDIR([$want_h2_path]) + $PKGCONFIG --libs-only-l libnghttp2` + AC_MSG_NOTICE([-l is $LIB_H2]) + + CPP_H2=`CURL_EXPORT_PCDIR([$want_h2_path]) dnl + $PKGCONFIG --cflags-only-I libnghttp2` + AC_MSG_NOTICE([-I is $CPP_H2]) + + LD_H2=`CURL_EXPORT_PCDIR([$want_h2_path]) + $PKGCONFIG --libs-only-L libnghttp2` + AC_MSG_NOTICE([-L is $LD_H2]) + + LDFLAGS="$LDFLAGS $LD_H2" + CPPFLAGS="$CPPFLAGS $CPP_H2" + LIBS="$LIB_H2 $LIBS" + + # use nghttp2_option_set_no_recv_client_magic to require nghttp2 + # >= 1.0.0 + AC_CHECK_LIB(nghttp2, nghttp2_option_set_no_recv_client_magic, + [ + AC_CHECK_HEADERS(nghttp2/nghttp2.h, + curl_h2_msg="enabled (nghttp2)" + NGHTTP2_ENABLED=1 + AC_DEFINE(USE_NGHTTP2, 1, [if nghttp2 is in use]) + AC_SUBST(USE_NGHTTP2, [1]) + ) + ], + dnl not found, revert back to clean variables + LDFLAGS=$CLEANLDFLAGS + CPPFLAGS=$CLEANCPPFLAGS + LIBS=$CLEANLIBS + ) + + else + dnl no nghttp2 pkg-config found, deal with it + if test X"$want_h2" != Xdefault; then + dnl To avoid link errors, we do not allow --with-nghttp2 without + dnl a pkgconfig file + AC_MSG_ERROR([--with-nghttp2 was specified but could not find libnghttp2 pkg-config file.]) + fi + fi + +fi + +dnl ********************************************************************** +dnl Check for zsh completion path +dnl ********************************************************************** + +OPT_ZSH_FPATH=default +AC_ARG_WITH(zsh-functions-dir, +AC_HELP_STRING([--with-zsh-functions-dir=PATH],[Install zsh completions to PATH]) +AC_HELP_STRING([--without-zsh-functions-dir],[Do not install zsh completions]), + [OPT_ZSH_FPATH=$withval]) +case "$OPT_ZSH_FPATH" in + no) + dnl --without-zsh-functions-dir option used + ;; + default|yes) + dnl --with-zsh-functions-dir option used without path + ZSH_FUNCTIONS_DIR="$datarootdir/zsh/site-functions" + AC_SUBST(ZSH_FUNCTIONS_DIR) + ;; + *) + dnl --with-zsh-functions-dir option used with path + ZSH_FUNCTIONS_DIR="$withval" + AC_SUBST(ZSH_FUNCTIONS_DIR) + ;; +esac + +dnl ********************************************************************** +dnl Back to "normal" configuring +dnl ********************************************************************** + +dnl Checks for header files. +AC_HEADER_STDC + +CURL_CHECK_HEADER_MALLOC +CURL_CHECK_HEADER_MEMORY + +dnl Now check for the very most basic headers. Then we can use these +dnl ones as default-headers when checking for the rest! 
+AC_CHECK_HEADERS(
+        sys/types.h \
+        sys/time.h \
+        sys/select.h \
+        sys/socket.h \
+        sys/ioctl.h \
+        sys/uio.h \
+        assert.h \
+        unistd.h \
+        stdlib.h \
+        limits.h \
+        arpa/inet.h \
+        net/if.h \
+        netinet/in.h \
+        sys/un.h \
+        netinet/tcp.h \
+        netdb.h \
+        sys/sockio.h \
+        sys/stat.h \
+        sys/param.h \
+        termios.h \
+        termio.h \
+        sgtty.h \
+        fcntl.h \
+        alloca.h \
+        time.h \
+        io.h \
+        pwd.h \
+        utime.h \
+        sys/utime.h \
+        sys/poll.h \
+        poll.h \
+        socket.h \
+        sys/resource.h \
+        libgen.h \
+        locale.h \
+        errno.h \
+        stdbool.h \
+        arpa/tftp.h \
+        sys/filio.h \
+        sys/wait.h \
+        setjmp.h,
+dnl to do if not found
+[],
+dnl to do if found
+[],
+dnl default includes
+[
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef HAVE_SYS_UN_H
+#include <sys/un.h>
+#endif
+]
+)
+
+dnl Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+CURL_CHECK_VARIADIC_MACROS
+AC_TYPE_SIZE_T
+AC_HEADER_TIME
+CURL_CHECK_STRUCT_TIMEVAL
+CURL_VERIFY_RUNTIMELIBS
+
+AC_CHECK_SIZEOF(size_t)
+AC_CHECK_SIZEOF(long)
+AC_CHECK_SIZEOF(int)
+AC_CHECK_SIZEOF(short)
+CURL_CONFIGURE_LONG
+AC_CHECK_SIZEOF(time_t)
+AC_CHECK_SIZEOF(off_t)
+
+soname_bump=no
+if test x"$curl_cv_native_windows" != "xyes" &&
+   test $ac_cv_sizeof_off_t -ne $curl_sizeof_curl_off_t; then
+  AC_MSG_WARN([This libcurl built is probably not ABI compatible with previous])
+  AC_MSG_WARN([builds! You MUST read lib/README.curl_off_t to figure it out.])
+  soname_bump=yes
+fi
+
+
+AC_CHECK_TYPE(long long,
+   [AC_DEFINE(HAVE_LONGLONG, 1,
+      [Define to 1 if the compiler supports the 'long long' data type.])]
+   longlong="yes"
+)
+
+if test "xyes" = "x$longlong"; then
+  AC_MSG_CHECKING([if numberLL works])
+  AC_COMPILE_IFELSE([
+    AC_LANG_PROGRAM([[
+    ]],[[
+      long long val = 1000LL;
+    ]])
+  ],[
+    AC_MSG_RESULT([yes])
+    AC_DEFINE(HAVE_LL, 1, [if your compiler supports LL])
+  ],[
+    AC_MSG_RESULT([no])
+  ])
+fi
+
+
+# check for ssize_t
+AC_CHECK_TYPE(ssize_t, ,
+   AC_DEFINE(ssize_t, int, [the signed version of size_t]))
+
+# check for bool type
+AC_CHECK_TYPE([bool],[
+  AC_DEFINE(HAVE_BOOL_T, 1,
+    [Define to 1 if bool is an available type.])
+], ,[
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_STDBOOL_H
+#include <stdbool.h>
+#endif
+])
+
+CURL_CONFIGURE_CURL_SOCKLEN_T
+
+CURL_CONFIGURE_PULL_SYS_POLL
+
+TYPE_IN_ADDR_T
+
+TYPE_SOCKADDR_STORAGE
+
+TYPE_SIG_ATOMIC_T
+
+AC_TYPE_SIGNAL
+
+CURL_CHECK_FUNC_SELECT
+
+CURL_CHECK_FUNC_RECV
+CURL_CHECK_FUNC_SEND
+CURL_CHECK_MSG_NOSIGNAL
+
+CURL_CHECK_FUNC_ALARM
+CURL_CHECK_FUNC_BASENAME
+CURL_CHECK_FUNC_CLOSESOCKET
+CURL_CHECK_FUNC_CLOSESOCKET_CAMEL
+CURL_CHECK_FUNC_CONNECT
+CURL_CHECK_FUNC_FCNTL
+CURL_CHECK_FUNC_FDOPEN
+CURL_CHECK_FUNC_FREEADDRINFO
+CURL_CHECK_FUNC_FREEIFADDRS
+CURL_CHECK_FUNC_FSETXATTR
+CURL_CHECK_FUNC_FTRUNCATE
+CURL_CHECK_FUNC_GETADDRINFO
+CURL_CHECK_FUNC_GAI_STRERROR
+CURL_CHECK_FUNC_GETHOSTBYADDR
+CURL_CHECK_FUNC_GETHOSTBYADDR_R
+CURL_CHECK_FUNC_GETHOSTBYNAME
+CURL_CHECK_FUNC_GETHOSTBYNAME_R
+CURL_CHECK_FUNC_GETHOSTNAME
+CURL_CHECK_FUNC_GETIFADDRS
+CURL_CHECK_FUNC_GETSERVBYPORT_R
+CURL_CHECK_FUNC_GMTIME_R
+CURL_CHECK_FUNC_INET_NTOA_R
+CURL_CHECK_FUNC_INET_NTOP
+CURL_CHECK_FUNC_INET_PTON
+CURL_CHECK_FUNC_IOCTL
+CURL_CHECK_FUNC_IOCTLSOCKET
+CURL_CHECK_FUNC_IOCTLSOCKET_CAMEL
+CURL_CHECK_FUNC_LOCALTIME_R
+CURL_CHECK_FUNC_MEMRCHR
+CURL_CHECK_FUNC_POLL
+CURL_CHECK_FUNC_SETSOCKOPT
+CURL_CHECK_FUNC_SIGACTION
+CURL_CHECK_FUNC_SIGINTERRUPT
+CURL_CHECK_FUNC_SIGNAL +CURL_CHECK_FUNC_SIGSETJMP +CURL_CHECK_FUNC_SOCKET +CURL_CHECK_FUNC_SOCKETPAIR +CURL_CHECK_FUNC_STRCASECMP +CURL_CHECK_FUNC_STRCMPI +CURL_CHECK_FUNC_STRDUP +CURL_CHECK_FUNC_STRERROR_R +CURL_CHECK_FUNC_STRICMP +CURL_CHECK_FUNC_STRNCASECMP +CURL_CHECK_FUNC_STRNCMPI +CURL_CHECK_FUNC_STRNICMP +CURL_CHECK_FUNC_STRSTR +CURL_CHECK_FUNC_STRTOK_R +CURL_CHECK_FUNC_STRTOLL +CURL_CHECK_FUNC_WRITEV + +case $host in + *msdosdjgpp) + ac_cv_func_pipe=no + skipcheck_pipe=yes + AC_MSG_NOTICE([skip check for pipe on msdosdjgpp]) + ;; +esac + +AC_CHECK_FUNCS([fork \ + geteuid \ + getpass_r \ + getppid \ + getprotobyname \ + getpwuid \ + getpwuid_r \ + getrlimit \ + gettimeofday \ + if_nametoindex \ + inet_addr \ + perror \ + pipe \ + setlocale \ + setmode \ + setrlimit \ + uname \ + utime \ + utimes +],[ +],[ + func="$ac_func" + eval skipcheck=\$skipcheck_$func + if test "x$skipcheck" != "xyes"; then + AC_MSG_CHECKING([deeper for $func]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ + ]],[[ + $func (); + ]]) + ],[ + AC_MSG_RESULT([yes]) + eval "ac_cv_func_$func=yes" + AC_DEFINE_UNQUOTED(XC_SH_TR_CPP([HAVE_$func]), [1], + [Define to 1 if you have the $func function.]) + ],[ + AC_MSG_RESULT([but still no]) + ]) + fi +]) + +dnl Check if the getnameinfo function is available +dnl and get the types of five of its arguments. +CURL_CHECK_FUNC_GETNAMEINFO + +if test "$ipv6" = "yes"; then + if test "$curl_cv_func_getaddrinfo" = "yes"; then + AC_DEFINE(ENABLE_IPV6, 1, [Define if you want to enable IPv6 support]) + IPV6_ENABLED=1 + AC_SUBST(IPV6_ENABLED) + fi + CURL_CHECK_NI_WITHSCOPEID +fi + +CURL_CHECK_NONBLOCKING_SOCKET + +dnl ************************************************************ +dnl nroff tool stuff +dnl + +AC_PATH_PROG( PERL, perl, , + $PATH:/usr/local/bin/perl:/usr/bin/:/usr/local/bin ) +AC_SUBST(PERL) + +AC_PATH_PROGS( NROFF, gnroff nroff, , + $PATH:/usr/bin/:/usr/local/bin ) +AC_SUBST(NROFF) + +if test -n "$NROFF"; then + dnl only check for nroff options if an nroff command was found + + AC_MSG_CHECKING([how to use *nroff to get plain text from man pages]) + MANOPT="-man" + mancheck=`echo foo | $NROFF $MANOPT 2>/dev/null` + if test -z "$mancheck"; then + MANOPT="-mandoc" + mancheck=`echo foo | $NROFF $MANOPT 2>/dev/null` + if test -z "$mancheck"; then + MANOPT="" + AC_MSG_RESULT([failed]) + AC_MSG_WARN([found no *nroff option to get plaintext from man pages]) + else + AC_MSG_RESULT([$MANOPT]) + fi + else + AC_MSG_RESULT([$MANOPT]) + fi + AC_SUBST(MANOPT) +fi + +if test -z "$MANOPT" +then + dnl if no nroff tool was found, or no option that could convert man pages + dnl was found, then disable the built-in manual stuff + AC_MSG_WARN([disabling built-in manual]) + USE_MANUAL="no"; +fi + +dnl ************************************************************************* +dnl If the manual variable still is set, then we go with providing a built-in +dnl manual + +if test "$USE_MANUAL" = "1"; then + AC_DEFINE(USE_MANUAL, 1, [If you want to build curl with the built-in manual]) + curl_manual_msg="enabled" +fi + +dnl set variable for use in automakefile(s) +AM_CONDITIONAL(USE_MANUAL, test x"$USE_MANUAL" = x1) + +CURL_CHECK_LIB_ARES +AM_CONDITIONAL(USE_EMBEDDED_ARES, test x$embedded_ares = xyes) + +if test "x$curl_cv_native_windows" != "xyes" && + test "x$enable_shared" = "xyes"; then + build_libhostname=yes +else + build_libhostname=no +fi +AM_CONDITIONAL(BUILD_LIBHOSTNAME, test x$build_libhostname = xyes) + +CURL_CHECK_OPTION_THREADED_RESOLVER + +if test "x$want_thres" = xyes && test "x$want_ares" 
= xyes; then + AC_MSG_ERROR( +[Options --enable-threaded-resolver and --enable-ares are mutually exclusive]) +fi + +dnl ************************************************************ +dnl disable POSIX threads +dnl +AC_MSG_CHECKING([whether to use POSIX threads for threaded resolver]) +AC_ARG_ENABLE(pthreads, +AC_HELP_STRING([--enable-pthreads], + [Enable POSIX threads (default for threaded resolver)]) +AC_HELP_STRING([--disable-pthreads],[Disable POSIX threads]), +[ case "$enableval" in + no) AC_MSG_RESULT(no) + want_pthreads=no + ;; + *) AC_MSG_RESULT(yes) + want_pthreads=yes + ;; + esac ], [ + AC_MSG_RESULT(auto) + want_pthreads=auto + ] +) + +dnl turn off pthreads if rt is disabled +if test "$want_pthreads" != "no"; then + if test "$want_pthreads" = "yes" && test "$dontwant_rt" = "yes"; then + AC_MSG_ERROR([options --enable-pthreads and --disable-rt are mutually exclusive]) + fi + if test "$dontwant_rt" != "no"; then + dnl if --enable-pthreads was explicit then warn it's being ignored + if test "$want_pthreads" = "yes"; then + AC_MSG_WARN([--enable-pthreads Ignored since librt is disabled.]) + fi + want_pthreads=no + fi +fi + +dnl turn off pthreads if no threaded resolver +if test "$want_pthreads" != "no" && test "$want_thres" != "yes"; then + want_pthreads=no +fi + +dnl detect pthreads +if test "$want_pthreads" != "no"; then + AC_CHECK_HEADER(pthread.h, + [ AC_DEFINE(HAVE_PTHREAD_H, 1, [if you have ]) + save_CFLAGS="$CFLAGS" + + dnl first check for function without lib + AC_CHECK_FUNC(pthread_create, [USE_THREADS_POSIX=1] ) + + dnl if it wasn't found without lib, search for it in pthread lib + if test "$USE_THREADS_POSIX" != "1" + then + CFLAGS="$CFLAGS -pthread" + AC_CHECK_LIB(pthread, pthread_create, + [USE_THREADS_POSIX=1], + [ CFLAGS="$save_CFLAGS"]) + fi + + if test "x$USE_THREADS_POSIX" = "x1" + then + AC_DEFINE(USE_THREADS_POSIX, 1, [if you want POSIX threaded DNS lookup]) + curl_res_msg="POSIX threaded" + fi + ]) +fi + +dnl threaded resolver check +if test "$want_thres" = "yes" && test "x$USE_THREADS_POSIX" != "x1"; then + if test "$want_pthreads" = "yes"; then + AC_MSG_ERROR([--enable-pthreads but pthreads was not found]) + fi + dnl If native Windows fallback on Win32 threads since no POSIX threads + if test "$curl_cv_native_windows" = "yes"; then + USE_THREADS_WIN32=1 + AC_DEFINE(USE_THREADS_WIN32, 1, [if you want Win32 threaded DNS lookup]) + curl_res_msg="Win32 threaded" + else + AC_MSG_ERROR([Threaded resolver enabled but no thread library found]) + fi +fi + +dnl ************************************************************ +dnl disable verbose text strings +dnl +AC_MSG_CHECKING([whether to enable verbose strings]) +AC_ARG_ENABLE(verbose, +AC_HELP_STRING([--enable-verbose],[Enable verbose strings]) +AC_HELP_STRING([--disable-verbose],[Disable verbose strings]), +[ case "$enableval" in + no) + AC_MSG_RESULT(no) + AC_DEFINE(CURL_DISABLE_VERBOSE_STRINGS, 1, [to disable verbose strings]) + curl_verbose_msg="no" + ;; + *) AC_MSG_RESULT(yes) + ;; + esac ], + AC_MSG_RESULT(yes) +) + +dnl ************************************************************ +dnl enable SSPI support +dnl +AC_MSG_CHECKING([whether to enable SSPI support (Windows native builds only)]) +AC_ARG_ENABLE(sspi, +AC_HELP_STRING([--enable-sspi],[Enable SSPI]) +AC_HELP_STRING([--disable-sspi],[Disable SSPI]), +[ case "$enableval" in + yes) + if test "$curl_cv_native_windows" = "yes"; then + AC_MSG_RESULT(yes) + AC_DEFINE(USE_WINDOWS_SSPI, 1, [to enable SSPI support]) + AC_SUBST(USE_WINDOWS_SSPI, [1]) + 
curl_sspi_msg="enabled"
+      else
+        AC_MSG_RESULT(no)
+        AC_MSG_WARN([--enable-sspi Ignored. Only supported on native Windows builds.])
+      fi
+      ;;
+  *)
+      if test "x$WINSSL_ENABLED" = "x1"; then
+        # --with-winssl implies --enable-sspi
+        AC_MSG_RESULT(yes)
+      else
+        AC_MSG_RESULT(no)
+      fi
+      ;;
+  esac ],
+  if test "x$WINSSL_ENABLED" = "x1"; then
+    # --with-winssl implies --enable-sspi
+    AC_MSG_RESULT(yes)
+  else
+    AC_MSG_RESULT(no)
+  fi
+)
+
+dnl ************************************************************
+dnl disable cryptographic authentication
+dnl
+AC_MSG_CHECKING([whether to enable cryptographic authentication methods])
+AC_ARG_ENABLE(crypto-auth,
+AC_HELP_STRING([--enable-crypto-auth],[Enable cryptographic authentication])
+AC_HELP_STRING([--disable-crypto-auth],[Disable cryptographic authentication]),
+[ case "$enableval" in
+  no)
+       AC_MSG_RESULT(no)
+       AC_DEFINE(CURL_DISABLE_CRYPTO_AUTH, 1, [to disable cryptographic authentication])
+       CURL_DISABLE_CRYPTO_AUTH=1
+       ;;
+  *)   AC_MSG_RESULT(yes)
+       ;;
+  esac ],
+       AC_MSG_RESULT(yes)
+)
+
+CURL_CHECK_OPTION_NTLM_WB
+
+CURL_CHECK_NTLM_WB
+
+dnl ************************************************************
+dnl disable TLS-SRP authentication
+dnl
+AC_MSG_CHECKING([whether to enable TLS-SRP authentication])
+AC_ARG_ENABLE(tls-srp,
+AC_HELP_STRING([--enable-tls-srp],[Enable TLS-SRP authentication])
+AC_HELP_STRING([--disable-tls-srp],[Disable TLS-SRP authentication]),
+[ case "$enableval" in
+  no)
+       AC_MSG_RESULT(no)
+       AC_DEFINE(CURL_DISABLE_TLS_SRP, 1, [to disable TLS-SRP authentication])
+       want_tls_srp=no
+       ;;
+  *)   AC_MSG_RESULT(yes)
+       want_tls_srp=yes
+       ;;
+  esac ],
+       AC_MSG_RESULT(yes)
+       want_tls_srp=yes
+)
+
+if test "$want_tls_srp" = "yes" && ( test "x$HAVE_GNUTLS_SRP" = "x1" || test "x$HAVE_OPENSSL_SRP" = "x1") ; then
+  AC_DEFINE(USE_TLS_SRP, 1, [Use TLS-SRP authentication])
+  USE_TLS_SRP=1
+  curl_tls_srp_msg="enabled"
+fi
+
+dnl ************************************************************
+dnl disable Unix domain sockets support
+dnl
+AC_MSG_CHECKING([whether to enable Unix domain sockets])
+AC_ARG_ENABLE(unix-sockets,
+AC_HELP_STRING([--enable-unix-sockets],[Enable Unix domain sockets])
+AC_HELP_STRING([--disable-unix-sockets],[Disable Unix domain sockets]),
+[ case "$enableval" in
+  no)  AC_MSG_RESULT(no)
+       want_unix_sockets=no
+       ;;
+  *)   AC_MSG_RESULT(yes)
+       want_unix_sockets=yes
+       ;;
+  esac ], [
+       AC_MSG_RESULT(auto)
+       want_unix_sockets=auto
+  ]
+)
+if test "x$want_unix_sockets" != "xno"; then
+  AC_CHECK_MEMBER([struct sockaddr_un.sun_path], [
+    AC_DEFINE(USE_UNIX_SOCKETS, 1, [Use Unix domain sockets])
+    AC_SUBST(USE_UNIX_SOCKETS, [1])
+    curl_unix_sockets_msg="enabled"
+  ], [
+    if test "x$want_unix_sockets" = "xyes"; then
+      AC_MSG_ERROR([--enable-unix-sockets is not available on this platform!])
+    fi
+  ], [
+    #include <sys/un.h>
+  ])
+fi
+
+dnl ************************************************************
+dnl disable cookies support
+dnl
+AC_MSG_CHECKING([whether to enable support for cookies])
+AC_ARG_ENABLE(cookies,
+AC_HELP_STRING([--enable-cookies],[Enable cookies support])
+AC_HELP_STRING([--disable-cookies],[Disable cookies support]),
+[ case "$enableval" in
+  no)
+       AC_MSG_RESULT(no)
+       AC_DEFINE(CURL_DISABLE_COOKIES, 1, [to disable cookies support])
+       ;;
+  *)   AC_MSG_RESULT(yes)
+       ;;
+  esac ],
+       AC_MSG_RESULT(yes)
+)
+
+dnl ************************************************************
+dnl hiding of library internal symbols
+dnl
+CURL_CONFIGURE_SYMBOL_HIDING
+
+dnl ************************************************************
+dnl enforce SONAME bump
+dnl + +AC_MSG_CHECKING([whether to enforce SONAME bump]) +AC_ARG_ENABLE(soname-bump, +AC_HELP_STRING([--enable-soname-bump],[Enable enforced SONAME bump]) +AC_HELP_STRING([--disable-soname-bump],[Disable enforced SONAME bump]), +[ case "$enableval" in + yes) AC_MSG_RESULT(yes) + soname_bump=yes + ;; + *) + AC_MSG_RESULT(no) + ;; + esac ], + AC_MSG_RESULT($soname_bump) +) +AM_CONDITIONAL(SONAME_BUMP, test x$soname_bump = xyes) + +dnl +dnl All the library dependencies put into $LIB apply to libcurl only. +dnl +LIBCURL_LIBS=$LIBS + +AC_SUBST(LIBCURL_LIBS) +AC_SUBST(CURL_NETWORK_LIBS) +AC_SUBST(CURL_NETWORK_AND_TIME_LIBS) + +dnl BLANK_AT_MAKETIME may be used in our Makefile.am files to blank +dnl LIBS variable used in generated makefile at makefile processing +dnl time. Doing this functionally prevents LIBS from being used for +dnl all link targets in given makefile. +BLANK_AT_MAKETIME= +AC_SUBST(BLANK_AT_MAKETIME) + +AM_CONDITIONAL(CROSSCOMPILING, test x$cross_compiling = xyes) + +dnl yes or no +ENABLE_SHARED="$enable_shared" +AC_SUBST(ENABLE_SHARED) + +dnl to let curl-config output the static libraries correctly +ENABLE_STATIC="$enable_static" +AC_SUBST(ENABLE_STATIC) + + +dnl +dnl For keeping supported features and protocols also in pkg-config file +dnl since it is more cross-compile friendly than curl-config +dnl + +if test "x$OPENSSL_ENABLED" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES SSL" +elif test -n "$SSL_ENABLED"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES SSL" +fi +if test "x$IPV6_ENABLED" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES IPv6" +fi +if test "x$USE_UNIX_SOCKETS" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES UnixSockets" +fi +if test "x$HAVE_LIBZ" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES libz" +fi +if test "x$USE_ARES" = "x1" -o "x$USE_THREADS_POSIX" = "x1" \ + -o "x$USE_THREADS_WIN32" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES AsynchDNS" +fi +if test "x$IDN_ENABLED" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES IDN" +fi +if test "x$USE_WINDOWS_SSPI" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES SSPI" +fi + +if test "x$HAVE_GSSAPI" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES GSS-API" +fi + +if test "x$curl_psl_msg" = "xyes"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES PSL" +fi + +if test "x$CURL_DISABLE_CRYPTO_AUTH" != "x1" -a \ + \( "x$HAVE_GSSAPI" = "x1" -o "x$USE_WINDOWS_SSPI" = "x1" \); then + SUPPORT_FEATURES="$SUPPORT_FEATURES SPNEGO" +fi + +if test "x$CURL_DISABLE_CRYPTO_AUTH" != "x1" -a \ + \( "x$HAVE_GSSAPI" = "x1" -o "x$USE_WINDOWS_SSPI" = "x1" \); then + SUPPORT_FEATURES="$SUPPORT_FEATURES Kerberos" +fi + +if test "x$CURL_DISABLE_CRYPTO_AUTH" != "x1"; then + if test "x$OPENSSL_ENABLED" = "x1" -o "x$USE_WINDOWS_SSPI" = "x1" \ + -o "x$GNUTLS_ENABLED" = "x1" -o "x$MBEDTLS_ENABLED" = "x1" \ + -o "x$NSS_ENABLED" = "x1" -o "x$DARWINSSL_ENABLED" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES NTLM" + + if test "x$CURL_DISABLE_HTTP" != "x1" -a \ + "x$NTLM_WB_ENABLED" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES NTLM_WB" + fi + fi +fi + +if test "x$USE_TLS_SRP" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES TLS-SRP" +fi + +if test "x$USE_NGHTTP2" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES HTTP2" +fi + +if test "x$OPENSSL_ENABLED" = "x1" -o "x$GNUTLS_ENABLED" = "x1" \ + -o "x$NSS_ENABLED" = "x1"; then + SUPPORT_FEATURES="$SUPPORT_FEATURES HTTPS-proxy" +fi + +AC_SUBST(SUPPORT_FEATURES) + +dnl For supported protocols in pkg-config file +if test "x$CURL_DISABLE_HTTP" != "x1"; then + 
SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS HTTP" + if test "x$SSL_ENABLED" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS HTTPS" + fi +fi +if test "x$CURL_DISABLE_FTP" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS FTP" + if test "x$SSL_ENABLED" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS FTPS" + fi +fi +if test "x$CURL_DISABLE_FILE" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS FILE" +fi +if test "x$CURL_DISABLE_TELNET" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS TELNET" +fi +if test "x$CURL_DISABLE_LDAP" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS LDAP" + if test "x$CURL_DISABLE_LDAPS" != "x1"; then + if (test "x$USE_OPENLDAP" = "x1" && test "x$SSL_ENABLED" = "x1") || + (test "x$USE_OPENLDAP" != "x1" && test "x$HAVE_LDAP_SSL" = "x1"); then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS LDAPS" + fi + fi +fi +if test "x$CURL_DISABLE_DICT" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS DICT" +fi +if test "x$CURL_DISABLE_TFTP" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS TFTP" +fi +if test "x$CURL_DISABLE_GOPHER" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS GOPHER" +fi +if test "x$CURL_DISABLE_POP3" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS POP3" + if test "x$SSL_ENABLED" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS POP3S" + fi +fi +if test "x$CURL_DISABLE_IMAP" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS IMAP" + if test "x$SSL_ENABLED" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS IMAPS" + fi +fi +if test "x$CURL_DISABLE_SMB" != "x1" \ + -a "x$CURL_DISABLE_CRYPTO_AUTH" != "x1" \ + -a \( "x$OPENSSL_ENABLED" = "x1" -o "x$USE_WINDOWS_SSPI" = "x1" \ + -o "x$GNUTLS_ENABLED" = "x1" -o "x$MBEDTLS_ENABLED" = "x1" \ + -o "x$NSS_ENABLED" = "x1" -o "x$DARWINSSL_ENABLED" = "x1" \); then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS SMB" + if test "x$SSL_ENABLED" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS SMBS" + fi +fi +if test "x$CURL_DISABLE_SMTP" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS SMTP" + if test "x$SSL_ENABLED" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS SMTPS" + fi +fi +if test "x$USE_LIBSSH2" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS SCP" + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS SFTP" +fi +if test "x$CURL_DISABLE_RTSP" != "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS RTSP" +fi +if test "x$USE_LIBRTMP" = "x1"; then + SUPPORT_PROTOCOLS="$SUPPORT_PROTOCOLS RTMP" +fi + +dnl replace spaces with newlines +dnl sort the lines +dnl replace the newlines back to spaces +SUPPORT_PROTOCOLS=`echo $SUPPORT_PROTOCOLS | tr ' ' '\012' | sort | tr '\012' ' '` + +AC_SUBST(SUPPORT_PROTOCOLS) + +dnl squeeze whitespace out of some variables + +squeeze CFLAGS +squeeze CPPFLAGS +squeeze DEFS +squeeze LDFLAGS +squeeze LIBS + +squeeze LIBCURL_LIBS +squeeze CURL_NETWORK_LIBS +squeeze CURL_NETWORK_AND_TIME_LIBS + +squeeze SUPPORT_FEATURES +squeeze SUPPORT_PROTOCOLS + +XC_CHECK_BUILD_FLAGS + +if test "x$want_curldebug_assumed" = "xyes" && + test "x$want_curldebug" = "xyes" && test "x$USE_ARES" = "x1"; then + ac_configure_args="$ac_configure_args --enable-curldebug" +fi + +AC_CONFIG_FILES([Makefile \ + docs/Makefile \ + docs/examples/Makefile \ + docs/libcurl/Makefile \ + docs/libcurl/opts/Makefile \ + docs/cmdline-opts/Makefile \ + include/Makefile \ + include/curl/Makefile \ + src/Makefile \ + lib/Makefile \ + scripts/Makefile \ + lib/libcurl.vers \ + tests/Makefile \ + tests/certs/Makefile \ + tests/certs/scripts/Makefile \ + tests/data/Makefile \ + tests/server/Makefile \ + 
tests/libtest/Makefile \ + tests/unit/Makefile \ + packages/Makefile \ + packages/Win32/Makefile \ + packages/Win32/cygwin/Makefile \ + packages/Linux/Makefile \ + packages/Linux/RPM/Makefile \ + packages/Linux/RPM/curl.spec \ + packages/Linux/RPM/curl-ssl.spec \ + packages/Solaris/Makefile \ + packages/EPM/curl.list \ + packages/EPM/Makefile \ + packages/vms/Makefile \ + packages/AIX/Makefile \ + packages/AIX/RPM/Makefile \ + packages/AIX/RPM/curl.spec \ + curl-config \ + libcurl.pc +]) +AC_OUTPUT + +CURL_GENERATE_CONFIGUREHELP_PM + +XC_AMEND_DISTCLEAN([lib src tests/unit tests/server tests/libtest docs/examples]) + +AC_MSG_NOTICE([Configured to build curl/libcurl: + + curl version: ${CURLVERSION} + Host setup: ${host} + Install prefix: ${prefix} + Compiler: ${CC} + SSL support: ${curl_ssl_msg} + SSH support: ${curl_ssh_msg} + zlib support: ${curl_zlib_msg} + GSS-API support: ${curl_gss_msg} + TLS-SRP support: ${curl_tls_srp_msg} + resolver: ${curl_res_msg} + IPv6 support: ${curl_ipv6_msg} + Unix sockets support: ${curl_unix_sockets_msg} + IDN support: ${curl_idn_msg} + Build libcurl: Shared=${enable_shared}, Static=${enable_static} + Built-in manual: ${curl_manual_msg} + --libcurl option: ${curl_libcurl_msg} + Verbose errors: ${curl_verbose_msg} + SSPI support: ${curl_sspi_msg} + ca cert bundle: ${ca}${ca_warning} + ca cert path: ${capath}${capath_warning} + ca fallback: ${with_ca_fallback} + LDAP support: ${curl_ldap_msg} + LDAPS support: ${curl_ldaps_msg} + RTSP support: ${curl_rtsp_msg} + RTMP support: ${curl_rtmp_msg} + metalink support: ${curl_mtlnk_msg} + PSL support: ${curl_psl_msg} + HTTP2 support: ${curl_h2_msg} + Protocols: ${SUPPORT_PROTOCOLS} +]) + +if test "x$soname_bump" = "xyes"; then + +cat <, et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +########################################################################### + +prefix=/home/ak/git/SUGAR/CPUMINER/sugarmaker/deps-win32/i686-w64-mingw32 +exec_prefix=${prefix} +includedir=${prefix}/include +cppflag_curl_staticlib=-DCURL_STATICLIB + +usage() +{ + cat <&2 + exit 1 + fi + ;; + + --configure) + echo " '--host=i686-w64-mingw32' '--disable-shared' '--enable-static' '--with-winssl' '--prefix=/home/ak/git/SUGAR/CPUMINER/sugarmaker/deps-win32/i686-w64-mingw32' 'host_alias=i686-w64-mingw32'" + ;; + + *) + echo "unknown option: $1" + usage 1 + ;; + esac + shift +done + +exit 0 diff --git a/deps-win32/curl-7.54.1/curl-config.in b/deps-win32/curl-7.54.1/curl-config.in new file mode 100644 index 0000000..af484b4 --- /dev/null +++ b/deps-win32/curl-7.54.1/curl-config.in @@ -0,0 +1,178 @@ +#! /bin/sh +#*************************************************************************** +# _ _ ____ _ +# Project ___| | | | _ \| | +# / __| | | | |_) | | +# | (__| |_| | _ <| |___ +# \___|\___/|_| \_\_____| +# +# Copyright (C) 2001 - 2012, Daniel Stenberg, , et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. 
The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +########################################################################### + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +includedir=@includedir@ +cppflag_curl_staticlib=@CPPFLAG_CURL_STATICLIB@ + +usage() +{ + cat <&2 + exit 1 + fi + ;; + + --configure) + echo @CONFIGURE_OPTIONS@ + ;; + + *) + echo "unknown option: $1" + usage 1 + ;; + esac + shift +done + +exit 0 diff --git a/deps-win32/curl-7.54.1/docs/BINDINGS.md b/deps-win32/curl-7.54.1/docs/BINDINGS.md new file mode 100644 index 0000000..ecd231a --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/BINDINGS.md @@ -0,0 +1,118 @@ +libcurl bindings +================ + + Creative people have written bindings or interfaces for various environments + and programming languages. Using one of these allows you to take advantage of + curl powers from within your favourite language or system. + + This is a list of all known interfaces as of this writing. + + The bindings listed below are not part of the curl/libcurl distribution + archives, but must be downloaded and installed separately. + +[Ada95](http://www.almroth.com/adacurl/index.html) Written by Andreas Almroth + +[Basic](http://scriptbasic.com/) ScriptBasic bindings written by Peter Verhas + +C++: [curlpp](http://curlpp.org/) Written by Jean-Philippe Barrette-LaPierre, +[curlcpp](https://github.com/JosephP91/curlcpp) by Giuseppe Persico and [C++ +Requests](https://github.com/whoshuu/cpr) by Huu Nguyen + +[Ch](https://chcurl.sourceforge.io/) Written by Stephen Nestinger and Jonathan Rogado + +Cocoa: [BBHTTP](https://github.com/brunodecarvalho/BBHTTP) written by Bruno de Carvalho +[curlhandle](https://github.com/karelia/curlhandle) Written by Dan Wood + +[D](https://dlang.org/library/std/net/curl.html) Written by Kenneth Bogert + +[Delphi](https://github.com/Mercury13/curl4delphi) Written by Mikhail Merkuryev + +[Dylan](https://dylanlibs.sourceforge.io/) Written by Chris Double + +[Eiffel](https://room.eiffel.com/library/curl) Written by Eiffel Software + +[Euphoria](http://rays-web.com/eulibcurl.htm) Written by Ray Smith + +[Falcon](http://www.falconpl.org/index.ftd?page_id=prjs&prj_id=curl) + +[Ferite](http://www.ferite.org/) Written by Paul Querna + +[Gambas](https://gambas.sourceforge.io/) + +[glib/GTK+](http://atterer.net/glibcurl/) Written by Richard Atterer + +Go: [go-curl](https://github.com/andelf/go-curl) by ShuYu Wang + +[Guile](http://www.lonelycactus.com/guile-curl.html) Written by Michael L. 
Gran + +[Harbour](https://github.com/vszakats/harbour-core/tree/master/contrib/hbcurl) Written by Viktor Szakáts + +[Haskell](https://hackage.haskell.org/cgi-bin/hackage-scripts/package/curl) Written by Galois, Inc + +[Java](https://github.com/pjlegato/curl-java) + +[Julia](https://github.com/forio/Curl.jl) Written by Paul Howe + +[Lisp](https://common-lisp.net/project/cl-curl/) Written by Liam Healy + +Lua: [luacurl](http://luacurl.luaforge.net/) by Alexander Marinov, [Lua-cURL](https://github.com/Lua-cURL) by Jürgen Hötzel + +[Mono](https://forge.novell.com/modules/xfmod/project/?libcurl-mono) Written by Jeffrey Phillips + +[.NET](https://sourceforge.net/projects/libcurl-net/) libcurl-net by Jeffrey Phillips + +[node.js](https://github.com/JCMais/node-libcurl) node-libcurl by Jonathan Cardoso Machado + +[Object-Pascal](http://www.tekool.com/opcurl) Free Pascal, Delphi and Kylix binding written by Christophe Espern. + +[OCaml](http://opam.ocaml.org/packages/ocurl/) Written by Lars Nilsson and ygrek + +[Pascal](http://houston.quik.com/jkp/curlpas/) Free Pascal, Delphi and Kylix binding written by Jeffrey Pohlmeyer. + +Perl: [WWW--Curl](https://github.com/szbalint/WWW--Curl) Maintained by Cris +Bailiff and Bálint Szilakszi, +[perl6-net-curl](https://github.com/azawawi/perl6-net-curl) by Ahmad M. Zawawi + +[PHP](https://php.net/curl) Originally written by Sterling Hughes + +[PostgreSQL](http://gborg.postgresql.org/project/pgcurl/projdisplay.php) Written by Gian Paolo Ciceri + +[Python](http://pycurl.io/) PycURL by Kjetil Jacobsen + +[R](https://cran.r-project.org/package=curl) + +[Rexx](https://rexxcurl.sourceforge.io/) Written Mark Hessling + +[Ring](http://ring-lang.sourceforge.net/doc1.3/libcurl.html) RingLibCurl by Mahmoud Fayed + +RPG, support for ILE/RPG on OS/400 is included in source distribution + +Ruby: [curb](http://curb.rubyforge.org/) written by Ross Bamford, [ruby-curl-multi](http://curl-multi.rubyforge.org/) written by Kristjan Petursson and Keith Rarick + +[Rust](https://github.com/carllerche/curl-rust) curl-rust - by Carl Lerche + +[Scheme](https://www.metapaper.net/lisovsky/web/curl/) Bigloo binding by Kirill Lisovsky + +[Scilab](https://help.scilab.org/docs/current/fr_FR/getURL.html) binding by Sylvestre Ledru + +[S-Lang](http://www.jedsoft.org/slang/modules/curl.html) by John E Davis + +[Smalltalk](http://www.squeaksource.com/CurlPlugin/) Written by Danil Osipchuk + +[SP-Forth](http://spf.cvs.sourceforge.net/viewvc/spf/devel/~ac/lib/lin/curl/) Written by Andrey Cherezov + +[SPL](http://www.clifford.at/spl/) Written by Clifford Wolf + +[Tcl](http://mirror.yellow5.com/tclcurl/) Tclcurl by Andrés García + +[Visual Basic](https://sourceforge.net/projects/libcurl-vb/) libcurl-vb by Jeffrey Phillips + +[Visual Foxpro](http://www.ctl32.com.ar/libcurl.asp) by Carlos Alloatti + +[Q](https://q-lang.sourceforge.io/) The libcurl module is part of the default install + +[wxWidgets](https://wxcode.sourceforge.io/components/wxcurl/) Written by Casey O'Donnell + +[XBLite](http://perso.wanadoo.fr/xblite/libraries.html) Written by David Szafranski + +[Xojo](https://github.com/charonn0/RB-libcURL) Written by Andrew Lambert diff --git a/deps-win32/curl-7.54.1/docs/BUGS b/deps-win32/curl-7.54.1/docs/BUGS new file mode 100644 index 0000000..12714cc --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/BUGS @@ -0,0 +1,280 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +BUGS + + 1. 
Bugs + 1.1 There are still bugs + 1.2 Where to report + 1.3 What to report + 1.4 libcurl problems + 1.5 Who will fix the problems + 1.6 How to get a stack trace + 1.7 Bugs in libcurl bindings + 1.8 Bugs in old versions + + 2. Bug fixing procedure + 2.1 What happens on first filing + 2.2 First response + 2.3 Not reproducible + 2.4 Unresponsive + 2.5 Lack of time/interest + 2.6 KNOWN_BUGS + 2.7 TODO + 2.8 Closing off stalled bugs + +============================================================================== + +1.1 There are still bugs + + Curl and libcurl have grown substantially since the beginning. At the time + of writing (January 2013), there are about 83,000 lines of source code, and + by the time you read this it has probably grown even more. + + Of course there are lots of bugs left. And lots of misfeatures. + + To help us make curl the stable and solid product we want it to be, we need + bug reports and bug fixes. + +1.2 Where to report + + If you can't fix a bug yourself and submit a fix for it, try to report an as + detailed report as possible to a curl mailing list to allow one of us to + have a go at a solution. You can optionally also post your bug/problem at + curl's bug tracking system over at + + https://github.com/curl/curl/issues + + Please read the rest of this document below first before doing that! + + If you feel you need to ask around first, find a suitable mailing list and + post there. The lists are available on https://curl.haxx.se/mail/ + +1.3 What to report + + When reporting a bug, you should include all information that will help us + understand what's wrong, what you expected to happen and how to repeat the + bad behavior. You therefore need to tell us: + + - your operating system's name and version number + + - what version of curl you're using (curl -V is fine) + + - versions of the used libraries that libcurl is built to use + + - what URL you were working with (if possible), at least which protocol + + and anything and everything else you think matters. Tell us what you + expected to happen, tell use what did happen, tell us how you could make it + work another way. Dig around, try out, test. Then include all the tiny bits + and pieces in your report. You will benefit from this yourself, as it will + enable us to help you quicker and more accurately. + + Since curl deals with networks, it often helps us if you include a protocol + debug dump with your bug report. The output you get by using the -v or + --trace options. + + If curl crashed, causing a core dump (in unix), there is hardly any use to + send that huge file to anyone of us. Unless we have an exact same system + setup as you, we can't do much with it. Instead we ask you to get a stack + trace and send that (much smaller) output to us instead! + + The address and how to subscribe to the mailing lists are detailed in the + MANUAL file. + +1.4 libcurl problems + + When you've written your own application with libcurl to perform transfers, + it is even more important to be specific and detailed when reporting bugs. + + Tell us the libcurl version and your operating system. Tell us the name and + version of all relevant sub-components like for example the SSL library + you're using and what name resolving your libcurl uses. If you use SFTP or + SCP, the libssh2 version is relevant etc. 
+ + Showing us a real source code example repeating your problem is the best way + to get our attention and it will greatly increase our chances to understand + your problem and to work on a fix (if we agree it truly is a problem). + + Lots of problems that appear to be libcurl problems are actually just abuses + of the libcurl API or other malfunctions in your applications. It is advised + that you run your problematic program using a memory debug tool like + valgrind or similar before you post memory-related or "crashing" problems to + us. + +1.5 Who will fix the problems + + If the problems or bugs you describe are considered to be bugs, we want to + have the problems fixed. + + There are no developers in the curl project that are paid to work on bugs. + All developers that take on reported bugs do this on a voluntary basis. We + do it out of an ambition to keep curl and libcurl excellent products and out + of pride. + + But please do not assume that you can just lump over something to us and it + will then magically be fixed after some given time. Most often we need + feedback and help to understand what you've experienced and how to repeat a + problem. Then we may only be able to assist YOU to debug the problem and to + track down the proper fix. + + We get reports from many people every month and each report can take a + considerable amount of time to really go to the bottom with. + +1.6 How to get a stack trace + + First, you must make sure that you compile all sources with -g and that you + don't 'strip' the final executable. Try to avoid optimizing the code as + well, remove -O, -O2 etc from the compiler options. + + Run the program until it cores. + + Run your debugger on the core file, like ' curl core'. + should be replaced with the name of your debugger, in most cases that will + be 'gdb', but 'dbx' and others also occur. + + When the debugger has finished loading the core file and presents you a + prompt, enter 'where' (without the quotes) and press return. + + The list that is presented is the stack trace. If everything worked, it is + supposed to contain the chain of functions that were called when curl + crashed. Include the stack trace with your detailed bug report. It'll help a + lot. + +1.7 Bugs in libcurl bindings + + There will of course pop up bugs in libcurl bindings. You should then + primarily approach the team that works on that particular binding and see + what you can do to help them fix the problem. + + If you suspect that the problem exists in the underlying libcurl, then + please convert your program over to plain C and follow the steps outlined + above. + +1.8 Bugs in old versions + + The curl project typically releases new versions every other month, and we + fix several hundred bugs per year. For a huge table of releases, number of + bug fixes and more, see: https://curl.haxx.se/docs/releases.html + + The developers in the curl project do not have bandwidth or energy enough to + maintain several branches or to spend much time on hunting down problems in + old versions when chances are we already fixed them or at least that they've + changed nature and appearance in later versions. + + When you experience a problem and want to report it, you really SHOULD + include the version number of the curl you're using when you experience the + issue. If that version number shows us that you're using an out-of-date + curl, you should also try out a modern curl version to see if the problem + persists or how/if it has changed in apperance. 
+ + Even if you cannot immediately upgrade your application/system to run the + latest curl version, you can most often at least run a test version or + experimental build or similar, to get this confirmed or not. + + At times people insist that they cannot upgrade to a modern curl version, + but instead they "just want the bug fixed". That's fine, just don't count on + us spending many cycles on trying to identify which single commit, if that's + even possible, that at some point in the past fixed the problem you're now + experiencing. + + Security wise, it is almost always a bad idea to lag behind the current curl + versions by a lot. We keeping discovering and reporting security problems + over time see you can see in this table: + https://curl.haxx.se/docs/vulnerabilities.html + +2. Bug fixing procedure + +2.1 What happens on first filing + + When a new issue is posted in the issue tracker or on the mailing list, the + team of developers first need to see the report. Maybe they took the day + off, maybe they're off in the woods hunting. Have patience. Allow at least a + few days before expecting someone to have responded. + + In the issue tracker you can expect that some labels will be set on the + issue to help categorize it. + +2.2 First response + + If your issue/bug report wasn't perfect at once (and few are), chances are + that someone will ask follow-up questions. Which version did you use? Which + options did you use? How often does the problem occur? How can we reproduce + this problem? Which protocols does it involve? Or perhaps much more specific + and deep diving questions. It all depends on your specific issue. + + You should then respond to these follow-up questions and provide more info + about the problem, so that we can help you figure it out. Or maybe you can + help us figure it out. An active back-and-forth communication is important + and the key for finding a cure and landing a fix. + +2.3 Not reproducible + + For problems that we can't reproduce and can't understand even after having + gotten all the info we need and having studied the source code over again, + are really hard to solve so then we may require further work from you who + actually see or experience the problem. + +2.4 Unresponsive + + If the problem haven't been understood or reproduced, and there's nobody + responding to follow-up questions or questions asking for clarifications or + for discussing possible ways to move forward with the task, we take that as + a strong suggestion that the bug is not important. + + Unimportant issues will be closed as inactive sooner or later as they can't + be fixed. The inactivity period (waiting for responses) should not be + shorter than two weeks but may extend months. + +2.5 Lack of time/interest + + Bugs that are filed and are understood can unfortunately end up in the + "nobody cares enough about it to work on it" category. Such bugs are + perfectly valid problems that *should* get fixed but apparently aren't. We + try to mark such bugs as "KNOWN_BUGS material" after a time of inactivity + and if no activity is noticed after yet some time those bugs are added to + KNOWN_BUGS and are closed in the issue tracker. + +2.6 KNOWN_BUGS + + This is a list of known bugs. Bugs we know exist and that have been pointed + out but that haven't yet been fixed. 
The reasons for why they haven't been + fixed can involve anything really, but the primary reason is that nobody has + considered these problems to be important enough to spend the necessary time + and effort to have them fixed. + + The KNOWN_BUGS are always up for grabs and we will always love the ones who + bring one of them back to live and offers solutions to them. + + The KNOWN_BUGS document has a sibling document known as TODO. + +2.7 TODO + + Issues that are filed or reported that aren't really bugs but more missing + features or ideas for future improvements and so on are marked as + 'enhancement' or 'feature-request' and will be added to the TODO document + instead and the issue is closed. We don't keep TODO items in the issue + tracker. + + The TODO document is full of ideas and suggestions of what we can add or fix + one day. You're always encouraged and free to grab one of those items and + take up a discussion with the curl development team on how that could be + implemented or provided in the project so that you can work on ticking it + odd that document. + + If the issue is rather a bug and not a missing feature or functionality, it + is listed in KNOWN_BUGS instead. + +2.8 Closing off stalled bugs + + The issue and pull request trackers on https://github.com/curl/curl will + only hold "active" entries (using a non-precise definition of what active + actually is, but they're at least not completely dead). Those that are + abandonded or in other ways dormant will be closed and sometimes added to + TODO and KNOWN_BUGS instead. + + This way, we only have "active" issues open on github. Irrelevant issues and + pull requests will not distract developes or casual visitors. diff --git a/deps-win32/curl-7.54.1/docs/CHECKSRC.md b/deps-win32/curl-7.54.1/docs/CHECKSRC.md new file mode 100644 index 0000000..b42de84 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/CHECKSRC.md @@ -0,0 +1,124 @@ +# checksrc + +This is the tool we use within the curl project to scan C source code and +check that it adheres to our [Source Code Style guide](CODE_STYLE.md). + +## Usage + + checksrc.pl [options] [file1] [file2] ... + +## Command line options + +`-W[file]` whitelists that file and excludes it from being checked. Helpful +when, for example, one of the files is generated. + +`-D[dir]` directory name to prepend to file names when accessing them. + +`-h` shows the help output, that also lists all recognized warnings + +## What does checksrc warn for? + +checksrc does not check and verify the code against the entire style guide, +but the script is instead an effort to detect the most common mistakes and +syntax mistakes that contributors make before they get accustomed to our code +style. Heck, many of us regulars do the mistakes too and this script helps us +keep the code in shape. + + checksrc.pl -h + +Lists how to use the script and it lists all existing warnings it has and +problems it detects. At the time of this writing, the existing checksrc +warnings are: + +- `BADCOMMAND`: There's a bad !checksrc! instruction in the code. See the + **Ignore certain warnings** section below for details. + +- `BANNEDFUNC`: A banned function was used. The functions sprintf, vsprintf, + strcat, strncat, gets are **never** allowed in curl source code. + +- `BRACEELSE`: '} else' on the same line. The else is supposed to be on the + following line. + +- `BRACEPOS`: wrong position for an open brace (`{`). + +- `COMMANOSPACE`: a comma without following space + +- `COPYRIGHT`: the file is missing a copyright statement! 
+ +- `CPPCOMMENTS`: `//` comment detected, that's not C89 compliant + +- `FOPENMODE`: `fopen()` needs a macro for the mode string, use it + +- `INDENTATION`: detected a wrong start column for code. Note that this warning + only checks some specific places and will certainly miss many bad + indentations. + +- `LONGLINE`: A line is longer than 79 columns. + +- `PARENBRACE`: `){` was used without sufficient space in between. + +- `RETURNNOSPACE`: `return` was used without space between the keyword and the + following value. + +- `SPACEAFTERPAREN`: there was a space after open parenthesis, `( text`. + +- `SPACEBEFORECLOSE`: there was a space before a close parenthesis, `text )`. + +- `SPACEBEFORECOMMA`: there was a space before a comma, `one , two`. + +- `SPACEBEFOREPAREN`: there was a space before an open parenthesis, `if (`, + where one was not expected + +- `SPACESEMILCOLON`: there was a space before semicolon, ` ;`. + +- `TABS`: TAB characters are not allowed! + +- `TRAILINGSPACE`: Trailing white space on the line + +- `UNUSEDIGNORE`: a checksrc inlined warning ignore was asked for but not used, + that's an ignore that should be removed or changed to get used. + +## Ignore certain warnings + +Due to the nature of the source code and the flaws of the checksrc tool, there +is sometimes a need to ignore specific warnings. checksrc allows a few +different ways to do this. + +### Inline ignore + +You can control what to ignore within a specific source file by providing +instructions to checksrc in the source code itself. You need a magic marker +that is `!checksrc!` followed by the instruction. The instruction can ask to +ignore a specific warning N number of times or you ignore all of them until +you mark the end of the ignored section. + +Inline ignores are only done for that single specific source code file. + +Example + + /* !checksrc! disable LONGLINE all */ + +This will ignore the warning for overly long lines until it is re-enabled with: + + /* !checksrc! enable LONGLINE */ + +If the enabling isn't performed before the end of the file, it will be enabled +automatically for the next file. + +You can also opt to ignore just N violations so that if you have a single long +line you just can't shorten and is agreed to be fine anyway: + + /* !checksrc! disable LONGLINE 1 */ + +... and the warning for long lines will be enabled again automatically after +it has ignored that single warning. The number `1` can of course be changed to +any other integer number. It can be used to make sure only the exact intended +instances are ignored and nothing extra. + +### Directory wide ignore patterns + +This is a method we've transitioned away from. Use inline ignores as far as +possible. + +Make a `checksrc.whitelist` file in the directory of the source code with the +false positive, and include the full offending line into this file. diff --git a/deps-win32/curl-7.54.1/docs/CIPHERS.md b/deps-win32/curl-7.54.1/docs/CIPHERS.md new file mode 100644 index 0000000..99d261b --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/CIPHERS.md @@ -0,0 +1,426 @@ +# Ciphers + +With curl's options `CURLOPT_SSL_CIPHER_LIST` and `--ciphers` users can +control which ciphers to consider when negotiating TLS connections. + +The names of the known ciphers differ depending on which TLS backend that +libcurl was built to use. This is an attempt to list known cipher names. 
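+
+As a brief illustration (not part of the original cipher tables): in libcurl
+the list is passed as one colon-separated string, and the names must match
+the TLS backend the library was built with. A minimal sketch, assuming an
+OpenSSL-backed build; the URL and the chosen cipher names are examples only:
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl;
+      CURLcode res = CURLE_FAILED_INIT;
+
+      curl_global_init(CURL_GLOBAL_DEFAULT);
+      curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        /* colon-separated, OpenSSL-style names from the tables below */
+        curl_easy_setopt(curl, CURLOPT_SSL_CIPHER_LIST,
+                         "ECDHE-RSA-AES128-GCM-SHA256:"
+                         "ECDHE-ECDSA-AES128-GCM-SHA256");
+        res = curl_easy_perform(curl);
+        curl_easy_cleanup(curl);
+      }
+      curl_global_cleanup();
+      return (int)res;
+    }
+
+The command line tool exposes the same setting through `--ciphers`.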
+ +## OpenSSL + +(based on [OpenSSL docs](https://www.openssl.org/docs/man1.1.0/apps/ciphers.html)) + +### SSL3 cipher suites + +`NULL-MD5` +`NULL-SHA` +`RC4-MD5` +`RC4-SHA` +`IDEA-CBC-SHA` +`DES-CBC3-SHA` +`DH-DSS-DES-CBC3-SHA` +`DH-RSA-DES-CBC3-SHA` +`DHE-DSS-DES-CBC3-SHA` +`DHE-RSA-DES-CBC3-SHA` +`ADH-RC4-MD5` +`ADH-DES-CBC3-SHA` + +### TLS v1.0 cipher suites + +`NULL-MD5` +`NULL-SHA` +`RC4-MD5` +`RC4-SHA` +`IDEA-CBC-SHA` +`DES-CBC3-SHA` +`DHE-DSS-DES-CBC3-SHA` +`DHE-RSA-DES-CBC3-SHA` +`ADH-RC4-MD5` +`ADH-DES-CBC3-SHA` + +### AES ciphersuites from RFC3268, extending TLS v1.0 + +`AES128-SHA` +`AES256-SHA` +`DH-DSS-AES128-SHA` +`DH-DSS-AES256-SHA` +`DH-RSA-AES128-SHA` +`DH-RSA-AES256-SHA` +`DHE-DSS-AES128-SHA` +`DHE-DSS-AES256-SHA` +`DHE-RSA-AES128-SHA` +`DHE-RSA-AES256-SHA` +`ADH-AES128-SHA` +`ADH-AES256-SHA` + +### SEED ciphersuites from RFC4162, extending TLS v1.0 + +`SEED-SHA` +`DH-DSS-SEED-SHA` +`DH-RSA-SEED-SHA` +`DHE-DSS-SEED-SHA` +`DHE-RSA-SEED-SHA` +`ADH-SEED-SHA` + +### GOST ciphersuites, extending TLS v1.0 + +`GOST94-GOST89-GOST89` +`GOST2001-GOST89-GOST89` +`GOST94-NULL-GOST94` +`GOST2001-NULL-GOST94` + +### Elliptic curve cipher suites + +`ECDHE-RSA-NULL-SHA` +`ECDHE-RSA-RC4-SHA` +`ECDHE-RSA-DES-CBC3-SHA` +`ECDHE-RSA-AES128-SHA` +`ECDHE-RSA-AES256-SHA` +`ECDHE-ECDSA-NULL-SHA` +`ECDHE-ECDSA-RC4-SHA` +`ECDHE-ECDSA-DES-CBC3-SHA` +`ECDHE-ECDSA-AES128-SHA` +`ECDHE-ECDSA-AES256-SHA` +`AECDH-NULL-SHA` +`AECDH-RC4-SHA` +`AECDH-DES-CBC3-SHA` +`AECDH-AES128-SHA` +`AECDH-AES256-SHA` + +### TLS v1.2 cipher suites + +`NULL-SHA256` +`AES128-SHA256` +`AES256-SHA256` +`AES128-GCM-SHA256` +`AES256-GCM-SHA384` +`DH-RSA-AES128-SHA256` +`DH-RSA-AES256-SHA256` +`DH-RSA-AES128-GCM-SHA256` +`DH-RSA-AES256-GCM-SHA384` +`DH-DSS-AES128-SHA256` +`DH-DSS-AES256-SHA256` +`DH-DSS-AES128-GCM-SHA256` +`DH-DSS-AES256-GCM-SHA384` +`DHE-RSA-AES128-SHA256` +`DHE-RSA-AES256-SHA256` +`DHE-RSA-AES128-GCM-SHA256` +`DHE-RSA-AES256-GCM-SHA384` +`DHE-DSS-AES128-SHA256` +`DHE-DSS-AES256-SHA256` +`DHE-DSS-AES128-GCM-SHA256` +`DHE-DSS-AES256-GCM-SHA384` +`ECDHE-RSA-AES128-SHA256` +`ECDHE-RSA-AES256-SHA384` +`ECDHE-RSA-AES128-GCM-SHA256` +`ECDHE-RSA-AES256-GCM-SHA384` +`ECDHE-ECDSA-AES128-SHA256` +`ECDHE-ECDSA-AES256-SHA384` +`ECDHE-ECDSA-AES128-GCM-SHA256` +`ECDHE-ECDSA-AES256-GCM-SHA384` +`ADH-AES128-SHA256` +`ADH-AES256-SHA256` +`ADH-AES128-GCM-SHA256` +`ADH-AES256-GCM-SHA384` +`AES128-CCM` +`AES256-CCM` +`DHE-RSA-AES128-CCM` +`DHE-RSA-AES256-CCM` +`AES128-CCM8` +`AES256-CCM8` +`DHE-RSA-AES128-CCM8` +`DHE-RSA-AES256-CCM8` +`ECDHE-ECDSA-AES128-CCM` +`ECDHE-ECDSA-AES256-CCM` +`ECDHE-ECDSA-AES128-CCM8` +`ECDHE-ECDSA-AES256-CCM8` + +### Camellia HMAC-Based ciphersuites from RFC6367, extending TLS v1.2 + +`ECDHE-ECDSA-CAMELLIA128-SHA256` +`ECDHE-ECDSA-CAMELLIA256-SHA384` +`ECDHE-RSA-CAMELLIA128-SHA256` +`ECDHE-RSA-CAMELLIA256-SHA384` + +## NSS + +### Totally insecure + +`rc4` +`rc4-md5` +`rc4export` +`rc2` +`rc2export` +`des` +`desede3` + +### SSL3/TLS cipher suites + +`rsa_rc4_128_md5` +`rsa_rc4_128_sha` +`rsa_3des_sha` +`rsa_des_sha` +`rsa_rc4_40_md5` +`rsa_rc2_40_md5` +`rsa_null_md5` +`rsa_null_sha` +`fips_3des_sha` +`fips_des_sha` +`fortezza` +`fortezza_rc4_128_sha` +`fortezza_null` + +### TLS 1.0 Exportable 56-bit Cipher Suites + +`rsa_des_56_sha` +`rsa_rc4_56_sha` + +### AES ciphers + +`dhe_dss_aes_128_cbc_sha` +`dhe_dss_aes_256_cbc_sha` +`dhe_rsa_aes_128_cbc_sha` +`dhe_rsa_aes_256_cbc_sha` +`rsa_aes_128_sha` +`rsa_aes_256_sha` + +### ECC ciphers + +`ecdh_ecdsa_null_sha` +`ecdh_ecdsa_rc4_128_sha` 
+`ecdh_ecdsa_3des_sha` +`ecdh_ecdsa_aes_128_sha` +`ecdh_ecdsa_aes_256_sha` +`ecdhe_ecdsa_null_sha` +`ecdhe_ecdsa_rc4_128_sha` +`ecdhe_ecdsa_3des_sha` +`ecdhe_ecdsa_aes_128_sha` +`ecdhe_ecdsa_aes_256_sha` +`ecdh_rsa_null_sha` +`ecdh_rsa_128_sha` +`ecdh_rsa_3des_sha` +`ecdh_rsa_aes_128_sha` +`ecdh_rsa_aes_256_sha` +`ecdhe_rsa_null` +`ecdhe_rsa_rc4_128_sha` +`ecdhe_rsa_3des_sha` +`ecdhe_rsa_aes_128_sha` +`ecdhe_rsa_aes_256_sha` +`ecdh_anon_null_sha` +`ecdh_anon_rc4_128sha` +`ecdh_anon_3des_sha` +`ecdh_anon_aes_128_sha` +`ecdh_anon_aes_256_sha` + +### HMAC-SHA256 cipher suites + +`rsa_null_sha_256` +`rsa_aes_128_cbc_sha_256` +`rsa_aes_256_cbc_sha_256` +`dhe_rsa_aes_128_cbc_sha_256` +`dhe_rsa_aes_256_cbc_sha_256` +`ecdhe_ecdsa_aes_128_cbc_sha_256` +`ecdhe_rsa_aes_128_cbc_sha_256` + +### AES GCM cipher suites in RFC 5288 and RFC 5289 + +`rsa_aes_128_gcm_sha_256` +`dhe_rsa_aes_128_gcm_sha_256` +`dhe_dss_aes_128_gcm_sha_256` +`ecdhe_ecdsa_aes_128_gcm_sha_256` +`ecdh_ecdsa_aes_128_gcm_sha_256` +`ecdhe_rsa_aes_128_gcm_sha_256` +`ecdh_rsa_aes_128_gcm_sha_256` + +### cipher suites using SHA384 + +`rsa_aes_256_gcm_sha_384` +`dhe_rsa_aes_256_gcm_sha_384` +`dhe_dss_aes_256_gcm_sha_384` +`ecdhe_ecdsa_aes_256_sha_384` +`ecdhe_rsa_aes_256_sha_384` +`ecdhe_ecdsa_aes_256_gcm_sha_384` +`ecdhe_rsa_aes_256_gcm_sha_384` + +### chacha20-poly1305 cipher suites + +`ecdhe_rsa_chacha20_poly1305_sha_256` +`ecdhe_ecdsa_chacha20_poly1305_sha_256` +`dhe_rsa_chacha20_poly1305_sha_256` + +## GSKit + +Ciphers are internally defined as numeric codes (http://www.ibm.com/support/knowledgecenter/ssw_ibm_i_73/apis/gsk_attribute_set_buffer.htm), +but libcurl maps them to the following case-insensitive names. + +### SSL2 cipher suites (insecure: disabled by default) + +`rc2-md5` +`rc4-md5` +`exp-rc2-md5` +`exp-rc4-md5` +`des-cbc-md5` +`des-cbc3-md5` + +### SSL3 cipher suites + +`null-md5` +`null-sha` +`rc4-md5` +`rc4-sha` +`exp-rc2-cbc-md5` +`exp-rc4-md5` +`exp-des-cbc-sha` +`des-cbc3-sha` + +### TLS v1.0 cipher suites + +`null-md5` +`null-sha` +`rc4-md5` +`rc4-sha` +`exp-rc2-cbc-md5` +`exp-rc4-md5` +`exp-des-cbc-sha` +`des-cbc3-sha` +`aes128-sha` +`aes256-sha` + +### TLS v1.1 cipher suites + +`null-md5` +`null-sha` +`rc4-md5` +`rc4-sha` +`exp-des-cbc-sha` +`des-cbc3-sha` +`aes128-sha` +`aes256-sha` + +### TLS v1.2 cipher suites + +`null-md5` +`null-sha` +`null-sha256` +`rc4-md5` +`rc4-sha` +`des-cbc3-sha` +`aes128-sha` +`aes256-sha` +`aes128-sha256` +`aes256-sha256` +`aes128-gcm-sha256` +`aes256-gcm-sha384` + +## WolfSSL + +`RC4-SHA`, +`RC4-MD5`, +`DES-CBC3-SHA`, +`AES128-SHA`, +`AES256-SHA`, +`NULL-SHA`, +`NULL-SHA256`, +`DHE-RSA-AES128-SHA`, +`DHE-RSA-AES256-SHA`, +`DHE-PSK-AES256-GCM-SHA384`, +`DHE-PSK-AES128-GCM-SHA256`, +`PSK-AES256-GCM-SHA384`, +`PSK-AES128-GCM-SHA256`, +`DHE-PSK-AES256-CBC-SHA384`, +`DHE-PSK-AES128-CBC-SHA256`, +`PSK-AES256-CBC-SHA384`, +`PSK-AES128-CBC-SHA256`, +`PSK-AES128-CBC-SHA`, +`PSK-AES256-CBC-SHA`, +`DHE-PSK-AES128-CCM`, +`DHE-PSK-AES256-CCM`, +`PSK-AES128-CCM`, +`PSK-AES256-CCM`, +`PSK-AES128-CCM-8`, +`PSK-AES256-CCM-8`, +`DHE-PSK-NULL-SHA384`, +`DHE-PSK-NULL-SHA256`, +`PSK-NULL-SHA384`, +`PSK-NULL-SHA256`, +`PSK-NULL-SHA`, +`HC128-MD5`, +`HC128-SHA`, +`HC128-B2B256`, +`AES128-B2B256`, +`AES256-B2B256`, +`RABBIT-SHA`, +`NTRU-RC4-SHA`, +`NTRU-DES-CBC3-SHA`, +`NTRU-AES128-SHA`, +`NTRU-AES256-SHA`, +`AES128-CCM-8`, +`AES256-CCM-8`, +`ECDHE-ECDSA-AES128-CCM`, +`ECDHE-ECDSA-AES128-CCM-8`, +`ECDHE-ECDSA-AES256-CCM-8`, +`ECDHE-RSA-AES128-SHA`, +`ECDHE-RSA-AES256-SHA`, +`ECDHE-ECDSA-AES128-SHA`, 
+`ECDHE-ECDSA-AES256-SHA`, +`ECDHE-RSA-RC4-SHA`, +`ECDHE-RSA-DES-CBC3-SHA`, +`ECDHE-ECDSA-RC4-SHA`, +`ECDHE-ECDSA-DES-CBC3-SHA`, +`AES128-SHA256`, +`AES256-SHA256`, +`DHE-RSA-AES128-SHA256`, +`DHE-RSA-AES256-SHA256`, +`ECDH-RSA-AES128-SHA`, +`ECDH-RSA-AES256-SHA`, +`ECDH-ECDSA-AES128-SHA`, +`ECDH-ECDSA-AES256-SHA`, +`ECDH-RSA-RC4-SHA`, +`ECDH-RSA-DES-CBC3-SHA`, +`ECDH-ECDSA-RC4-SHA`, +`ECDH-ECDSA-DES-CBC3-SHA`, +`AES128-GCM-SHA256`, +`AES256-GCM-SHA384`, +`DHE-RSA-AES128-GCM-SHA256`, +`DHE-RSA-AES256-GCM-SHA384`, +`ECDHE-RSA-AES128-GCM-SHA256`, +`ECDHE-RSA-AES256-GCM-SHA384`, +`ECDHE-ECDSA-AES128-GCM-SHA256`, +`ECDHE-ECDSA-AES256-GCM-SHA384`, +`ECDH-RSA-AES128-GCM-SHA256`, +`ECDH-RSA-AES256-GCM-SHA384`, +`ECDH-ECDSA-AES128-GCM-SHA256`, +`ECDH-ECDSA-AES256-GCM-SHA384`, +`CAMELLIA128-SHA`, +`DHE-RSA-CAMELLIA128-SHA`, +`CAMELLIA256-SHA`, +`DHE-RSA-CAMELLIA256-SHA`, +`CAMELLIA128-SHA256`, +`DHE-RSA-CAMELLIA128-SHA256`, +`CAMELLIA256-SHA256`, +`DHE-RSA-CAMELLIA256-SHA256`, +`ECDHE-RSA-AES128-SHA256`, +`ECDHE-ECDSA-AES128-SHA256`, +`ECDH-RSA-AES128-SHA256`, +`ECDH-ECDSA-AES128-SHA256`, +`ECDHE-RSA-AES256-SHA384`, +`ECDHE-ECDSA-AES256-SHA384`, +`ECDH-RSA-AES256-SHA384`, +`ECDH-ECDSA-AES256-SHA384`, +`ECDHE-RSA-CHACHA20-POLY1305`, +`ECDHE-ECDSA-CHACHA20-POLY1305`, +`DHE-RSA-CHACHA20-POLY1305`, +`ECDHE-RSA-CHACHA20-POLY1305-OLD`, +`ECDHE-ECDSA-CHACHA20-POLY1305-OLD`, +`DHE-RSA-CHACHA20-POLY1305-OLD`, +`ADH-AES128-SHA`, +`QSH`, +`RENEGOTIATION-INFO`, +`IDEA-CBC-SHA`, +`ECDHE-ECDSA-NULL-SHA`, +`ECDHE-PSK-NULL-SHA256`, +`ECDHE-PSK-AES128-CBC-SHA256`, +`PSK-CHACHA20-POLY1305`, +`ECDHE-PSK-CHACHA20-POLY1305`, +`DHE-PSK-CHACHA20-POLY1305`, +`EDH-RSA-DES-CBC3-SHA`, diff --git a/deps-win32/curl-7.54.1/docs/CMakeLists.txt b/deps-win32/curl-7.54.1/docs/CMakeLists.txt new file mode 100644 index 0000000..6948617 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/CMakeLists.txt @@ -0,0 +1,3 @@ +#add_subdirectory(examples) +add_subdirectory(libcurl) +add_subdirectory(cmdline-opts) diff --git a/deps-win32/curl-7.54.1/docs/CODE_OF_CONDUCT.md b/deps-win32/curl-7.54.1/docs/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..04ea66e --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,32 @@ +Contributor Code of Conduct +=========================== + +As contributors and maintainers of this project, we pledge to respect all +people who contribute through reporting issues, posting feature requests, +updating documentation, submitting pull requests or patches, and other +activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, or religion. + +Examples of unacceptable behavior by participants include the use of sexual +language or imagery, derogatory comments or personal attacks, trolling, public +or private harassment, insults, or other unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. Project maintainers who do not +follow the Code of Conduct may be removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
+ +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by opening an issue or contacting one or more of the project +maintainers. + +This Code of Conduct is adapted from the [Contributor +Covenant](http://contributor-covenant.org), version 1.1.0, available at +[http://contributor-covenant.org/version/1/1/0/](http://contributor-covenant.org/version/1/1/0/) diff --git a/deps-win32/curl-7.54.1/docs/CODE_STYLE.md b/deps-win32/curl-7.54.1/docs/CODE_STYLE.md new file mode 100644 index 0000000..ba5f710 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/CODE_STYLE.md @@ -0,0 +1,238 @@ +# curl C code style + +Source code that has a common style is easier to read than code that uses +different styles in different places. It helps making the code feel like one +single code base. Easy-to-read is a very important property of code and helps +making it easier to review when new things are added and it helps debugging +code when developers are trying to figure out why things go wrong. A unified +style is more important than individual contributors having their own personal +tastes satisfied. + +Our C code has a few style rules. Most of them are verified and upheld by the +`lib/checksrc.pl` script. Invoked with `make checksrc` or even by default by +the build system when built after `./configure --enable-debug` has been used. + +It is normally not a problem for anyone to follow the guidelines, as you just +need to copy the style already used in the source code and there are no +particularly unusual rules in our set of rules. + +We also work hard on writing code that are warning-free on all the major +platforms and in general on as many platforms as possible. Code that obviously +will cause warnings will not be accepted as-is. + +## Naming + +Try using a non-confusing naming scheme for your new functions and variable +names. It doesn't necessarily have to mean that you should use the same as in +other places of the code, just that the names should be logical, +understandable and be named according to what they're used for. File-local +functions should be made static. We like lower case names. + +See the [INTERNALS](INTERNALS.md) document on how we name non-exported +library-global symbols. + +## Indenting + +We use only spaces for indentation, never TABs. We use two spaces for each new +open brace. + + if(something_is_true) { + while(second_statement == fine) { + moo(); + } + } + +## Comments + +Since we write C89 code, `//` comments are not allowed. They weren't +introduced in the C standard until C99. We use only `/*` and `*/` comments: + + /* this is a comment */ + +## Long lines + +Source code in curl may never be wider than 79 columns and there are two +reasons for maintaining this even in the modern era of very large and high +resolution screens: + +1. Narrower columns are easier to read than very wide ones. There's a reason + newspapers have used columns for decades or centuries. + +2. Narrower columns allow developers to easier show multiple pieces of code + next to each other in different windows. I often have two or three source + code windows next to each other on the same screen - as well as multiple + terminal and debugging windows. + +## Braces + +In if/while/do/for expressions, we write the open brace on the same line as +the keyword and we then set the closing brace on the same indentation level as +the initial keyword. 
Like this: + + if(age < 40) { + /* clearly a youngster */ + } + +You may omit the braces if they would contain only a one-line statement: + + if(!x) + continue; + +For functions the opening brace should be on a separate line: + + int main(int argc, char **argv) + { + return 1; + } + +## 'else' on the following line + +When adding an `else` clause to a conditional expression using braces, we add +it on a new line after the closing brace. Like this: + + if(age < 40) { + /* clearly a youngster */ + } + else { + /* probably grumpy */ + } + +## No space before parentheses + +When writing expressions using if/while/do/for, there shall be no space +between the keyword and the open parenthesis. Like this: + + while(1) { + /* loop forever */ + } + +## Use boolean conditions + +Rather than test a conditional value such as a bool against TRUE or FALSE, a +pointer against NULL or != NULL and an int against zero or not zero in +if/while conditions we prefer: + + result = do_something(); + if(!result) { + /* something went wrong */ + return result; + } + +## No assignments in conditions + +To increase readability and reduce complexity of conditionals, we avoid +assigning variables within if/while conditions. We frown upon this style: + + if((ptr = malloc(100)) == NULL) + return NULL; + +and instead we encourage the above version to be spelled out more clearly: + + ptr = malloc(100); + if(!ptr) + return NULL; + +## New block on a new line + +We never write multiple statements on the same source line, even for very +short if() conditions. + + if(a) + return TRUE; + else if(b) + return FALSE; + +and NEVER: + + if(a) return TRUE; + else if(b) return FALSE; + +## Space around operators + +Please use spaces on both sides of operators in C expressions. Postfix `(), +[], ->, ., ++, --` and Unary `+, - !, ~, &` operators excluded they should +have no space. + +Examples: + + bla = func(); + who = name[0]; + age += 1; + true = !false; + size += -2 + 3 * (a + b); + ptr->member = a++; + struct.field = b--; + ptr = &address; + contents = *pointer; + complement = ~bits; + empty = (!*string) ? TRUE : FALSE; + +## Column alignment + +Some statements cannot be completed on a single line because the line would +be too long, the statement too hard to read, or due to other style guidelines +above. In such a case the statement will span multiple lines. + +If a continuation line is part of an expression or sub-expression then you +should align on the appropriate column so that it's easy to tell what part of +the statement it is. Operators should not start continuation lines. In other +cases follow the 2-space indent guideline. Here are some examples from libcurl: + +~~~c + if(Curl_pipeline_wanted(handle->multi, CURLPIPE_HTTP1) && + (handle->set.httpversion != CURL_HTTP_VERSION_1_0) && + (handle->set.httpreq == HTTPREQ_GET || + handle->set.httpreq == HTTPREQ_HEAD)) + /* didn't ask for HTTP/1.0 and a GET or HEAD */ + return TRUE; +~~~ + +~~~c + case CURLOPT_KEEP_SENDING_ON_ERROR: + data->set.http_keep_sending_on_error = (0 != va_arg(param, long)) ? + TRUE : FALSE; + break; +~~~ + +~~~c + data->set.http_disable_hostname_check_before_authentication = + (0 != va_arg(param, long)) ? TRUE : FALSE; +~~~ + +~~~c + if(option) { + result = parse_login_details(option, strlen(option), + (userp ? &user : NULL), + (passwdp ? 
&passwd : NULL), + NULL); + } +~~~ + +~~~c + DEBUGF(infof(data, "Curl_pp_readresp_ %d bytes of trailing " + "server response left\n", + (int)clipamount)); +~~~ + +## Platform dependent code + +Use `#ifdef HAVE_FEATURE` to do conditional code. We avoid checking for +particular operating systems or hardware in the #ifdef lines. The HAVE_FEATURE +shall be generated by the configure script for unix-like systems and they are +hard-coded in the config-[system].h files for the others. + +We also encourage use of macros/functions that possibly are empty or defined +to constants when libcurl is built without that feature, to make the code +seamless. Like this style where the `magic()` function works differently +depending on a build-time conditional: + + #ifdef HAVE_MAGIC + void magic(int a) + { + return a + 2; + } + #else + #define magic(x) 1 + #endif + + int content = magic(3); diff --git a/deps-win32/curl-7.54.1/docs/CONTRIBUTE.md b/deps-win32/curl-7.54.1/docs/CONTRIBUTE.md new file mode 100644 index 0000000..7d3c2e0 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/CONTRIBUTE.md @@ -0,0 +1,266 @@ +# Contributing to the curl project + +This document is intended to offer guidelines on how to best contribute to the +curl project. This concerns new features as well as corrections to existing +flaws or bugs. + +## Learning curl + +### Join the Community + +Skip over to [https://curl.haxx.se/mail/](https://curl.haxx.se/mail/) and join +the appropriate mailing list(s). Read up on details before you post +questions. Read this file before you start sending patches! We prefer +questions sent to and discussions being held on the mailing list(s), not sent +to individuals. + +Before posting to one of the curl mailing lists, please read up on the +[mailing list etiquette](https://curl.haxx.se/mail/etiquette.html). + +We also hang out on IRC in #curl on irc.freenode.net + +If you're at all interested in the code side of things, consider clicking +'watch' on the [curl repo on github](https://github.com/curl/curl) to get +notified on pull requests and new issues posted there. + +### License and copyright + +When contributing with code, you agree to put your changes and new code under +the same license curl and libcurl is already using unless stated and agreed +otherwise. + +If you add a larger piece of code, you can opt to make that file or set of +files to use a different license as long as they don't enforce any changes to +the rest of the package and they make sense. Such "separate parts" can not be +GPL licensed (as we don't want copyleft to affect users of libcurl) but they +must use "GPL compatible" licenses (as we want to allow users to use libcurl +properly in GPL licensed environments). + +When changing existing source code, you do not alter the copyright of the +original file(s). The copyright will still be owned by the original creator(s) +or those who have been assigned copyright by the original author(s). + +By submitting a patch to the curl project, you are assumed to have the right +to the code and to be allowed by your employer or whatever to hand over that +patch/code to us. We will credit you for your changes as far as possible, to +give credit but also to keep a trace back to who made what changes. Please +always provide us with your full real name when contributing! 
+ +### What To Read + +Source code, the man pages, the [INTERNALS +document](https://curl.haxx.se/dev/internals.html), +[TODO](https://curl.haxx.se/docs/todo.html), +[KNOWN_BUGS](https://curl.haxx.se/docs/knownbugs.html) and the [most recent +changes](https://curl.haxx.se/dev/sourceactivity.html) in git. Just lurking on +the [curl-library mailing +list](https://curl.haxx.se/mail/list.cgi?list=curl-library) will give you a +lot of insights on what's going on right now. Asking there is a good idea too. + +## Write a good patch + +### Follow code style + +When writing C code, follow the +[CODE_STYLE](https://curl.haxx.se/dev/code-style.html) already established in +the project. Consistent style makes code easier to read and mistakes less +likely to happen. Run `make checksrc` before you submit anything, to make sure +you follow the basic style. That script doesn't verify everything, but if it +complains you know you have work to do. + +### Non-clobbering All Over + +When you write new functionality or fix bugs, it is important that you don't +fiddle all over the source files and functions. Remember that it is likely +that other people have done changes in the same source files as you have and +possibly even in the same functions. If you bring completely new +functionality, try writing it in a new source file. If you fix bugs, try to +fix one bug at a time and send them as separate patches. + +### Write Separate Changes + +It is annoying when you get a huge patch from someone that is said to fix 511 +odd problems, but discussions and opinions don't agree with 510 of them - or +509 of them were already fixed in a different way. Then the person merging +this change needs to extract the single interesting patch from somewhere +within the huge pile of source, and that creates a lot of extra work. + +Preferably, each fix that corrects a problem should be in its own patch/commit +with its own description/commit message stating exactly what they correct so +that all changes can be selectively applied by the maintainer or other +interested parties. + +Also, separate changes enable bisecting much better for tracking problems +and regression in the future. + +### Patch Against Recent Sources + +Please try to get the latest available sources to make your patches against. +It makes the lives of the developers so much easier. The very best is if you +get the most up-to-date sources from the git repository, but the latest +release archive is quite OK as well! + +### Documentation + +Writing docs is dead boring and one of the big problems with many open source +projects. But someone's gotta do it! It makes things a lot easier if you +submit a small description of your fix or your new features with every +contribution so that it can be swiftly added to the package documentation. + +The documentation is always made in man pages (nroff formatted) or plain +ASCII files. All HTML files on the web site and in the release archives are +generated from the nroff/ASCII versions. + +### Test Cases + +Since the introduction of the test suite, we can quickly verify that the main +features are working as they're supposed to. To maintain this situation and +improve it, all new features and functions that are added need to be tested +in the test suite. Every feature that is added should get at least one valid +test case that verifies that it works as documented. If every submitter also +posts a few test cases, it won't end up as a heavy burden on a single person! 
+
+If you don't have test cases, or perhaps you have done something that is very
+hard to write tests for, do explain exactly how you have otherwise tested and
+verified your changes.
+
+## Sharing Your Changes
+
+### How to get your changes into the main sources
+
+Ideally you file a [pull request on
+GitHub](https://github.com/curl/curl/pulls), but you can also send your plain
+patch to [the curl-library mailing
+list](https://curl.haxx.se/mail/list.cgi?list=curl-library).
+
+Either way, your change will be reviewed and discussed there and you will be
+expected to correct flaws pointed out and update accordingly, or the change
+risks stalling and eventually just getting deleted without action. As a
+submitter of a change, you are the owner of that change until it has been
+merged.
+
+Respond on the list or on GitHub about the change and answer questions and/or
+fix nits/flaws. This is very important. We will take lack of replies as a
+sign that you're not very anxious to get your patch accepted and we tend to
+simply drop such changes.
+
+### About pull requests
+
+With GitHub it is easy to send a [pull
+request](https://github.com/curl/curl/pulls) to the curl project to have
+changes merged.
+
+We strongly prefer pull requests to mailed patches, as a pull request becomes
+a proper git commit that is easy to merge, easy to track, and not as easy to
+lose in the flood of email as patches sometimes are on the mailing lists.
+
+Every pull request submitted will automatically be tested in several different
+ways. Every pull request is verified that:
+
+ - ... the code still builds, warning-free, on Linux and macOS, with both
+   clang and gcc
+ - ... the code still builds fine on Windows with several MSVC versions
+ - ... the code still builds with cmake on Linux, with gcc and clang
+ - ... the code follows rudimentary code style rules
+ - ... the test suite still runs 100% fine
+ - ... the release tarball (the "dist") still works
+ - ... the code coverage doesn't shrink drastically
+
+If the pull request fails one of these tests, it will show up as a red X and
+you are expected to fix the problem. If you don't understand what the issue
+is or have other problems fixing the complaint, just ask and other project
+members will likely be able to help out.
+
+When you adjust your pull requests after review, consider squashing the
+commits so that we can review the full updated version more easily.
+
+### Making quality patches
+
+Make the patch against source versions as recent as possible.
+
+If you've followed the tips in this document and your patch still hasn't been
+incorporated or responded to after some weeks, consider resubmitting it to the
+list or better yet: change it to a pull request.
+
+### Write good commit messages
+
+A short guide to how to write commit messages in the curl project.
+ + ---- start ---- + [area]: [short line describing the main effect] + -- empty line -- + [full description, no wider than 72 columns that describe as much as + possible as to why this change is made, and possibly what things + it fixes and everything else that is related] + -- empty line -- + [Closes/Fixes #1234 - if this closes or fixes a github issue] + [Bug: URL to source of the report or more related discussion] + [Reported-by: John Doe - credit the reporter] + [whatever-else-by: credit all helpers, finders, doers] + ---- stop ---- + +Don't forget to use commit --author="" if you commit someone else's work, and +make sure that you have your own user and email setup correctly in git before +you commit + +### Write Access to git Repository + +If you are a very frequent contributor, you may be given push access to the +git repository and then you'll be able to push your changes straight into the +git repo instead of sending changes as pull requests or by mail as patches. + +Just ask if this is what you'd want. You will be required to have posted +several high quality patches first, before you can be granted push access. + +### How To Make a Patch with git + +You need to first checkout the repository: + + git clone https://github.com/curl/curl.git + +You then proceed and edit all the files you like and you commit them to your +local repository: + + git commit [file] + +As usual, group your commits so that you commit all changes at once that +constitute a logical change. + +Once you have done all your commits and you're happy with what you see, you +can make patches out of your changes that are suitable for mailing: + + git format-patch remotes/origin/master + +This creates files in your local directory named NNNN-[name].patch for each +commit. + +Now send those patches off to the curl-library list. You can of course opt to +do that with the 'git send-email' command. + +### How To Make a Patch without git + +Keep a copy of the unmodified curl sources. Make your changes in a separate +source tree. When you think you have something that you want to offer the +curl community, use GNU diff to generate patches. + +If you have modified a single file, try something like: + + diff -u unmodified-file.c my-changed-one.c > my-fixes.diff + +If you have modified several files, possibly in different directories, you +can use diff recursively: + + diff -ur curl-original-dir curl-modified-sources-dir > my-fixes.diff + +The GNU diff and GNU patch tools exist for virtually all platforms, including +all kinds of Unixes and Windows: + +For unix-like operating systems: + + - [https://savannah.gnu.org/projects/patch/](https://savannah.gnu.org/projects/patch/) + - [https://www.gnu.org/software/diffutils/](https://www.gnu.org/software/diffutils/) + +For Windows: + + - [https://gnuwin32.sourceforge.io/packages/patch.htm](https://gnuwin32.sourceforge.io/packages/patch.htm) + - [https://gnuwin32.sourceforge.io/packages/diffutils.htm](https://gnuwin32.sourceforge.io/packages/diffutils.htm) diff --git a/deps-win32/curl-7.54.1/docs/FAQ b/deps-win32/curl-7.54.1/docs/FAQ new file mode 100644 index 0000000..78c08ba --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/FAQ @@ -0,0 +1,1586 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +FAQ + + 1. Philosophy + 1.1 What is cURL? + 1.2 What is libcurl? + 1.3 What is curl not? + 1.4 When will you make curl do XXXX ? + 1.5 Who makes curl? + 1.6 What do you get for making curl? + 1.7 What about CURL from curl.com? 
+ 1.8 I have a problem who do I mail? + 1.9 Where do I buy commercial support for curl? + 1.10 How many are using curl? + 1.11 Why don't you update ca-bundle.crt + 1.12 I have a problem who can I chat with? + 1.13 curl's ECCN number? + 1.14 How do I submit my patch? + 1.15 How do I port libcurl to my OS? + + 2. Install Related Problems + 2.1 configure doesn't find OpenSSL even when it is installed + 2.1.1 native linker doesn't find OpenSSL + 2.1.2 only the libssl lib is missing + 2.2 Does curl work/build with other SSL libraries? + 2.3 Where can I find a copy of LIBEAY32.DLL? + 2.4 Does curl support SOCKS (RFC 1928) ? + 2.5 Install libcurl for both 32bit and 64bit? + + 3. Usage Problems + 3.1 curl: (1) SSL is disabled, https: not supported + 3.2 How do I tell curl to resume a transfer? + 3.3 Why doesn't my posting using -F work? + 3.4 How do I tell curl to run custom FTP commands? + 3.5 How can I disable the Accept: */* header? + 3.6 Does curl support ASP, XML, XHTML or HTML version Y? + 3.7 Can I use curl to delete/rename a file through FTP? + 3.8 How do I tell curl to follow HTTP redirects? + 3.9 How do I use curl in my favorite programming language? + 3.10 What about SOAP, WebDAV, XML-RPC or similar protocols over HTTP? + 3.11 How do I POST with a different Content-Type? + 3.12 Why do FTP specific features over HTTP proxy fail? + 3.13 Why does my single/double quotes fail? + 3.14 Does curl support Javascript or PAC (automated proxy config)? + 3.15 Can I do recursive fetches with curl? + 3.16 What certificates do I need when I use SSL? + 3.17 How do I list the root dir of an FTP server? + 3.18 Can I use curl to send a POST/PUT and not wait for a response? + 3.19 How do I get HTTP from a host using a specific IP address? + 3.20 How to SFTP from my user's home directory? + 3.21 Protocol xxx not supported or disabled in libcurl + 3.22 curl -X gives me HTTP problems + + 4. Running Problems + 4.1 Problems connecting to SSL servers. + 4.2 Why do I get problems when I use & or % in the URL? + 4.3 How can I use {, }, [ or ] to specify multiple URLs? + 4.4 Why do I get downloaded data even though the web page doesn't exist? + 4.5 Why do I get return code XXX from a HTTP server? + 4.5.1 "400 Bad Request" + 4.5.2 "401 Unauthorized" + 4.5.3 "403 Forbidden" + 4.5.4 "404 Not Found" + 4.5.5 "405 Method Not Allowed" + 4.5.6 "301 Moved Permanently" + 4.6 Can you tell me what error code 142 means? + 4.7 How do I keep user names and passwords secret in Curl command lines? + 4.8 I found a bug! + 4.9 Curl can't authenticate to the server that requires NTLM? + 4.10 My HTTP request using HEAD, PUT or DELETE doesn't work! + 4.11 Why does my HTTP range requests return the full document? + 4.12 Why do I get "certificate verify failed" ? + 4.13 Why is curl -R on Windows one hour off? + 4.14 Redirects work in browser but not with curl! + 4.15 FTPS doesn't work + 4.16 My HTTP POST or PUT requests are slow! + 4.17 Non-functional connect timeouts on Windows + 4.18 file:// URLs containing drive letters (Windows, NetWare) + 4.19 Why doesn't curl return an error when the network cable is unplugged? + 4.20 curl doesn't return error for HTTP non-200 responses! + 4.21 Why is there a HTTP/1.1 in my HTTP/2 request? + + 5. libcurl Issues + 5.1 Is libcurl thread-safe? + 5.2 How can I receive all data into a large memory chunk? + 5.3 How do I fetch multiple files with libcurl? + 5.4 Does libcurl do Winsock initing on win32 systems? + 5.5 Does CURLOPT_WRITEDATA and CURLOPT_READDATA work on win32 ? 
+ 5.6 What about Keep-Alive or persistent connections? + 5.7 Link errors when building libcurl on Windows! + 5.8 libcurl.so.X: open failed: No such file or directory + 5.9 How does libcurl resolve host names? + 5.10 How do I prevent libcurl from writing the response to stdout? + 5.11 How do I make libcurl not receive the whole HTTP response? + 5.12 Can I make libcurl fake or hide my real IP address? + 5.13 How do I stop an ongoing transfer? + 5.14 Using C++ non-static functions for callbacks? + 5.15 How do I get an FTP directory listing? + 5.16 I want a different time-out! + 5.17 Can I write a server with libcurl? + 5.18 Does libcurl use threads? + + 6. License Issues + 6.1 I have a GPL program, can I use the libcurl library? + 6.2 I have a closed-source program, can I use the libcurl library? + 6.3 I have a BSD licensed program, can I use the libcurl library? + 6.4 I have a program that uses LGPL libraries, can I use libcurl? + 6.5 Can I modify curl/libcurl for my program and keep the changes secret? + 6.6 Can you please change the curl/libcurl license to XXXX? + 6.7 What are my obligations when using libcurl in my commercial apps? + + 7. PHP/CURL Issues + 7.1 What is PHP/CURL? + 7.2 Who wrote PHP/CURL? + 7.3 Can I perform multiple requests using the same handle? + 7.4 Does PHP/CURL have dependencies? + +============================================================================== + +1. Philosophy + + 1.1 What is cURL? + + cURL is the name of the project. The name is a play on 'Client for URLs', + originally with URL spelled in uppercase to make it obvious it deals with + URLs. The fact it can also be pronounced 'see URL' also helped, it works as + an abbreviation for "Client URL Request Library" or why not the recursive + version: "Curl URL Request Library". + + The cURL project produces two products: + + libcurl + + A free and easy-to-use client-side URL transfer library, supporting DICT, + FILE, FTP, FTPS, GOPHER, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, + POP3S, RTMP, RTSP, SCP, SFTP, SMB, SMBS, SMTP, SMTPS, TELNET and TFTP. + + libcurl supports HTTPS certificates, HTTP POST, HTTP PUT, FTP uploading, + Kerberos, SPNEGO, HTTP form based upload, proxies, cookies, user+password + authentication, file transfer resume, http proxy tunneling and more! + + libcurl is highly portable, it builds and works identically on numerous + platforms, including Solaris, NetBSD, FreeBSD, OpenBSD, Darwin, HP-UX, + IRIX, AIX, Tru64, Linux, UnixWare, HURD, Windows, Amiga, OS/2, BeOS, Mac + OS X, Ultrix, QNX, OpenVMS, RISC OS, Novell NetWare, DOS, Symbian, OSF, + Android, Minix, IBM TPF and more... + + libcurl is free, thread-safe, IPv6 compatible, feature rich, well + supported and fast. + + curl + + A command line tool for getting or sending files using URL syntax. + + Since curl uses libcurl, curl supports the same wide range of common + Internet protocols that libcurl does. + + We pronounce curl with an initial k sound. It rhymes with words like girl + and earl. This is a short WAV file to help you: + + http://media.merriam-webster.com/soundc11/c/curl0001.wav + + There are numerous sub-projects and related projects that also use the word + curl in the project names in various combinations, but you should take + notice that this FAQ is directed at the command-line tool named curl (and + libcurl the library), and may therefore not be valid for other curl-related + projects. (There is however a small section for the PHP/CURL in this FAQ.) + + 1.2 What is libcurl? 
+ + libcurl is a reliable and portable library which provides you with an easy + interface to a range of common Internet protocols. + + You can use libcurl for free in your application, be it open source, + commercial or closed-source. + + libcurl is most probably the most portable, most powerful and most often + used C-based multi-platform file transfer library on this planet - be it + open source or commercial. + + 1.3 What is curl not? + + Curl is not a wget clone. That is a common misconception. Never, during + curl's development, have we intended curl to replace wget or compete on its + market. Curl is targeted at single-shot file transfers. + + Curl is not a web site mirroring program. If you want to use curl to mirror + something: fine, go ahead and write a script that wraps around curl to make + it reality (like curlmirror.pl does). + + Curl is not an FTP site mirroring program. Sure, get and send FTP with curl + but if you want systematic and sequential behavior you should write a + script (or write a new program that interfaces libcurl) and do it. + + Curl is not a PHP tool, even though it works perfectly well when used from + or with PHP (when using the PHP/CURL module). + + Curl is not a program for a single operating system. Curl exists, compiles, + builds and runs under a wide range of operating systems, including all + modern Unixes (and a bunch of older ones too), Windows, Amiga, BeOS, OS/2, + OS X, QNX etc. + + 1.4 When will you make curl do XXXX ? + + We love suggestions of what to change in order to make curl and libcurl + better. We do however believe in a few rules when it comes to the future of + curl: + + Curl -- the command line tool -- is to remain a non-graphical command line + tool. If you want GUIs or fancy scripting capabilities, you should look for + another tool that uses libcurl. + + We do not add things to curl that other small and available tools already do + very well at the side. Curl's output can be piped into another program or + redirected to another file for the next program to interpret. + + We focus on protocol related issues and improvements. If you wanna do more + magic with the supported protocols than curl currently does, chances are good + we will agree. If you wanna add more protocols, we may very well agree. + + If you want someone else to do all the work while you wait for us to + implement it for you, that is not a very friendly attitude. We spend a + considerable time already on maintaining and developing curl. In order to + get more out of us, you should consider trading in some of your time and + effort in return. Simply go to the GitHub repo which resides at + https://github.com/curl/curl, fork the project, and create pull requests + with your proposed changes. + + If you write the code, chances are better that it will get into curl faster. + + 1.5 Who makes curl? + + curl and libcurl are not made by any single individual. Daniel Stenberg is + project leader and main developer, but other persons' submissions are + important and crucial. Anyone can contribute and post their changes and + improvements and have them inserted in the main sources (of course on the + condition that developers agree that the fixes are good). + + The full list of all contributors is found in the docs/THANKS file. + + curl is developed by a community, with Daniel at the wheel. + + 1.6 What do you get for making curl? + + Project cURL is entirely free and open. No person gets paid for developing + curl full time. We do this voluntarily, mostly in our spare time. 
+ Occasionally companies pay individual developers to work on curl, but that's + up to each company and developer. This is not controlled by nor supervised in + any way by the project. + + We still get help from companies. Haxx provides web site, bandwidth, mailing + lists etc, sourceforge.net hosts project services we take advantage from, + like the bug tracker, and GitHub hosts the primary git repository at + https://github.com/curl/curl. Also again, some companies have sponsored + certain parts of the development in the past and I hope some will continue to + do so in the future. + + If you want to support our project, consider a donation or a banner-program + or even better: by helping us with coding, documenting or testing etc. + + 1.7 What about CURL from curl.com? + + During the summer of 2001, curl.com was busy advertising their client-side + programming language for the web, named CURL. + + We are in no way associated with curl.com or their CURL programming + language. + + Our project name curl has been in effective use since 1998. We were not the + first computer related project to use the name "curl" and do not claim any + rights to the name. + + We recognize that we will be living in parallel with curl.com and wish them + every success. + + 1.8 I have a problem whom do I mail? + + Please do not mail any single individual unless you really need to. Keep + curl-related questions on a suitable mailing list. All available mailing + lists are listed in the MANUAL document and online at + https://curl.haxx.se/mail/ + + Keeping curl-related questions and discussions on mailing lists allows + others to join in and help, to share their ideas, to contribute their + suggestions and to spread their wisdom. Keeping discussions on public mailing + lists also allows for others to learn from this (both current and future + users thanks to the web based archives of the mailing lists), thus saving us + from having to repeat ourselves even more. Thanks for respecting this. + + If you have found or simply suspect a security problem in curl or libcurl, + mail curl-security at haxx.se (closed list of receivers, mails are not + disclosed) and tell. Then we can produce a fix in a timely manner before the + flaw is announced to the world, thus lessen the impact the problem will have + on existing users. + + 1.9 Where do I buy commercial support for curl? + + curl is fully open source. It means you can hire any skilled engineer to fix + your curl-related problems. + + We list available alternatives on the curl web site: + https://curl.haxx.se/support.html + + 1.10 How many are using curl? + + It is impossible to tell. + + We don't know how many users that knowingly have installed and use curl. + + We don't know how many users that use curl without knowing that they are in + fact using it. + + We don't know how many users that downloaded or installed curl and then + never use it. + + In May 2012 Daniel did a counting game and came up with a number that may + be completely wrong or somewhat accurate. Over 500 million! + + See https://daniel.haxx.se/blog/2012/05/16/300m-users/ + + 1.11 Why don't you update ca-bundle.crt + + The ca cert bundle that used to be shipped with curl was very outdated and + must be replaced with an up-to-date version by anyone who wants to verify + peers. It is no longer provided by curl. The last curl release that ever + shipped a ca cert bundle was curl 7.18.0. 
+ + In the cURL project we've decided not to attempt to keep this file updated + (or even present anymore) since deciding what to add to a ca cert bundle is + an undertaking we've not been ready to accept, and the one we can get from + Mozilla is perfectly fine so there's no need to duplicate that work. + + Today, with many services performed over HTTPS, every operating system + should come with a default ca cert bundle that can be deemed somewhat + trustworthy and that collection (if reasonably updated) should be deemed to + be a lot better than a private curl version. + + If you want the most recent collection of ca certs that Mozilla Firefox + uses, we recommend that you extract the collection yourself from Mozilla + Firefox (by running 'make ca-bundle), or by using our online service setup + for this purpose: https://curl.haxx.se/docs/caextract.html + + 1.12 I have a problem who can I chat with? + + There's a bunch of friendly people hanging out in the #curl channel on the + IRC network irc.freenode.net. If you're polite and nice, chances are good + that you can get -- or provide -- help instantly. + + 1.13 curl's ECCN number? + + The US government restricts exports of software that contains or uses + cryptography. When doing so, the Export Control Classification Number (ECCN) + is used to identify the level of export control etc. + + Apache Software Foundation gives a good explanation of ECCNs at + https://www.apache.org/dev/crypto.html + + We believe curl's number might be ECCN 5D002, another possibility is + 5D992. It seems necessary to write them (the authority that administers ECCN + numbers), asking to confirm. + + Comprehensible explanations of the meaning of such numbers and how to obtain + them (resp.) are here + + http://www.bis.doc.gov/licensing/exportingbasics.htm + http://www.bis.doc.gov/licensing/do_i_needaneccn.html + + An incomprehensible description of the two numbers above is here + http://www.access.gpo.gov/bis/ear/pdf/ccl5-pt2.pdf + + 1.14 How do I submit my patch? + + When you have made a patch or a change of whatever sort, and want to submit + that to the project, there are a few different ways we prefer: + + o send a patch to the curl-library mailing list. We're many subscribers + there and there are lots of people who can review patches, comment on them + and "receive" them properly. + + o if your patch changes or fixes a bug, you can also opt to submit a bug + report in the bug tracker and attach your patch there. There are less + people involved there. + + Lots of more details are found in the CONTRIBUTE and INTERNALS docs. + + 1.15 How do I port libcurl to my OS? + + Here's a rough step-by-step: + + 1. copy a suitable lib/config-*.h file as a start to lib/config-[youros].h + + 2. edit lib/config-[youros].h to match your OS and setup + + 3. edit lib/curl_setup.h to include config-[youros].h when your OS is + detected by the preprocessor, in the style others already exist + + 4. compile lib/*.c and make them into a library + + +2. Install Related Problems + + 2.1 configure doesn't find OpenSSL even when it is installed + + This may be because of several reasons. + + 2.1.1 native linker doesn't find openssl + + Affected platforms: + Solaris (native cc compiler) + HPUX (native cc compiler) + SGI IRIX (native cc compiler) + SCO UNIX (native cc compiler) + + When configuring curl, I specify --with-ssl. 
OpenSSL is installed in + /usr/local/ssl Configure reports SSL in /usr/local/ssl, but fails to find + CRYPTO_lock in -lcrypto + + Cause: The cc for this test places the -L/usr/local/ssl/lib AFTER + -lcrypto, so ld can't find the library. This is due to a bug in the GNU + autoconf tool. + + Workaround: Specifying "LDFLAGS=-L/usr/local/ssl/lib" in front of + ./configure places the -L/usr/local/ssl/lib early enough in the command + line to make things work + + 2.1.2 only the libssl lib is missing + + If all include files and the libcrypto lib is present, with only the + libssl being missing according to configure, this is most likely because + a few functions are left out from the libssl. + + If the function names missing include RSA or RSAREF you can be certain + that this is because libssl requires the RSA and RSAREF libs to build. + + See the INSTALL file section that explains how to add those libs to + configure. Make sure that you remove the config.cache file before you + rerun configure with the new flags. + + 2.2 Does curl work/build with other SSL libraries? + + Curl has been written to use a generic SSL function layer internally, and + that SSL functionality can then be provided by one out of many different SSL + backends. + + curl can be built to use one of the following SSL alternatives: OpenSSL, + GnuTLS, yassl, NSS, PolarSSL, axTLS, Secure Transport (native iOS/OS X), + WinSSL (native Windows) or GSKit (native IBM i). They all have their pros + and cons, and we try to maintain a comparison of them here: + https://curl.haxx.se/docs/ssl-compared.html + + 2.3 Where can I find a copy of LIBEAY32.DLL? + + That is an OpenSSL binary built for Windows. + + Curl can be built with OpenSSL to do the SSL stuff. The LIBEAY32.DLL is then + what curl needs on a windows machine to do https:// etc. Check out the curl + web site to find accurate and up-to-date pointers to recent OpenSSL DLLs and + other binary packages. + + 2.4 Does curl support SOCKS (RFC 1928) ? + + Yes, SOCKS 4 and 5 are supported. + + 2.5 Install libcurl for both 32bit and 64bit? + + In curl's configure procedure one of the regular include files gets created + with platform specific information. The file 'curl/curlbuild.h' in the + installed libcurl file tree is therefore somewhat tied to that particular + platform. + + To allow applications to get built for either 32bit or 64bit you need to + install libcurl headers for both setups and unfortunately curl doesn't do + this automatically. + + A commonly used procedure is this: + + $ ./configure [32bit platform] + $ mv curl/curlbuild.h curl/curlbuild-32bit.h + $ ./configure [64bit platform] + $ mv curl/curlbuild.h curl/curlbuild-64bit.h + + Then you make a toplevel curl/curlbuild.h replacement that only does this: + + #ifdef IS_32BIT + #include "curlbuild-32bit.h" + else + #include "curlbuild-64bit.h" + #endif + + +3. Usage problems + + 3.1 curl: (1) SSL is disabled, https: not supported + + If you get this output when trying to get anything from a https:// server, + it means that the instance of curl/libcurl that you're using was built + without support for this protocol. + + This could've happened if the configure script that was run at build time + couldn't find all libs and include files curl requires for SSL to work. If + the configure script fails to find them, curl is simply built without SSL + support. 
+ + To get the https:// support into a curl that was previously built but that + reports that https:// is not supported, you should dig through the document + and logs and check out why the configure script doesn't find the SSL libs + and/or include files. + + Also, check out the other paragraph in this FAQ labelled "configure doesn't + find OpenSSL even when it is installed". + + 3.2 How do I tell curl to resume a transfer? + + Curl supports resumed transfers both ways on both FTP and HTTP. + Try the -C option. + + 3.3 Why doesn't my posting using -F work? + + You can't arbitrarily use -F or -d, the choice between -F or -d depends on the + HTTP operation you need curl to do and what the web server that will receive + your post expects. + + If the form you're trying to submit uses the type 'multipart/form-data', then + and only then you must use the -F type. In all the most common cases, you + should use -d which then causes a posting with the type + 'application/x-www-form-urlencoded'. + + This is described in some detail in the MANUAL and TheArtOfHttpScripting + documents, and if you don't understand it the first time, read it again + before you post questions about this to the mailing list. Also, try reading + through the mailing list archives for old postings and questions regarding + this. + + 3.4 How do I tell curl to run custom FTP commands? + + You can tell curl to perform optional commands both before and/or after a + file transfer. Study the -Q/--quote option. + + Since curl is used for file transfers, you don't normally use curl to + perform FTP commands without transferring anything. Therefore you must + always specify a URL to transfer to/from even when doing custom FTP + commands, or use -I which implies the "no body" option sent to libcurl. + + 3.5 How can I disable the Accept: */* header? + + You can change all internally generated headers by adding a replacement with + the -H/--header option. By adding a header with empty contents you safely + disable that one. Use -H "Accept:" to disable that specific header. + + 3.6 Does curl support ASP, XML, XHTML or HTML version Y? + + To curl, all contents are alike. It doesn't matter how the page was + generated. It may be ASP, PHP, Perl, shell-script, SSI or plain HTML + files. There's no difference to curl and it doesn't even know what kind of + language that generated the page. + + See also item 3.14 regarding javascript. + + 3.7 Can I use curl to delete/rename a file through FTP? + + Yes. You specify custom FTP commands with -Q/--quote. + + One example would be to delete a file after you have downloaded it: + + curl -O ftp://download.com/coolfile -Q '-DELE coolfile' + + or rename a file after upload: + + curl -T infile ftp://upload.com/dir/ -Q "-RNFR infile" -Q "-RNTO newname" + + 3.8 How do I tell curl to follow HTTP redirects? + + Curl does not follow so-called redirects by default. The Location: header + that informs the client about this is only interpreted if you're using the + -L/--location option. As in: + + curl -L http://redirector.com + + Not all redirects are HTTP ones, see 4.14 + + 3.9 How do I use curl in my favorite programming language? + + There exist many language interfaces/bindings for curl that integrates it + better with various languages. If you are fluid in a script language, you + may very well opt to use such an interface instead of using the command line + tool. 
+ + Find out more about which languages that support curl directly, and how to + install and use them, in the libcurl section of the curl web site: + https://curl.haxx.se/libcurl/ + + All the various bindings to libcurl are made by other projects and people, + outside of the cURL project. The cURL project itself only produces libcurl + with its plain C API. If you don't find anywhere else to ask you can ask + about bindings on the curl-library list too, but be prepared that people on + that list may not know anything about bindings. + + In October 2009, there were interfaces available for the following + languages: Ada95, Basic, C, C++, Ch, Cocoa, D, Dylan, Eiffel, Euphoria, + Ferite, Gambas, glib/GTK+, Haskell, ILE/RPG, Java, Lisp, Lua, Mono, .NET, + Object-Pascal, OCaml, Pascal, Perl, PHP, PostgreSQL, Python, R, Rexx, Ruby, + Scheme, S-Lang, Smalltalk, SP-Forth, SPL, Tcl, Visual Basic, Visual FoxPro, + Q, wxwidgets and XBLite. By the time you read this, additional ones may have + appeared! + + 3.10 What about SOAP, WebDAV, XML-RPC or similar protocols over HTTP? + + Curl adheres to the HTTP spec, which basically means you can play with *any* + protocol that is built on top of HTTP. Protocols such as SOAP, WEBDAV and + XML-RPC are all such ones. You can use -X to set custom requests and -H to + set custom headers (or replace internally generated ones). + + Using libcurl is of course just as good and you'd just use the proper + library options to do the same. + + 3.11 How do I POST with a different Content-Type? + + You can always replace the internally generated headers with -H/--header. + To make a simple HTTP POST with text/xml as content-type, do something like: + + curl -d "datatopost" -H "Content-Type: text/xml" [URL] + + 3.12 Why do FTP specific features over HTTP proxy fail? + + Because when you use a HTTP proxy, the protocol spoken on the network will + be HTTP, even if you specify a FTP URL. This effectively means that you + normally can't use FTP specific features such as FTP upload and FTP quote + etc. + + There is one exception to this rule, and that is if you can "tunnel through" + the given HTTP proxy. Proxy tunneling is enabled with a special option (-p) + and is generally not available as proxy admins usually disable tunneling to + ports other than 443 (which is used for HTTPS access through proxies). + + 3.13 Why does my single/double quotes fail? + + To specify a command line option that includes spaces, you might need to + put the entire option within quotes. Like in: + + curl -d " with spaces " url.com + + or perhaps + + curl -d ' with spaces ' url.com + + Exactly what kind of quotes and how to do this is entirely up to the shell + or command line interpreter that you are using. For most unix shells, you + can more or less pick either single (') or double (") quotes. For + Windows/DOS prompts I believe you're forced to use double (") quotes. + + Please study the documentation for your particular environment. Examples in + the curl docs will use a mix of both of these as shown above. You must + adjust them to work in your environment. + + Remember that curl works and runs on more operating systems than most single + individuals have ever tried. + + 3.14 Does curl support Javascript or PAC (automated proxy config)? + + Many web pages do magic stuff using embedded Javascript. Curl and libcurl + have no built-in support for that, so it will be treated just like any other + contents. 
+ + .pac files are a netscape invention and are sometimes used by organizations + to allow them to differentiate which proxies to use. The .pac contents is + just a Javascript program that gets invoked by the browser and that returns + the name of the proxy to connect to. Since curl doesn't support Javascript, + it can't support .pac proxy configuration either. + + Some workarounds usually suggested to overcome this Javascript dependency: + + Depending on the Javascript complexity, write up a script that translates it + to another language and execute that. + + Read the Javascript code and rewrite the same logic in another language. + + Implement a Javascript interpreter, people have successfully used the + Mozilla Javascript engine in the past. + + Ask your admins to stop this, for a static proxy setup or similar. + + 3.15 Can I do recursive fetches with curl? + + No. curl itself has no code that performs recursive operations, such as + those performed by wget and similar tools. + + There exists wrapper scripts with that functionality (for example the + curlmirror perl script), and you can write programs based on libcurl to do + it, but the command line tool curl itself cannot. + + 3.16 What certificates do I need when I use SSL? + + There are three different kinds of "certificates" to keep track of when we + talk about using SSL-based protocols (HTTPS or FTPS) using curl or libcurl. + + CLIENT CERTIFICATE + + The server you communicate with may require that you can provide this in + order to prove that you actually are who you claim to be. If the server + doesn't require this, you don't need a client certificate. + + A client certificate is always used together with a private key, and the + private key has a pass phrase that protects it. + + SERVER CERTIFICATE + + The server you communicate with has a server certificate. You can and should + verify this certificate to make sure that you are truly talking to the real + server and not a server impersonating it. + + CERTIFICATE AUTHORITY CERTIFICATE ("CA cert") + + You often have several CA certs in a CA cert bundle that can be used to + verify a server certificate that was signed by one of the authorities in the + bundle. curl does not come with a CA cert bundle but most curl installs + provide one. You can also override the default. + + The server certificate verification process is made by using a Certificate + Authority certificate ("CA cert") that was used to sign the server + certificate. Server certificate verification is enabled by default in curl + and libcurl and is often the reason for problems as explained in FAQ entry + 4.12 and the SSLCERTS document + (https://curl.haxx.se/docs/sslcerts.html). Server certificates that are + "self-signed" or otherwise signed by a CA that you do not have a CA cert + for, cannot be verified. If the verification during a connect fails, you are + refused access. You then need to explicitly disable the verification to + connect to the server. + + 3.17 How do I list the root dir of an FTP server? + + There are two ways. The way defined in the RFC is to use an encoded slash + in the first path part. List the "/tmp" dir like this: + + curl ftp://ftp.sunet.se/%2ftmp/ + + or the not-quite-kosher-but-more-readable way, by simply starting the path + section of the URL with a slash: + + curl ftp://ftp.sunet.se//tmp/ + + 3.18 Can I use curl to send a POST/PUT and not wait for a response? + + No. + + But you could easily write your own program using libcurl to do such stunts. 
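  As a rough sketch of such a stunt (placeholder URL and payload, error
  handling trimmed), the program below POSTs data and then aborts the
  transfer as soon as the first piece of the response body arrives, by
  returning an error from the write callback. It still has to wait for the
  response to begin, but it never reads it:

    #include <curl/curl.h>

    /* Returning anything other than size*nmemb makes libcurl abort the
       transfer with CURLE_WRITE_ERROR, so the response body is dropped. */
    static size_t drop_response(void *ptr, size_t size, size_t nmemb,
                                void *userdata)
    {
      (void)ptr; (void)size; (void)nmemb; (void)userdata;
      return 0;
    }

    int main(void)
    {
      CURL *curl;
      curl_global_init(CURL_GLOBAL_DEFAULT);
      curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/receiver");
        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel");
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, drop_response);
        /* CURLE_WRITE_ERROR is the expected outcome here and simply means
           "the response was deliberately not read". */
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      curl_global_cleanup();
      return 0;
    }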
+ + 3.19 How do I get HTTP from a host using a specific IP address? + + For example, you may be trying out a web site installation that isn't yet in + the DNS. Or you have a site using multiple IP addresses for a given host + name and you want to address a specific one out of the set. + + Set a custom Host: header that identifies the server name you want to reach + but use the target IP address in the URL: + + curl --header "Host: www.example.com" http://127.0.0.1/ + + You can also opt to add faked host name entries to curl with the --resolve + option. That has the added benefit that things like redirects will also work + properly. The above operation would instead be done as: + + curl --resolve www.example.com:80:127.0.0.1 http://www.example.com/ + + 3.20 How to SFTP from my user's home directory? + + Contrary to how FTP works, SFTP and SCP URLs specify the exact directory to + work with. It means that if you don't specify that you want the user's home + directory, you get the actual root directory. + + To specify a file in your user's home directory, you need to use the correct + URL syntax which for sftp might look similar to: + + curl -O -u user:password sftp://example.com/~/file.txt + + and for SCP it is just a different protocol prefix: + + curl -O -u user:password scp://example.com/~/file.txt + + 3.21 Protocol xxx not supported or disabled in libcurl + + When passing on a URL to curl to use, it may respond that the particular + protocol is not supported or disabled. The particular way this error message + is phrased is because curl doesn't make a distinction internally of whether + a particular protocol is not supported (i.e. never got any code added that + knows how to speak that protocol) or if it was explicitly disabled. curl can + be built to only support a given set of protocols, and the rest would then + be disabled or not supported. + + Note that this error will also occur if you pass a wrongly spelled protocol + part as in "htpt://example.com" or as in the less evident case if you prefix + the protocol part with a space as in " http://example.com/". + + 3.22 curl -X gives me HTTP problems + + In normal circumstances, -X should hardly ever be used. + + By default you use curl without explicitly saying which request method to + use when the URL identifies a HTTP transfer. If you just pass in a URL like + "curl http://example.com" it will use GET. If you use -d or -F curl will use + POST, -I will cause a HEAD and -T will make it a PUT. + + If for whatever reason you're not happy with these default choices that curl + does for you, you can override those request methods by specifying -X + [WHATEVER]. This way you can for example send a DELETE by doing "curl -X + DELETE [URL]". + + It is thus pointless to do "curl -XGET [URL]" as GET would be used + anyway. In the same vein it is pointless to do "curl -X POST -d data + [URL]"... But you can make a fun and somewhat rare request that sends a + request-body in a GET request with something like "curl -X GET -d data + [URL]" + + Note that -X doesn't actually change curl's behavior as it only modifies the + actual string sent in the request, but that may of course trigger a + different set of events. + + Accordingly, by using -XPOST on a command line that for example would follow + a 303 redirect, you will effectively prevent curl from behaving + correctly. Be aware. + + +4. Running Problems + + 4.1 Problems connecting to SSL servers. 
+ + It took a very long time before we could sort out why curl had problems to + connect to certain SSL servers when using SSLeay or OpenSSL v0.9+. The + error sometimes showed up similar to: + + 16570:error:1407D071:SSL routines:SSL2_READ:bad mac decode:s2_pkt.c:233: + + It turned out to be because many older SSL servers don't deal with SSLv3 + requests properly. To correct this problem, tell curl to select SSLv2 from + the command line (-2/--sslv2). + + There have also been examples where the remote server didn't like the SSLv2 + request and instead you had to force curl to use SSLv3 with -3/--sslv3. + + 4.2 Why do I get problems when I use & or % in the URL? + + In general unix shells, the & symbol is treated specially and when used, it + runs the specified command in the background. To safely send the & as a part + of a URL, you should quote the entire URL by using single (') or double (") + quotes around it. Similar problems can also occur on some shells with other + characters, including ?*!$~(){}<>\|;`. When in doubt, quote the URL. + + An example that would invoke a remote CGI that uses &-symbols could be: + + curl 'http://www.altavista.com/cgi-bin/query?text=yes&q=curl' + + In Windows, the standard DOS shell treats the percent sign specially and you + need to use TWO percent signs for each single one you want to use in the + URL. + + If you want a literal percent sign to be part of the data you pass in a POST + using -d/--data you must encode it as '%25' (which then also needs the + percent sign doubled on Windows machines). + + 4.3 How can I use {, }, [ or ] to specify multiple URLs? + + Because those letters have a special meaning to the shell, to be used in + a URL specified to curl you must quote them. + + An example that downloads two URLs (sequentially) would be: + + curl '{curl,www}.haxx.se' + + To be able to use those characters as actual parts of the URL (without using + them for the curl URL "globbing" system), use the -g/--globoff option: + + curl -g 'www.site.com/weirdname[].html' + + 4.4 Why do I get downloaded data even though the web page doesn't exist? + + Curl asks remote servers for the page you specify. If the page doesn't exist + at the server, the HTTP protocol defines how the server should respond and + that means that headers and a "page" will be returned. That's simply how + HTTP works. + + By using the --fail option you can tell curl explicitly to not get any data + if the HTTP return code doesn't say success. + + 4.5 Why do I get return code XXX from a HTTP server? + + RFC2616 clearly explains the return codes. This is a short transcript. Go + read the RFC for exact details: + + 4.5.1 "400 Bad Request" + + The request could not be understood by the server due to malformed + syntax. The client SHOULD NOT repeat the request without modifications. + + 4.5.2 "401 Unauthorized" + + The request requires user authentication. + + 4.5.3 "403 Forbidden" + + The server understood the request, but is refusing to fulfil it. + Authorization will not help and the request SHOULD NOT be repeated. + + 4.5.4 "404 Not Found" + + The server has not found anything matching the Request-URI. No indication + is given of whether the condition is temporary or permanent. + + 4.5.5 "405 Method Not Allowed" + + The method specified in the Request-Line is not allowed for the resource + identified by the Request-URI. The response MUST include an Allow header + containing a list of valid methods for the requested resource. 
+ + 4.5.6 "301 Moved Permanently" + + If you get this return code and an HTML output similar to this: + +

     <H1>Moved Permanently</H1>

The document has moved here. + + it might be because you request a directory URL but without the trailing + slash. Try the same operation again _with_ the trailing URL, or use the + -L/--location option to follow the redirection. + + 4.6 Can you tell me what error code 142 means? + + All curl error codes are described at the end of the man page, in the + section called "EXIT CODES". + + Error codes that are larger than the highest documented error code means + that curl has exited due to a crash. This is a serious error, and we + appreciate a detailed bug report from you that describes how we could go + ahead and repeat this! + + 4.7 How do I keep user names and passwords secret in Curl command lines? + + This problem has two sides: + + The first part is to avoid having clear-text passwords in the command line + so that they don't appear in 'ps' outputs and similar. That is easily + avoided by using the "-K" option to tell curl to read parameters from a file + or stdin to which you can pass the secret info. curl itself will also + attempt to "hide" the given password by blanking out the option - this + doesn't work on all platforms. + + To keep the passwords in your account secret from the rest of the world is + not a task that curl addresses. You could of course encrypt them somehow to + at least hide them from being read by human eyes, but that is not what + anyone would call security. + + Also note that regular HTTP (using Basic authentication) and FTP passwords + are sent in clear across the network. All it takes for anyone to fetch them + is to listen on the network. Eavesdropping is very easy. Use more secure + authentication methods (like Digest, Negotiate or even NTLM) or consider the + SSL-based alternatives HTTPS and FTPS. + + 4.8 I found a bug! + + It is not a bug if the behavior is documented. Read the docs first. + Especially check out the KNOWN_BUGS file, it may be a documented bug! + + If it is a problem with a binary you've downloaded or a package for your + particular platform, try contacting the person who built the package/archive + you have. + + If there is a bug, read the BUGS document first. Then report it as described + in there. + + 4.9 Curl can't authenticate to the server that requires NTLM? + + NTLM support requires OpenSSL, GnuTLS, mbedTLS, NSS, Secure Transport, or + Microsoft Windows libraries at build-time to provide this functionality. + + NTLM is a Microsoft proprietary protocol. Proprietary formats are evil. You + should not use such ones. + + 4.10 My HTTP request using HEAD, PUT or DELETE doesn't work! + + Many web servers allow or demand that the administrator configures the + server properly for these requests to work on the web server. + + Some servers seem to support HEAD only on certain kinds of URLs. + + To fully grasp this, try the documentation for the particular server + software you're trying to interact with. This is not anything curl can do + anything about. + + 4.11 Why does my HTTP range requests return the full document? + + Because the range may not be supported by the server, or the server may + choose to ignore it and return the full document anyway. + + 4.12 Why do I get "certificate verify failed" ? + + You invoke curl 7.10 or later to communicate on a https:// URL and get an + error back looking something similar to this: + + curl: (35) SSL: error:14090086:SSL routines: + SSL3_GET_SERVER_CERTIFICATE:certificate verify failed + + Then it means that curl couldn't verify that the server's certificate was + good. 
Curl verifies the certificate using the CA cert bundle that comes with + the curl installation. + + To disable the verification (which makes it act like curl did before 7.10), + use -k. This does however enable man-in-the-middle attacks. + + If you get this failure but are having a CA cert bundle installed and used, + the server's certificate is not signed by one of the CA's in the bundle. It + might for example be self-signed. You then correct this problem by obtaining + a valid CA cert for the server. Or again, decrease the security by disabling + this check. + + Details are also in the SSLCERTS file in the release archives, found online + here: https://curl.haxx.se/docs/sslcerts.html + + 4.13 Why is curl -R on Windows one hour off? + + Since curl 7.53.0 this issue should be fixed as long as curl was built with + any modern compiler that allows for a 64-bit curl_off_t type. For older + compilers or prior curl versions it may set a time that appears one hour off. + This happens due to a flaw in how Windows stores and uses file modification + times and it is not easily worked around. For more details read this: + http://www.codeproject.com/datetime/dstbugs.asp + + 4.14 Redirects work in browser but not with curl! + + curl supports HTTP redirects well (see item 3.8). Browsers generally support + at least two other ways to perform redirects that curl does not: + + Meta tags. You can write a HTML tag that will cause the browser to redirect + to another given URL after a certain time. + + Javascript. You can write a Javascript program embedded in a HTML page that + redirects the browser to another given URL. + + There is no way to make curl follow these redirects. You must either + manually figure out what the page is set to do, or you write a script that + parses the results and fetches the new URL. + + 4.15 FTPS doesn't work + + curl supports FTPS (sometimes known as FTP-SSL) both implicit and explicit + mode. + + When a URL is used that starts with FTPS://, curl assumes implicit SSL on + the control connection and will therefore immediately connect and try to + speak SSL. FTPS:// connections default to port 990. + + To use explicit FTPS, you use a FTP:// URL and the --ftp-ssl option (or one + of its related flavours). This is the most common method, and the one + mandated by RFC4217. This kind of connection will then of course use the + standard FTP port 21 by default. + + 4.16 My HTTP POST or PUT requests are slow! + + libcurl makes all POST and PUT requests (except for POST requests with a + very tiny request body) use the "Expect: 100-continue" header. This header + allows the server to deny the operation early so that libcurl can bail out + before having to send any data. This is useful in authentication + cases and others. + + However, many servers don't implement the Expect: stuff properly and if the + server doesn't respond (positively) within 1 second libcurl will continue + and send off the data anyway. + + You can disable libcurl's use of the Expect: header the same way you disable + any header, using -H / CURLOPT_HTTPHEADER, or by forcing it to use HTTP 1.0. + + 4.17 Non-functional connect timeouts + + In most Windows setups having a timeout longer than 21 seconds make no + difference, as it will only send 3 TCP SYN packets and no more. The second + packet sent three seconds after the first and the third six seconds after + the second. No more than three packets are sent, no matter how long the + timeout is set. 
+ + See option TcpMaxConnectRetransmissions on this page: + https://support.microsoft.com/en-us/kb/175523/en-us + + Also, even on non-Windows systems there may run a firewall or anti-virus + software or similar that accepts the connection but does not actually do + anything else. This will make (lib)curl to consider the connection connected + and thus the connect timeout won't trigger. + + 4.18 file:// URLs containing drive letters (Windows, NetWare) + + When using curl to try to download a local file, one might use a URL + in this format: + + file://D:/blah.txt + + You'll find that even if D:\blah.txt does exist, curl returns a 'file + not found' error. + + According to RFC 1738 (https://www.ietf.org/rfc/rfc1738.txt), + file:// URLs must contain a host component, but it is ignored by + most implementations. In the above example, 'D:' is treated as the + host component, and is taken away. Thus, curl tries to open '/blah.txt'. + If your system is installed to drive C:, that will resolve to 'C:\blah.txt', + and if that doesn't exist you will get the not found error. + + To fix this problem, use file:// URLs with *three* leading slashes: + + file:///D:/blah.txt + + Alternatively, if it makes more sense, specify 'localhost' as the host + component: + + file://localhost/D:/blah.txt + + In either case, curl should now be looking for the correct file. + + 4.19 Why doesn't curl return an error when the network cable is unplugged? + + Unplugging a cable is not an error situation. The TCP/IP protocol stack + was designed to be fault tolerant, so even though there may be a physical + break somewhere the connection shouldn't be affected, just possibly + delayed. Eventually, the physical break will be fixed or the data will be + re-routed around the physical problem through another path. + + In such cases, the TCP/IP stack is responsible for detecting when the + network connection is irrevocably lost. Since with some protocols it is + perfectly legal for the client to wait indefinitely for data, the stack may + never report a problem, and even when it does, it can take up to 20 minutes + for it to detect an issue. The curl option --keepalive-time enables + keep-alive support in the TCP/IP stack which makes it periodically probe the + connection to make sure it is still available to send data. That should + reliably detect any TCP/IP network failure. + + But even that won't detect the network going down before the TCP/IP + connection is established (e.g. during a DNS lookup) or using protocols that + don't use TCP. To handle those situations, curl offers a number of timeouts + on its own. --speed-limit/--speed-time will abort if the data transfer rate + falls too low, and --connect-timeout and --max-time can be used to put an + overall timeout on the connection phase or the entire transfer. + + A libcurl-using application running in a known physical environment (e.g. + an embedded device with only a single network connection) may want to act + immediately if its lone network connection goes down. That can be achieved + by having the application monitor the network connection on its own using an + OS-specific mechanism, then signalling libcurl to abort (see also item 5.13). + + 4.20 curl doesn't return error for HTTP non-200 responses! + + Correct. Unless you use -f (--fail). + + When doing HTTP transfers, curl will perform exactly what you're asking it + to do and if successful it will not return an error. 
You can use curl to + test your web server's "file not found" page (that gets 404 back), you can + use it to check your authentication protected web pages (that gets a 401 + back) and so on. + + The specific HTTP response code does not constitute a problem or error for + curl. It simply sends and delivers HTTP as you asked and if that worked, + everything is fine and dandy. The response code is generally providing more + higher level error information that curl doesn't care about. The error was + not in the HTTP transfer. + + If you want your command line to treat error codes in the 400 and up range + as errors and thus return a non-zero value and possibly show an error + message, curl has a dedicated option for that: -f (CURLOPT_FAILONERROR in + libcurl speak). + + You can also use the -w option and the variable %{response_code} to extract + the exact response code that was returned in the response. + + 4.21 Why is there a HTTP/1.1 in my HTTP/2 request? + + If you use verbose to see the HTTP request when you send off a HTTP/2 + request, it will still say 1.1. + + The reason for this is that we first generate the request to send using the + old 1.1 style and show that request in the verbose output, and then we + convert it over to the binary header-compressed HTTP/2 style. The actual + "1.1" part from that request is then not actually used in the transfer. + The binary HTTP/2 headers are not human readable. + +5. libcurl Issues + + 5.1 Is libcurl thread-safe? + + Yes. + + We have written the libcurl code specifically adjusted for multi-threaded + programs. libcurl will use thread-safe functions instead of non-safe ones if + your system has such. Note that you must never share the same handle in + multiple threads. + + There may be some exceptions to thread safety depending on how libcurl was + built. Please review the guidelines for thread safety to learn more: + https://curl.haxx.se/libcurl/c/threadsafe.html + + 5.2 How can I receive all data into a large memory chunk? + + [ See also the examples/getinmemory.c source ] + + You are in full control of the callback function that gets called every time + there is data received from the remote server. You can make that callback do + whatever you want. You do not have to write the received data to a file. + + One solution to this problem could be to have a pointer to a struct that you + pass to the callback function. You set the pointer using the + CURLOPT_WRITEDATA option. Then that pointer will be passed to the callback + instead of a FILE * to a file: + + /* imaginary struct */ + struct MemoryStruct { + char *memory; + size_t size; + }; + + /* imaginary callback function */ + size_t + WriteMemoryCallback(void *ptr, size_t size, size_t nmemb, void *data) + { + size_t realsize = size * nmemb; + struct MemoryStruct *mem = (struct MemoryStruct *)data; + + mem->memory = (char *)realloc(mem->memory, mem->size + realsize + 1); + if (mem->memory) { + memcpy(&(mem->memory[mem->size]), ptr, realsize); + mem->size += realsize; + mem->memory[mem->size] = 0; + } + return realsize; + } + + 5.3 How do I fetch multiple files with libcurl? + + libcurl has excellent support for transferring multiple files. You should + just repeatedly set new URLs with curl_easy_setopt() and then transfer it + with curl_easy_perform(). The handle you get from curl_easy_init() is not + only reusable, but you're even encouraged to reuse it if you can, as that + will enable libcurl to use persistent connections. + + 5.4 Does libcurl do Winsock initialization on win32 systems? 
+ + Yes, if told to in the curl_global_init() call. + + 5.5 Does CURLOPT_WRITEDATA and CURLOPT_READDATA work on win32 ? + + Yes, but you cannot open a FILE * and pass the pointer to a DLL and have + that DLL use the FILE * (as the DLL and the client application cannot access + each others' variable memory areas). If you set CURLOPT_WRITEDATA you must + also use CURLOPT_WRITEFUNCTION as well to set a function that writes the + file, even if that simply writes the data to the specified FILE *. + Similarly, if you use CURLOPT_READDATA you must also specify + CURLOPT_READFUNCTION. + + 5.6 What about Keep-Alive or persistent connections? + + curl and libcurl have excellent support for persistent connections when + transferring several files from the same server. Curl will attempt to reuse + connections for all URLs specified on the same command line/config file, and + libcurl will reuse connections for all transfers that are made using the + same libcurl handle. + + When you use the easy interface the connection cache is kept within the easy + handle. If you instead use the multi interface, the connection cache will be + kept within the multi handle and will be shared among all the easy handles + that are used within the same multi handle. + + 5.7 Link errors when building libcurl on Windows! + + You need to make sure that your project, and all the libraries (both static + and dynamic) that it links against, are compiled/linked against the same run + time library. + + This is determined by the /MD, /ML, /MT (and their corresponding /M?d) + options to the command line compiler. /MD (linking against MSVCRT dll) seems + to be the most commonly used option. + + When building an application that uses the static libcurl library, you must + add -DCURL_STATICLIB to your CFLAGS. Otherwise the linker will look for + dynamic import symbols. If you're using Visual Studio, you need to instead + add CURL_STATICLIB in the "Preprocessor Definitions" section. + + If you get linker error like "unknown symbol __imp__curl_easy_init ..." you + have linked against the wrong (static) library. If you want to use the + libcurl.dll and import lib, you don't need any extra CFLAGS, but use one of + the import libraries below. These are the libraries produced by the various + lib/Makefile.* files: + + Target: static lib. import lib for libcurl*.dll. + ----------------------------------------------------------- + MingW: libcurl.a libcurldll.a + MSVC (release): libcurl.lib libcurl_imp.lib + MSVC (debug): libcurld.lib libcurld_imp.lib + Borland: libcurl.lib libcurl_imp.lib + + 5.8 libcurl.so.X: open failed: No such file or directory + + This is an error message you might get when you try to run a program linked + with a shared version of libcurl and your run-time linker (ld.so) couldn't + find the shared library named libcurl.so.X. (Where X is the number of the + current libcurl ABI, typically 3 or 4). + + You need to make sure that ld.so finds libcurl.so.X. 
You can do that + multiple ways, and it differs somewhat between different operating systems, + but they are usually: + + * Add an option to the linker command line that specify the hard-coded path + the run-time linker should check for the lib (usually -R) + + * Set an environment variable (LD_LIBRARY_PATH for example) where ld.so + should check for libs + + * Adjust the system's config to check for libs in the directory where you've + put the dir (like Linux's /etc/ld.so.conf) + + 'man ld.so' and 'man ld' will tell you more details + + 5.9 How does libcurl resolve host names? + + libcurl supports a large a number of different name resolve functions. One + of them is picked at build-time and will be used unconditionally. Thus, if + you want to change name resolver function you must rebuild libcurl and tell + it to use a different function. + + - The non-IPv6 resolver that can use one of four different host name resolve + calls (depending on what your system supports): + + A - gethostbyname() + B - gethostbyname_r() with 3 arguments + C - gethostbyname_r() with 5 arguments + D - gethostbyname_r() with 6 arguments + + - The IPv6-resolver that uses getaddrinfo() + + - The c-ares based name resolver that uses the c-ares library for resolves. + Using this offers asynchronous name resolves. + + - The threaded resolver (default option on Windows). It uses: + + A - gethostbyname() on plain IPv4 hosts + B - getaddrinfo() on IPv6 enabled hosts + + Also note that libcurl never resolves or reverse-lookups addresses given as + pure numbers, such as 127.0.0.1 or ::1. + + 5.10 How do I prevent libcurl from writing the response to stdout? + + libcurl provides a default built-in write function that writes received data + to stdout. Set the CURLOPT_WRITEFUNCTION to receive the data, or possibly + set CURLOPT_WRITEDATA to a different FILE * handle. + + 5.11 How do I make libcurl not receive the whole HTTP response? + + You make the write callback (or progress callback) return an error and + libcurl will then abort the transfer. + + 5.12 Can I make libcurl fake or hide my real IP address? + + No. libcurl operates on a higher level. Besides, faking IP address would + imply sending IP packets with a made-up source address, and then you normally + get a problem with receiving the packet sent back as they would then not be + routed to you! + + If you use a proxy to access remote sites, the sites will not see your local + IP address but instead the address of the proxy. + + Also note that on many networks NATs or other IP-munging techniques are used + that makes you see and use a different IP address locally than what the + remote server will see you coming from. You may also consider using + https://www.torproject.org/ . + + 5.13 How do I stop an ongoing transfer? + + With the easy interface you make sure to return the correct error code from + one of the callbacks, but none of them are instant. There is no function you + can call from another thread or similar that will stop it immediately. + Instead, you need to make sure that one of the callbacks you use returns an + appropriate value that will stop the transfer. Suitable callbacks that you + can do this with include the progress callback, the read callback and the + write callback. + + If you're using the multi interface, you can also stop a transfer by + removing the particular easy handle from the multi stack at any moment you + think the transfer is done or when you wish to abort the transfer. + + 5.14 Using C++ non-static functions for callbacks? 
+ + libcurl is a C library, it doesn't know anything about C++ member functions. + + You can overcome this "limitation" with relative ease using a static + member function that is passed a pointer to the class: + + // f is the pointer to your object. + static size_t YourClass::func(void *buffer, size_t sz, size_t n, void *f) + { + // Call non-static member function. + static_cast(f)->nonStaticFunction(); + } + + // This is how you pass pointer to the static function: + curl_easy_setopt(hcurl, CURLOPT_WRITEFUNCTION, YourClass::func); + curl_easy_setopt(hcurl, CURLOPT_WRITEDATA, this); + + 5.15 How do I get an FTP directory listing? + + If you end the FTP URL you request with a slash, libcurl will provide you + with a directory listing of that given directory. You can also set + CURLOPT_CUSTOMREQUEST to alter what exact listing command libcurl would use + to list the files. + + The follow-up question tends to be how is a program supposed to parse the + directory listing. How does it know what's a file and what's a dir and what's + a symlink etc. If the FTP server supports the MLSD command then it will + return data in a machine-readable format that can be parsed for type. The + types are specified by RFC3659 section 7.5.1. If MLSD is not supported then + you have to work with what you're given. The LIST output format is entirely + at the server's own liking and the NLST output doesn't reveal any types and + in many cases doesn't even include all the directory entries. Also, both LIST + and NLST tend to hide unix-style hidden files (those that start with a dot) + by default so you need to do "LIST -a" or similar to see them. + + Example - List only directories. + ftp.funet.fi supports MLSD and ftp.kernel.org does not: + + curl -s ftp.funet.fi/pub/ -X MLSD | \ + perl -lne 'print if s/(?:^|;)type=dir;[^ ]+ (.+)$/$1/' + + curl -s ftp.kernel.org/pub/linux/kernel/ | \ + perl -lne 'print if s/^d[-rwx]{9}(?: +[^ ]+){7} (.+)$/$1/' + + If you need to parse LIST output in libcurl one such existing + list parser is available at https://cr.yp.to/ftpparse.html Versions of + libcurl since 7.21.0 also provide the ability to specify a wildcard to + download multiple files from one FTP directory. + + 5.16 I want a different time-out! + + Time and time again users realize that CURLOPT_TIMEOUT and + CURLOPT_CONNECTIMEOUT are not sufficiently advanced or flexible to cover all + the various use cases and scenarios applications end up with. + + libcurl offers many more ways to time-out operations. A common alternative + is to use the CURLOPT_LOW_SPEED_LIMIT and CURLOPT_LOW_SPEED_TIME options to + specify the lowest possible speed to accept before to consider the transfer + timed out. + + The most flexible way is by writing your own time-out logic and using + CURLOPT_PROGRESSFUNCTION (perhaps in combination with other callbacks) and + use that to figure out exactly when the right condition is met when the + transfer should get stopped. + + 5.17 Can I write a server with libcurl? + + No. libcurl offers no functions or building blocks to build any kind of + internet protocol server. libcurl is only a client-side library. For server + libraries, you need to continue your search elsewhere but there exist many + good open source ones out there for most protocols you could possibly want a + server for. And there are really good stand-alone ones that have been tested + and proven for many years. There's no need for you to reinvent them! + + 5.18 Does libcurl use threads? 
+ + Put simply: no, libcurl will execute in the same thread you call it in. All + callbacks will be called in the same thread as the one you call libcurl in. + + If you want to avoid your thread to be blocked by the libcurl call, you make + sure you use the non-blocking API which will do transfers asynchronously - + but still in the same single thread. + + libcurl will potentially internally use threads for name resolving, if it + was built to work like that, but in those cases it'll create the child + threads by itself and they will only be used and then killed internally by + libcurl and never exposed to the outside. + +6. License Issues + + Curl and libcurl are released under a MIT/X derivate license. The license is + very liberal and should not impose a problem for your project. This section + is just a brief summary for the cases we get the most questions. (Parts of + this section was much enhanced by Bjorn Reese.) + + We are not lawyers and this is not legal advice. You should probably consult + one if you want true and accurate legal insights without our prejudice. Note + especially that this section concerns the libcurl license only; compiling in + features of libcurl that depend on other libraries (e.g. OpenSSL) may affect + the licensing obligations of your application. + + 6.1 I have a GPL program, can I use the libcurl library? + + Yes! + + Since libcurl may be distributed under the MIT/X derivate license, it can be + used together with GPL in any software. + + 6.2 I have a closed-source program, can I use the libcurl library? + + Yes! + + libcurl does not put any restrictions on the program that uses the library. + + 6.3 I have a BSD licensed program, can I use the libcurl library? + + Yes! + + libcurl does not put any restrictions on the program that uses the library. + + 6.4 I have a program that uses LGPL libraries, can I use libcurl? + + Yes! + + The LGPL license doesn't clash with other licenses. + + 6.5 Can I modify curl/libcurl for my program and keep the changes secret? + + Yes! + + The MIT/X derivate license practically allows you to do almost anything with + the sources, on the condition that the copyright texts in the sources are + left intact. + + 6.6 Can you please change the curl/libcurl license to XXXX? + + No. + + We have carefully picked this license after years of development and + discussions and a large amount of people have contributed with source code + knowing that this is the license we use. This license puts the restrictions + we want on curl/libcurl and it does not spread to other programs or + libraries that use it. It should be possible for everyone to use libcurl or + curl in their projects, no matter what license they already have in use. + + 6.7 What are my obligations when using libcurl in my commercial apps? + + Next to none. All you need to adhere to is the MIT-style license (stated in + the COPYING file) which basically says you have to include the copyright + notice in "all copies" and that you may not use the copyright holder's name + when promoting your software. + + You do not have to release any of your source code. + + You do not have to reveal or make public any changes to the libcurl source + code. + + You do not have to broadcast to the world that you are using libcurl within + your app. + + All we ask is that you disclose "the copyright notice and this permission + notice" somewhere. Most probably like in the documentation or in the section + where other third party dependencies already are mentioned and acknowledged. 
+ + As can be seen here: https://curl.haxx.se/docs/companies.html and elsewhere, + more and more companies are discovering the power of libcurl and take + advantage of it even in commercial environments. + + +7. PHP/CURL Issues + + 7.1 What is PHP/CURL? + + The module for PHP that makes it possible for PHP programs to access curl- + functions from within PHP. + + In the cURL project we call this module PHP/CURL to differentiate it from + curl the command line tool and libcurl the library. The PHP team however + does not refer to it like this (for unknown reasons). They call it plain + CURL (often using all caps) or sometimes ext/curl, but both cause much + confusion to users which in turn gives us a higher question load. + + 7.2 Who wrote PHP/CURL? + + PHP/CURL was initially written by Sterling Hughes. + + 7.3 Can I perform multiple requests using the same handle? + + Yes - at least in PHP version 4.3.8 and later (this has been known to not + work in earlier versions, but the exact version when it started to work is + unknown to me). + + After a transfer, you just set new options in the handle and make another + transfer. This will make libcurl re-use the same connection if it can. + + 7.4 Does PHP/CURL have dependencies? + + PHP/CURL is a module that comes with the regular PHP package. It depends on + and uses libcurl, so you need to have libcurl installed properly before + PHP/CURL can be used. diff --git a/deps-win32/curl-7.54.1/docs/FEATURES b/deps-win32/curl-7.54.1/docs/FEATURES new file mode 100644 index 0000000..39ac390 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/FEATURES @@ -0,0 +1,206 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +FEATURES + +curl tool + - config file support + - multiple URLs in a single command line + - range "globbing" support: [0-13], {one,two,three} + - multiple file upload on a single command line + - custom maximum transfer rate + - redirectable stderr + - metalink support (*13) + +libcurl + - full URL syntax with no length limit + - custom maximum download time + - custom least download speed acceptable + - custom output result after completion + - guesses protocol from host name unless specified + - uses .netrc + - progress bar with time statistics while downloading + - "standard" proxy environment variables support + - compiles on win32 (reported builds on 40+ operating systems) + - selectable network interface for outgoing traffic + - IPv6 support on unix and Windows + - persistent connections + - socks 4 + 5 support, with or without local name resolving + - supports user name and password in proxy environment variables + - operations through proxy "tunnel" (using CONNECT) + - support for large files (>2GB and >4GB) during upload and download + - replaceable memory functions (malloc, free, realloc, etc) + - asynchronous name resolving (*6) + - both a push and a pull style interface + - international domain names (*11) + +HTTP + - HTTP/1.1 compliant (optionally uses 1.0) + - GET + - PUT + - HEAD + - POST + - Pipelining + - multipart formpost (RFC1867-style) + - authentication: Basic, Digest, NTLM (*9) and Negotiate (SPNEGO) (*3) + to server and proxy + - resume (both GET and PUT) + - follow redirects + - maximum amount of redirects to follow + - custom HTTP request + - cookie get/send fully parsed + - reads/writes the netscape cookie file format + - custom headers (replace/remove internally generated headers) + - custom user-agent string + - custom referrer string + - range + - proxy 
authentication + - time conditions + - via http-proxy + - retrieve file modification date + - Content-Encoding support for deflate and gzip + - "Transfer-Encoding: chunked" support in uploads + - data compression (*12) + - HTTP/2 (*5) + +HTTPS (*1) + - (all the HTTP features) + - using client certificates + - verify server certificate + - via http-proxy + - select desired encryption + - force usage of a specific SSL version (SSLv2 (*7), SSLv3 (*10) or TLSv1) + +FTP + - download + - authentication + - Kerberos 5 (*14) + - active/passive using PORT, EPRT, PASV or EPSV + - single file size information (compare to HTTP HEAD) + - 'type=' URL support + - dir listing + - dir listing names-only + - upload + - upload append + - upload via http-proxy as HTTP PUT + - download resume + - upload resume + - custom ftp commands (before and/or after the transfer) + - simple "range" support + - via http-proxy + - all operations can be tunneled through a http-proxy + - customizable to retrieve file modification date + - no dir depth limit + +FTPS (*1) + - implicit ftps:// support that use SSL on both connections + - explicit "AUTH TLS" and "AUTH SSL" usage to "upgrade" plain ftp:// + connection to use SSL for both or one of the connections + +SCP (*8) + - both password and public key auth + +SFTP (*8) + - both password and public key auth + - with custom commands sent before/after the transfer + +TFTP + - download + - upload + +TELNET + - connection negotiation + - custom telnet options + - stdin/stdout I/O + +LDAP (*2) + - full LDAP URL support + +DICT + - extended DICT URL support + +FILE + - URL support + - upload + - resume + +SMB + - SMBv1 over TCP and SSL + - download + - upload + - authentication with NTLMv1 + +SMTP + - authentication: Plain, Login, CRAM-MD5, Digest-MD5, NTLM (*9), Kerberos 5 + (*4) and External. + - send e-mails + - mail from support + - mail size support + - mail auth support for trusted server-to-server relaying + - multiple recipients + - via http-proxy + +SMTPS (*1) + - implicit smtps:// support + - explicit "STARTTLS" usage to "upgrade" plain smtp:// connections to use SSL + - via http-proxy + +POP3 + - authentication: Clear Text, APOP and SASL + - SASL based authentication: Plain, Login, CRAM-MD5, Digest-MD5, NTLM (*9), + Kerberos 5 (*4) and External. + - list e-mails + - retrieve e-mails + - enhanced command support for: CAPA, DELE, TOP, STAT, UIDL and NOOP via + custom requests + - via http-proxy + +POP3S (*1) + - implicit pop3s:// support + - explicit "STLS" usage to "upgrade" plain pop3:// connections to use SSL + - via http-proxy + +IMAP + - authentication: Clear Text and SASL + - SASL based authentication: Plain, Login, CRAM-MD5, Digest-MD5, NTLM (*9), + Kerberos 5 (*4) and External. 
+ - list the folders of a mailbox + - select a mailbox with support for verifying the UIDVALIDITY + - fetch e-mails with support for specifying the UID and SECTION + - upload e-mails via the append command + - enhanced command support for: EXAMINE, CREATE, DELETE, RENAME, STATUS, + STORE, COPY and UID via custom requests + - via http-proxy + +IMAPS (*1) + - implicit imaps:// support + - explicit "STARTTLS" usage to "upgrade" plain imap:// connections to use SSL + - via http-proxy + +FOOTNOTES +========= + + *1 = requires OpenSSL, GnuTLS, NSS, yassl, axTLS, PolarSSL, WinSSL (native + Windows), Secure Transport (native iOS/OS X) or GSKit (native IBM i) + *2 = requires OpenLDAP or WinLDAP + *3 = requires a GSS-API implementation (such as Heimdal or MIT Kerberos) or + SSPI (native Windows) + *4 = requires a GSS-API implementation, however, only Windows SSPI is + currently supported + *5 = requires nghttp2 and possibly a recent TLS library + *6 = requires c-ares + *7 = requires OpenSSL, NSS, GSKit, WinSSL or Secure Transport; GnuTLS, for + example, only supports SSLv3 and TLSv1 + *8 = requires libssh2 + *9 = requires OpenSSL, GnuTLS, mbedTLS, NSS, yassl, Secure Transport or SSPI + (native Windows) + *10 = requires any of the SSL libraries in (*1) above other than axTLS, which + does not support SSLv3 + *11 = requires libidn or Windows + *12 = requires libz + *13 = requires libmetalink, and either an Apple or Microsoft operating + system, or OpenSSL, or GnuTLS, or NSS + *14 = requires a GSS-API implementation (such as Heimdal or MIT Kerberos) diff --git a/deps-win32/curl-7.54.1/docs/HISTORY.md b/deps-win32/curl-7.54.1/docs/HISTORY.md new file mode 100644 index 0000000..551e7d2 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/HISTORY.md @@ -0,0 +1,277 @@ +How curl Became Like This +========================= + +Towards the end of 1996, Daniel Stenberg was spending time writing an IRC bot +for an Amiga related channel on EFnet. He then came up with the idea to make +currency-exchange calculations available to Internet Relay Chat (IRC) +users. All the necessary data were published on the Web; he just needed to +automate their retrieval. + +Daniel simply adopted an existing command-line open-source tool, httpget, that +Brazilian Rafael Sagula had written and recently released version 0.1 of. After +a few minor adjustments, it did just what he needed. + +1997 +---- + +HttpGet 1.0 was released on April 8th 1997 with brand new HTTP proxy support. + +We soon found and fixed support for getting currencies over GOPHER. Once FTP +download support was added, the name of the project was changed and urlget 2.0 +was released in August 1997. The http-only days were already passed. + +1998 +---- + +The project slowly grew bigger. When upload capabilities were added and the +name once again was misleading, a second name change was made and on March 20, +1998 curl 4 was released. (The version numbering from the previous names was +kept.) + +(Unrelated to this project a company called Curl Corporation registered a US +trademark on the name "CURL" on May 18 1998. That company had then already +registered the curl.com domain back in November of the previous year. All this +was revealed to us much later.) + +SSL support was added, powered by the SSLeay library. + +August: first announcement of curl on freshmeat.net. + +October: with the curl 4.9 release and the introduction of cookie support, +curl was no longer released under the GPL license. 
Now we're at 4000 lines of +code, we switched over to the MPL license to restrict the effects of +"copyleft". + +November: configure script and reported successful compiles on several +major operating systems. The never-quite-understood -F option was added and +curl could now simulate quite a lot of a browser. TELNET support was added. + +Curl 5 was released in December 1998 and introduced the first ever curl man +page. People started making Linux RPM packages out of it. + +1999 +---- + +January: DICT support added. + +OpenSSL took over and SSLeay was abandoned. + +May: first Debian package. + +August: LDAP:// and FILE:// support added. The curl web site gets 1300 visits +weekly. Moved site to curl.haxx.nu. + +September: Released curl 6.0. 15000 lines of code. + +December 28: added the project on Sourceforge and started using its services +for managing the project. + +2000 +---- + +Spring: major internal overhaul to provide a suitable library interface. +The first non-beta release was named 7.1 and arrived in August. This offered +the easy interface and turned out to be the beginning of actually getting +other software and programs to be based on and powered by libcurl. Almost +20000 lines of code. + +June: the curl site moves to "curl.haxx.se" + +August, the curl web site gets 4000 visits weekly. + +The PHP guys adopted libcurl already the same month, when the first ever third +party libcurl binding showed up. CURL has been a supported module in PHP since +the release of PHP 4.0.2. This would soon get followers. More than 16 +different bindings exist at the time of this writing. + +September: kerberos4 support was added. + +November: started the work on a test suite for curl. It was later re-written +from scratch again. The libcurl major SONAME number was set to 1. + +2001 +---- + +January: Daniel released curl 7.5.2 under a new license again: MIT (or +MPL). The MIT license is extremely liberal and can be combined with GPL +in other projects. This would finally put an end to the "complaints" from +people involved in GPLed projects that previously were prohibited from using +libcurl while it was released under MPL only. (Due to the fact that MPL is +deemed "GPL incompatible".) + +March 22: curl supports HTTP 1.1 starting with the release of 7.7. This +also introduced libcurl's ability to do persistent connections. 24000 lines of +code. The libcurl major SONAME number was bumped to 2 due to this overhaul. +The first experimental ftps:// support was added. + +August: curl is bundled in Mac OS X, 10.1. It was already becoming more and +more of a standard utility of Linux distributions and a regular in the BSD +ports collections. The curl web site gets 8000 visits weekly. Curl Corporation +contacted Daniel to discuss "the name issue". After Daniel's reply, they have +never since got back in touch again. + +September: libcurl 7.9 introduces cookie jar and curl_formadd(). During the +forthcoming 7.9.x releases, we introduced the multi interface slowly and +without many whistles. + +2002 +---- + +June: the curl web site gets 13000 visits weekly. curl and libcurl is +35000 lines of code. Reported successful compiles on more than 40 combinations +of CPUs and operating systems. + +To estimate number of users of the curl tool or libcurl library is next to +impossible. Around 5000 downloaded packages each week from the main site gives +a hint, but the packages are mirrored extensively, bundled with numerous OS +distributions and otherwise retrieved as part of other software. 
+ +September: with the release of curl 7.10 it is released under the MIT license +only. + +2003 +---- + +January: Started working on the distributed curl tests. The autobuilds. + +February: the curl site averages at 20000 visits weekly. At any given moment, +there's an average of 3 people browsing the curl.haxx.se site. + +Multiple new authentication schemes are supported: Digest (May), NTLM (June) +and Negotiate (June). + +November: curl 7.10.8 is released. 45000 lines of code. ~55000 unique visitors +to the curl.haxx.se site. Five official web mirrors. + +December: full-fledged SSL for FTP is supported. + +2004 +---- + +January: curl 7.11.0 introduced large file support. + +June: curl 7.12.0 introduced IDN support. 10 official web mirrors. + +This release bumped the major SONAME to 3 due to the removal of the +curl_formparse() function + +August: Curl and libcurl 7.12.1 + + Public curl release number: 82 + Releases counted from the very beginning: 109 + Available command line options: 96 + Available curl_easy_setopt() options: 120 + Number of public functions in libcurl: 36 + Amount of public web site mirrors: 12 + Number of known libcurl bindings: 26 + +2005 +---- + +April: GnuTLS can now optionally be used for the secure layer when curl is +built. + +April: Added the multi_socket() API + +September: TFTP support was added. + +More than 100,000 unique visitors of the curl web site. 25 mirrors. + +December: security vulnerability: libcurl URL Buffer Overflow + +2006 +---- + +January: We dropped support for Gopher. We found bugs in the implementation +that turned out to have been introduced years ago, so with the conclusion that +nobody had found out in all this time we removed it instead of fixing it. + +March: security vulnerability: libcurl TFTP Packet Buffer Overflow + +September: The major SONAME number for libcurl was bumped to 4 due to the +removal of ftp third party transfer support. + +November: Added SCP and SFTP support + +2007 +---- + +February: Added support for the Mozilla NSS library to do the SSL/TLS stuff + +July: security vulnerability: libcurl GnuTLS insufficient cert verification + +2008 +---- + +November: + + Command line options: 128 + curl_easy_setopt() options: 158 + Public functions in libcurl: 58 + Known libcurl bindings: 37 + Contributors: 683 + + 145,000 unique visitors. >100 GB downloaded. + +2009 +---- + +March: security vulnerability: libcurl Arbitrary File Access + +August: security vulnerability: libcurl embedded zero in cert name + +December: Added support for IMAP, POP3 and SMTP + +2010 +---- + +January: Added support for RTSP + +February: security vulnerability: libcurl data callback excessive length + +March: The project switched over to use git (hosted by github) instead of CVS +for source code control + +May: Added support for RTMP + +Added support for PolarSSL to do the SSL/TLS stuff + +August: + + Public curl releases: 117 + Command line options: 138 + curl_easy_setopt() options: 180 + Public functions in libcurl: 58 + Known libcurl bindings: 39 + Contributors: 808 + + Gopher support added (re-added actually, see January 2006) + +2012 +---- + + July: Added support for Schannel (native Windows TLS backend) and Darwin SSL + (Native Mac OS X and iOS TLS backend). + + Supports metalink + + October: SSH-agent support. + +2013 +---- + + February: Cleaned up internals to always uses the "multi" non-blocking + approach internally and only expose the blocking API with a wrapper. + + September: First small steps on supporting HTTP/2 with nghttp2. 
+ + October: Removed krb4 support. + + December: Happy eyeballs. + +2014 +---- + + March: first real release supporting HTTP/2 + + September: Web site had 245,000 unique visitors and served 236GB data diff --git a/deps-win32/curl-7.54.1/docs/HTTP-COOKIES.md b/deps-win32/curl-7.54.1/docs/HTTP-COOKIES.md new file mode 100644 index 0000000..a1b2834 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/HTTP-COOKIES.md @@ -0,0 +1,104 @@ +# HTTP Cookies + +## Cookie overview + + Cookies are `name=contents` pairs that a HTTP server tells the client to + hold and then the client sends back those to the server on subsequent + requests to the same domains and paths for which the cookies were set. + + Cookies are either "session cookies" which typically are forgotten when the + session is over which is often translated to equal when browser quits, or + the cookies aren't session cookies they have expiration dates after which + the client will throw them away. + + Cookies are set to the client with the Set-Cookie: header and are sent to + servers with the Cookie: header. + + For a very long time, the only spec explaining how to use cookies was the + original [Netscape spec from 1994](https://curl.haxx.se/rfc/cookie_spec.html). + + In 2011, [RFC6265](https://www.ietf.org/rfc/rfc6265.txt) was finally + published and details how cookies work within HTTP. + +## Cookies saved to disk + + Netscape once created a file format for storing cookies on disk so that they + would survive browser restarts. curl adopted that file format to allow + sharing the cookies with browsers, only to see browsers move away from that + format. Modern browsers no longer use it, while curl still does. + + The netscape cookie file format stores one cookie per physical line in the + file with a bunch of associated meta data, each field separated with + TAB. That file is called the cookiejar in curl terminology. + + When libcurl saves a cookiejar, it creates a file header of its own in which + there is a URL mention that will link to the web version of this document. + +## Cookies with curl the command line tool + + curl has a full cookie "engine" built in. If you just activate it, you can + have curl receive and send cookies exactly as mandated in the specs. + + Command line options: + + `-b, --cookie` + + tell curl a file to read cookies from and start the cookie engine, or if it + isn't a file it will pass on the given string. -b name=var works and so does + -b cookiefile. + + `-j, --junk-session-cookies` + + when used in combination with -b, it will skip all "session cookies" on load + so as to appear to start a new cookie session. + + `-c, --cookie-jar` + + tell curl to start the cookie engine and write cookies to the given file + after the request(s) + +## Cookies with libcurl + + libcurl offers several ways to enable and interface the cookie engine. These + options are the ones provided by the native API. libcurl bindings may offer + access to them using other means. + + `CURLOPT_COOKIE` + + Is used when you want to specify the exact contents of a cookie header to + send to the server. + + `CURLOPT_COOKIEFILE` + + Tell libcurl to activate the cookie engine, and to read the initial set of + cookies from the given file. Read-only. + + `CURLOPT_COOKIEJAR` + + Tell libcurl to activate the cookie engine, and when the easy handle is + closed save all known cookies to the given cookiejar file. Write-only. + + `CURLOPT_COOKIELIST` + + Provide detailed information about a single cookie to add to the internal + storage of cookies. 
Pass in the cookie as a HTTP header with all the details + set, or pass in a line from a netscape cookie file. This option can also be + used to flush the cookies etc. + + `CURLINFO_COOKIELIST` + + Extract cookie information from the internal cookie storage as a linked + list. + +## Cookies with javascript + + These days a lot of the web is built up by javascript. The webbrowser loads + complete programs that render the page you see. These javascript programs + can also set and access cookies. + + Since curl and libcurl are plain HTTP clients without any knowledge of or + capability to handle javascript, such cookies will not be detected or used. + + Often, if you want to mimic what a browser does on such web sites, you can + record web browser HTTP traffic when using such a site and then repeat the + cookie operations using curl or libcurl. diff --git a/deps-win32/curl-7.54.1/docs/HTTP2.md b/deps-win32/curl-7.54.1/docs/HTTP2.md new file mode 100644 index 0000000..efbe699 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/HTTP2.md @@ -0,0 +1,126 @@ +HTTP/2 with curl +================ + +[HTTP/2 Spec](https://www.rfc-editor.org/rfc/rfc7540.txt) +[http2 explained](https://daniel.haxx.se/http2/) + +Build prerequisites +------------------- + - nghttp2 + - OpenSSL, libressl, BoringSSL, NSS, GnutTLS, mbedTLS, wolfSSL or SChannel + with a new enough version. + +[nghttp2](https://nghttp2.org/) +------------------------------- + +libcurl uses this 3rd party library for the low level protocol handling +parts. The reason for this is that HTTP/2 is much more complex at that layer +than HTTP/1.1 (which we implement on our own) and that nghttp2 is an already +existing and well functional library. + +We require at least version 1.0.0. + +Over an http:// URL +------------------- + +If `CURLOPT_HTTP_VERSION` is set to `CURL_HTTP_VERSION_2_0`, libcurl will +include an upgrade header in the initial request to the host to allow +upgrading to HTTP/2. + +Possibly we can later introduce an option that will cause libcurl to fail if +not possible to upgrade. Possibly we introduce an option that makes libcurl +use HTTP/2 at once over http:// + +Over an https:// URL +-------------------- + +If `CURLOPT_HTTP_VERSION` is set to `CURL_HTTP_VERSION_2_0`, libcurl will use +ALPN (or NPN) to negotiate which protocol to continue with. Possibly introduce +an option that will cause libcurl to fail if not possible to use HTTP/2. + +`CURL_HTTP_VERSION_2TLS` was added in 7.47.0 as a way to ask libcurl to prefer +HTTP/2 for HTTPS but stick to 1.1 by default for plain old HTTP connections. + +ALPN is the TLS extension that HTTP/2 is expected to use. The NPN extension is +for a similar purpose, was made prior to ALPN and is used for SPDY so early +HTTP/2 servers are implemented using NPN before ALPN support is widespread. + +`CURLOPT_SSL_ENABLE_ALPN` and `CURLOPT_SSL_ENABLE_NPN` are offered to allow +applications to explicitly disable ALPN or NPN. + +SSL libs +-------- + +The challenge is the ALPN and NPN support and all our different SSL +backends. You may need a fairly updated SSL library version for it to provide +the necessary TLS features. Right now we support: + + - OpenSSL: ALPN and NPN + - libressl: ALPN and NPN + - BoringSSL: ALPN and NPN + - NSS: ALPN and NPN + - GnuTLS: ALPN + - mbedTLS: ALPN + - SChannel: ALPN + - wolfSSL: ALPN + +Multiplexing +------------ + +Starting in 7.43.0, libcurl fully supports HTTP/2 multiplexing, which is the +term for doing multiple independent transfers over the same physical TCP +connection. 
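To make this concrete, here is a minimal sketch (not taken from the curl sources; the URLs are placeholders and error checking is omitted) of an application doing two transfers that may end up multiplexed over a single connection. It uses the `CURLMOPT_PIPELINING`, `CURLPIPE_MULTIPLEX` and `CURLOPT_PIPEWAIT` options that the following paragraphs describe in more detail.

    #include <curl/curl.h>

    static CURL *make_handle(const char *url)
    {
      CURL *h = curl_easy_init();
      curl_easy_setopt(h, CURLOPT_URL, url);
      /* ask for HTTP/2; see "Over an https:// URL" above */
      curl_easy_setopt(h, CURLOPT_HTTP_VERSION, (long)CURL_HTTP_VERSION_2_0);
      /* prefer waiting for a multiplexable connection over opening a new one */
      curl_easy_setopt(h, CURLOPT_PIPEWAIT, 1L);
      return h;
    }

    int main(void)
    {
      CURLM *multi;
      CURL *h1, *h2;
      int running = 1;

      curl_global_init(CURL_GLOBAL_DEFAULT);
      multi = curl_multi_init();
      /* allow streams to share one HTTP/2 connection */
      curl_multi_setopt(multi, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);

      h1 = make_handle("https://example.com/a");   /* placeholder URLs */
      h2 = make_handle("https://example.com/b");
      curl_multi_add_handle(multi, h1);
      curl_multi_add_handle(multi, h2);

      while(running) {
        int numfds;
        curl_multi_perform(multi, &running);
        if(running)
          curl_multi_wait(multi, NULL, 0, 1000, &numfds);
      }

      curl_multi_remove_handle(multi, h1);
      curl_multi_remove_handle(multi, h2);
      curl_easy_cleanup(h1);
      curl_easy_cleanup(h2);
      curl_multi_cleanup(multi);
      curl_global_cleanup();
      return 0;
    }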
+ +To take advantage of multiplexing, you need to use the multi interface and set +`CURLMOPT_PIPELINING` to `CURLPIPE_MULTIPLEX`. With that bit set, libcurl will +attempt to re-use existing HTTP/2 connections and just add a new stream over +that when doing subsequent parallel requests. + +While libcurl sets up a connection to a HTTP server there is a period during +which it doesn't know if it can pipeline or do multiplexing and if you add new +transfers in that period, libcurl will default to start new connections for +those transfers. With the new option `CURLOPT_PIPEWAIT` (added in 7.43.0), you +can ask that a transfer should rather wait and see in case there's a +connection for the same host in progress that might end up being possible to +multiplex on. It favours keeping the number of connections low to the cost of +slightly longer time to first byte transferred. + +Applications +------------ + +We hide HTTP/2's binary nature and convert received HTTP/2 traffic to headers +in HTTP 1.1 style. This allows applications to work unmodified. + +curl tool +--------- + +curl offers the `--http2` command line option to enable use of HTTP/2. + +curl offers the `--http2-prior-knowledge` command line option to enable use of +HTTP/2 without HTTP/1.1 Upgrade. + +Since 7.47.0, the curl tool enables HTTP/2 by default for HTTPS connections. + +curl tool limitations +--------------------- + +The command line tool won't do any HTTP/2 multiplexing even though libcurl +supports it, simply because the curl tool is not written to take advantage of +the libcurl API that's necessary for this (the multi interface). We have an +outstanding TODO item for this and **you** can help us make it happen. + +The command line tool also doesn't support HTTP/2 server push for the same +reason it doesn't do multiplexing: it needs to use the multi interface for +that so that multiplexing is supported. + +HTTP Alternative Services +------------------------- + +Alt-Svc is an extension with a corresponding frame (ALTSVC) in HTTP/2 that +tells the client about an alternative "route" to the same content for the same +origin server that you get the response from. A browser or long-living client +can use that hint to create a new connection asynchronously. For libcurl, we +may introduce a way to bring such clues to the application and/or let a +subsequent request use the alternate route automatically. + +[Detailed in RFC 7838](https://tools.ietf.org/html/rfc7838) diff --git a/deps-win32/curl-7.54.1/docs/INSTALL.cmake b/deps-win32/curl-7.54.1/docs/INSTALL.cmake new file mode 100644 index 0000000..61492db --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/INSTALL.cmake @@ -0,0 +1,102 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + + How To Compile with CMake + +Building with CMake +========================== + This document describes how to compile, build and install curl and libcurl + from source code using the CMake build tool. To build with CMake, you will + of course have to first install CMake. The minimum required version of + CMake is specified in the file CMakeLists.txt found in the top of the curl + source tree. Once the correct version of CMake is installed you can follow + the instructions below for the platform you are building on. + + CMake builds can be configured either from the command line, or from one + of CMake's GUI's. 
+
+Current flaws in the curl CMake build
+=====================================
+
+   Missing features in the cmake build:
+
+   - Builds libcurl without large file support
+   - Does not support all SSL libraries (only OpenSSL, WinSSL, DarwinSSL, and
+     mbed TLS)
+   - Doesn't build with SCP and SFTP support (libssh2) (see issue #1155)
+   - Doesn't allow different resolver backends (no c-ares build support)
+   - No RTMP support built
+   - Doesn't allow building curl and libcurl with debug enabled
+   - Doesn't allow a custom CA bundle path
+   - Doesn't allow you to disable specific protocols from the build
+   - Doesn't find or use krb4 or GSS
+   - Rebuilds test files too eagerly, but still can't run the tests
+   - Doesn't detect the correct strerror_r flavor when cross-compiling (issue #1123)
+
+
+Important notice
+================
+   If you got your curl sources from a distribution tarball, make sure to
+   delete the generic 'include/curl/curlbuild.h' file that comes with it:
+
+     rm -f curl/include/curl/curlbuild.h
+
+   The purpose of this file is to provide reasonable definitions for systems
+   where autoconfiguration is not available. CMake will create its own
+   version of this file in its build directory. If the "generic" version
+   is not deleted, weird build errors may occur on some systems.
+
+Command Line CMake
+==================
+   A CMake build of curl is similar to the autotools build of curl. It
+   consists of the following steps after you have unpacked the source.
+
+   1. Create an out-of-source build tree parallel to the curl source
+      tree and change into that directory:
+
+        $ mkdir curl-build
+        $ cd curl-build
+
+   2. Run CMake from the build tree, giving it the path to the top of
+      the curl source tree. CMake will pick a compiler for you. If you
+      want to specify the compiler, you can set the CC environment
+      variable prior to running CMake:
+
+        $ cmake ../curl
+        $ make
+
+   3. Install to the default location:
+
+        $ make install
+
+   (The test suite does not work with the cmake build.)
+
+ccmake
+======
+   CMake comes with a curses-based interface called ccmake. To run ccmake on
+   curl, use the instructions for the command line cmake, but substitute
+   ccmake ../curl for cmake ../curl. This will bring up a curses interface
+   with instructions at the bottom of the screen. You can press the "c" key
+   to configure the project, and the "g" key to generate the project. After
+   the project is generated, you can run make.
+
+cmake-gui
+=========
+   CMake also comes with a Qt-based GUI called cmake-gui. To configure with
+   cmake-gui, run cmake-gui and follow these steps:
+
+   1. Fill in the "Where is the source code" combo box with the path to
+      the curl source tree.
+   2. Fill in the "Where to build the binaries" combo box with the path
+      to the directory for your build tree; ideally this should not be
+      the same as the source tree, but a parallel directory called
+      curl-build or something similar.
+   3. Once the source and binary directories are specified, press the
+      "Configure" button.
+   4. Select the native build tool that you want to use.
+   5. At this point you can change any of the options presented in the
+      GUI. Once you have selected all the options you want, click the
+      "Generate" button.
+   6. Run the native build tool that you used CMake to generate.
+ diff --git a/deps-win32/curl-7.54.1/docs/INSTALL.md b/deps-win32/curl-7.54.1/docs/INSTALL.md new file mode 100644 index 0000000..529a907 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/INSTALL.md @@ -0,0 +1,513 @@ +# how to install curl and libcurl + +## Installing Binary Packages + +Lots of people download binary distributions of curl and libcurl. This +document does not describe how to install curl or libcurl using such a binary +package. This document describes how to compile, build and install curl and +libcurl from source code. + +## Building from git + +If you get your code off a git repository instead of a release tarball, see +the `GIT-INFO` file in the root directory for specific instructions on how to +proceed. + +# Unix + +A normal Unix installation is made in three or four steps (after you've +unpacked the source archive): + + ./configure + make + make test (optional) + make install + +You probably need to be root when doing the last command. + +Get a full listing of all available configure options by invoking it like: + + ./configure --help + +If you want to install curl in a different file hierarchy than `/usr/local`, +specify that when running configure: + + ./configure --prefix=/path/to/curl/tree + +If you have write permission in that directory, you can do 'make install' +without being root. An example of this would be to make a local install in +your own home directory: + + ./configure --prefix=$HOME + make + make install + +The configure script always tries to find a working SSL library unless +explicitly told not to. If you have OpenSSL installed in the default search +path for your compiler/linker, you don't need to do anything special. If you +have OpenSSL installed in /usr/local/ssl, you can run configure like: + + ./configure --with-ssl + +If you have OpenSSL installed somewhere else (for example, /opt/OpenSSL) and +you have pkg-config installed, set the pkg-config path first, like this: + + env PKG_CONFIG_PATH=/opt/OpenSSL/lib/pkgconfig ./configure --with-ssl + +Without pkg-config installed, use this: + + ./configure --with-ssl=/opt/OpenSSL + +If you insist on forcing a build without SSL support, even though you may +have OpenSSL installed in your system, you can run configure like this: + + ./configure --without-ssl + +If you have OpenSSL installed, but with the libraries in one place and the +header files somewhere else, you have to set the LDFLAGS and CPPFLAGS +environment variables prior to running configure. Something like this should +work: + + CPPFLAGS="-I/path/to/ssl/include" LDFLAGS="-L/path/to/ssl/lib" ./configure + +If you have shared SSL libs installed in a directory where your run-time +linker doesn't find them (which usually causes configure failures), you can +provide the -R option to ld on some operating systems to set a hard-coded +path to the run-time linker: + + LDFLAGS=-R/usr/local/ssl/lib ./configure --with-ssl + +## More Options + +To force a static library compile, disable the shared library creation by +running configure like: + + ./configure --disable-shared + +To tell the configure script to skip searching for thread-safe functions, add +an option like: + + ./configure --disable-thread + +If you're a curl developer and use gcc, you might want to enable more debug +options with the `--enable-debug` option. + +curl can be built to use a whole range of libraries to provide various useful +services, and configure will try to auto-detect a decent default. But if you +want to alter it, you can select how to deal with each individual library. 
+ +## Select TLS backend + +The default OpenSSL configure check will also detect and use BoringSSL or +libressl. + + - GnuTLS: `--without-ssl --with-gnutls`. + - Cyassl: `--without-ssl --with-cyassl` + - NSS: `--without-ssl --with-nss` + - PolarSSL: `--without-ssl --with-polarssl` + - mbedTLS: `--without-ssl --with-mbedtls` + - axTLS: `--without-ssl --with-axtls` + - schannel: `--without-ssl --with-winssl` + - secure transport: `--without-ssl --with-darwinssl` + +# Windows + +## Building Windows DLLs and C run-time (CRT) linkage issues + + As a general rule, building a DLL with static CRT linkage is highly + discouraged, and intermixing CRTs in the same app is something to avoid at + any cost. + + Reading and comprehending Microsoft Knowledge Base articles KB94248 and + KB140584 is a must for any Windows developer. Especially important is full + understanding if you are not going to follow the advice given above. + + - [How To Use the C Run-Time](https://support.microsoft.com/kb/94248/en-us) + - [How to link with the correct C Run-Time CRT library](https://support.microsoft.com/kb/140584/en-us) + - [Potential Errors Passing CRT Objects Across DLL Boundaries](https://msdn.microsoft.com/en-us/library/ms235460) + +If your app is misbehaving in some strange way, or it is suffering from +memory corruption, before asking for further help, please try first to +rebuild every single library your app uses as well as your app using the +debug multithreaded dynamic C runtime. + + If you get linkage errors read section 5.7 of the FAQ document. + +## MingW32 + +Make sure that MinGW32's bin dir is in the search path, for example: + + set PATH=c:\mingw32\bin;%PATH% + +then run `mingw32-make mingw32` in the root dir. There are other +make targets available to build libcurl with more features, use: + + - `mingw32-make mingw32-zlib` to build with Zlib support; + - `mingw32-make mingw32-ssl-zlib` to build with SSL and Zlib enabled; + - `mingw32-make mingw32-ssh2-ssl-zlib` to build with SSH2, SSL, Zlib; + - `mingw32-make mingw32-ssh2-ssl-sspi-zlib` to build with SSH2, SSL, Zlib + and SSPI support. + +If you have any problems linking libraries or finding header files, be sure +to verify that the provided "Makefile.m32" files use the proper paths, and +adjust as necessary. It is also possible to override these paths with +environment variables, for example: + + set ZLIB_PATH=c:\zlib-1.2.8 + set OPENSSL_PATH=c:\openssl-1.0.2c + set LIBSSH2_PATH=c:\libssh2-1.6.0 + +It is also possible to build with other LDAP SDKs than MS LDAP; currently +it is possible to build with native Win32 OpenLDAP, or with the Novell CLDAP +SDK. If you want to use these you need to set these vars: + + set LDAP_SDK=c:\openldap + set USE_LDAP_OPENLDAP=1 + +or for using the Novell SDK: + + set USE_LDAP_NOVELL=1 + +If you want to enable LDAPS support then set LDAPS=1. + +## Cygwin + +Almost identical to the unix installation. Run the configure script in the +curl source tree root with `sh configure`. Make sure you have the sh +executable in /bin/ or you'll see the configure fail toward the end. + +Run `make` + +## Borland C++ compiler + +Ensure that your build environment is properly set up to use the compiler and +associated tools. PATH environment variable must include the path to bin +subdirectory of your compiler installation, eg: `c:\Borland\BCC55\bin` + +It is advisable to set environment variable BCCDIR to the base path of the +compiler installation. 
+ + set BCCDIR=c:\Borland\BCC55 + +In order to build a plain vanilla version of curl and libcurl run the +following command from curl's root directory: + + make borland + +To build curl and libcurl with zlib and OpenSSL support set environment +variables `ZLIB_PATH` and `OPENSSL_PATH` to the base subdirectories of the +already built zlib and OpenSSL libraries and from curl's root directory run +command: + + make borland-ssl-zlib + +libcurl library will be built in 'lib' subdirectory while curl tool is built +in 'src' subdirectory. In order to use libcurl library it is advisable to +modify compiler's configuration file bcc32.cfg located in +`c:\Borland\BCC55\bin` to reflect the location of libraries include paths for +example the '-I' line could result in something like: + + -I"c:\Borland\BCC55\include;c:\curl\include;c:\openssl\inc32" + +bcc3.cfg `-L` line could also be modified to reflect the location of of +libcurl library resulting for example: + + -L"c:\Borland\BCC55\lib;c:\curl\lib;c:\openssl\out32" + +In order to build sample program `simple.c` from the docs\examples +subdirectory run following command from mentioned subdirectory: + + bcc32 simple.c libcurl.lib cw32mt.lib + +In order to build sample program simplessl.c an SSL enabled libcurl is +required, as well as the OpenSSL libeay32.lib and ssleay32.lib libraries. + +## Disabling Specific Protocols in Windows builds + +The configure utility, unfortunately, is not available for the Windows +environment, therefore, you cannot use the various disable-protocol options of +the configure utility on this platform. + +However, you can use the following defines to disable specific +protocols: + + - `HTTP_ONLY` disables all protocols except HTTP + - `CURL_DISABLE_FTP` disables FTP + - `CURL_DISABLE_LDAP` disables LDAP + - `CURL_DISABLE_TELNET` disables TELNET + - `CURL_DISABLE_DICT` disables DICT + - `CURL_DISABLE_FILE` disables FILE + - `CURL_DISABLE_TFTP` disables TFTP + - `CURL_DISABLE_HTTP` disables HTTP + - `CURL_DISABLE_IMAP` disables IMAP + - `CURL_DISABLE_POP3` disables POP3 + - `CURL_DISABLE_SMTP` disables SMTP + +If you want to set any of these defines you have the following options: + + - Modify lib/config-win32.h + - Modify lib/curl_setup.h + - Modify winbuild/Makefile.vc + - Modify the "Preprocessor Definitions" in the libcurl project + +Note: The pre-processor settings can be found using the Visual Studio IDE +under "Project -> Settings -> C/C++ -> General" in VC6 and "Project -> +Properties -> Configuration Properties -> C/C++ -> Preprocessor" in later +versions. + +## Using BSD-style lwIP instead of Winsock TCP/IP stack in Win32 builds + +In order to compile libcurl and curl using BSD-style lwIP TCP/IP stack it is +necessary to make definition of preprocessor symbol USE_LWIPSOCK visible to +libcurl and curl compilation processes. To set this definition you have the +following alternatives: + + - Modify lib/config-win32.h and src/config-win32.h + - Modify winbuild/Makefile.vc + - Modify the "Preprocessor Definitions" in the libcurl project + +Note: The pre-processor settings can be found using the Visual Studio IDE +under "Project -> Settings -> C/C++ -> General" in VC6 and "Project -> +Properties -> Configuration Properties -> C/C++ -> Preprocessor" in later +versions. 
+ +Once that libcurl has been built with BSD-style lwIP TCP/IP stack support, in +order to use it with your program it is mandatory that your program includes +lwIP header file `` (or another lwIP header that includes this) +before including any libcurl header. Your program does not need the +`USE_LWIPSOCK` preprocessor definition which is for libcurl internals only. + +Compilation has been verified with [lwIP +1.4.0](http://download.savannah.gnu.org/releases/lwip/lwip-1.4.0.zip) and +[contrib-1.4.0](http://download.savannah.gnu.org/releases/lwip/contrib-1.4.0.zip). + +This BSD-style lwIP TCP/IP stack support must be considered experimental given +that it has been verified that lwIP 1.4.0 still needs some polish, and libcurl +might yet need some additional adjustment, caveat emptor. + +## Important static libcurl usage note + +When building an application that uses the static libcurl library on Windows, +you must add `-DCURL_STATICLIB` to your `CFLAGS`. Otherwise the linker will +look for dynamic import symbols. + +## Legacy Windows and SSL + +WinSSL (specifically SChannel from Windows SSPI), is the native SSL library in +Windows. However, WinSSL in Windows <= XP is unable to connect to servers that +no longer support the legacy handshakes and algorithms used by those +versions. If you will be using curl in one of those earlier versions of +Windows you should choose another SSL backend such as OpenSSL. + +# Apple iOS and Mac OS X + +On modern Apple operating systems, curl can be built to use Apple's SSL/TLS +implementation, Secure Transport, instead of OpenSSL. To build with Secure +Transport for SSL/TLS, use the configure option `--with-darwinssl`. (It is not +necessary to use the option `--without-ssl`.) This feature requires iOS 5.0 or +later, or OS X 10.5 ("Leopard") or later. + +When Secure Transport is in use, the curl options `--cacert` and `--capath` +and their libcurl equivalents, will be ignored, because Secure Transport uses +the certificates stored in the Keychain to evaluate whether or not to trust +the server. This, of course, includes the root certificates that ship with the +OS. The `--cert` and `--engine` options, and their libcurl equivalents, are +currently unimplemented in curl with Secure Transport. + +For OS X users: In OS X 10.8 ("Mountain Lion"), Apple made a major overhaul to +the Secure Transport API that, among other things, added support for the newer +TLS 1.1 and 1.2 protocols. To get curl to support TLS 1.1 and 1.2, you must +build curl on Mountain Lion or later, or by using the equivalent SDK. If you +set the `MACOSX_DEPLOYMENT_TARGET` environmental variable to an earlier +version of OS X prior to building curl, then curl will use the new Secure +Transport API on Mountain Lion and later, and fall back on the older API when +the same curl binary is executed on older cats. For example, running these +commands in curl's directory in the shell will build the code such that it +will run on cats as old as OS X 10.6 ("Snow Leopard") (using bash): + + export MACOSX_DEPLOYMENT_TARGET="10.6" + ./configure --with-darwinssl + make + +# Cross compile + +Download and unpack the curl package. + +'cd' to the new directory. (e.g. `cd curl-7.12.3`) + +Set environment variables to point to the cross-compile toolchain and call +configure with any options you need. Be sure and specify the `--host` and +`--build` parameters at configuration time. 
The following script is an +example of cross-compiling for the IBM 405GP PowerPC processor using the +toolchain from MonteVista for Hardhat Linux. + + #! /bin/sh + + export PATH=$PATH:/opt/hardhat/devkit/ppc/405/bin + export CPPFLAGS="-I/opt/hardhat/devkit/ppc/405/target/usr/include" + export AR=ppc_405-ar + export AS=ppc_405-as + export LD=ppc_405-ld + export RANLIB=ppc_405-ranlib + export CC=ppc_405-gcc + export NM=ppc_405-nm + + ./configure --target=powerpc-hardhat-linux + --host=powerpc-hardhat-linux + --build=i586-pc-linux-gnu + --prefix=/opt/hardhat/devkit/ppc/405/target/usr/local + --exec-prefix=/usr/local + +You may also need to provide a parameter like `--with-random=/dev/urandom` to +configure as it cannot detect the presence of a random number generating +device for a target system. The `--prefix` parameter specifies where curl +will be installed. If `configure` completes successfully, do `make` and `make +install` as usual. + +In some cases, you may be able to simplify the above commands to as little as: + + ./configure --host=ARCH-OS + +# REDUCING SIZE + +There are a number of configure options that can be used to reduce the size of +libcurl for embedded applications where binary size is an important factor. +First, be sure to set the CFLAGS variable when configuring with any relevant +compiler optimization flags to reduce the size of the binary. For gcc, this +would mean at minimum the -Os option, and potentially the `-march=X`, +`-mdynamic-no-pic` and `-flto` options as well, e.g. + + ./configure CFLAGS='-Os' LDFLAGS='-Wl,-Bsymbolic'... + +Note that newer compilers often produce smaller code than older versions +due to improved optimization. + +Be sure to specify as many `--disable-` and `--without-` flags on the +configure command-line as you can to disable all the libcurl features that you +know your application is not going to need. Besides specifying the +`--disable-PROTOCOL` flags for all the types of URLs your application will not +use, here are some other flags that can reduce the size of the library: + + - `--disable-ares` (disables support for the C-ARES DNS library) + - `--disable-cookies` (disables support for HTTP cookies) + - `--disable-crypto-auth` (disables HTTP cryptographic authentication) + - `--disable-ipv6` (disables support for IPv6) + - `--disable-manual` (disables support for the built-in documentation) + - `--disable-proxy` (disables support for HTTP and SOCKS proxies) + - `--disable-unix-sockets` (disables support for UNIX sockets) + - `--disable-verbose` (eliminates debugging strings and error code strings) + - `--disable-versioned-symbols` (disables support for versioned symbols) + - `--enable-hidden-symbols` (eliminates unneeded symbols in the shared library) + - `--without-libidn` (disables support for the libidn DNS library) + - `--without-librtmp` (disables support for RTMP) + - `--without-ssl` (disables support for SSL/TLS) + - `--without-zlib` (disables support for on-the-fly decompression) + +The GNU compiler and linker have a number of options that can reduce the +size of the libcurl dynamic libraries on some platforms even further. +Specify them by providing appropriate CFLAGS and LDFLAGS variables on the +configure command-line, e.g. + + CFLAGS="-Os -ffunction-sections -fdata-sections + -fno-unwind-tables -fno-asynchronous-unwind-tables -flto" + LDFLAGS="-Wl,-s -Wl,-Bsymbolic -Wl,--gc-sections" + +Be sure also to strip debugging symbols from your binaries after compiling +using 'strip' (or the appropriate variant if cross-compiling). 
If space is +really tight, you may be able to remove some unneeded sections of the shared +library using the -R option to objcopy (e.g. the .comment section). + +Using these techniques it is possible to create a basic HTTP-only shared +libcurl library for i386 Linux platforms that is only 113 KiB in size, and an +FTP-only library that is 113 KiB in size (as of libcurl version 7.50.3, using +gcc 5.4.0). + +You may find that statically linking libcurl to your application will result +in a lower total size than dynamically linking. + +Note that the curl test harness can detect the use of some, but not all, of +the `--disable` statements suggested above. Use will cause tests relying on +those features to fail. The test harness can be manually forced to skip the +relevant tests by specifying certain key words on the runtests.pl command +line. Following is a list of appropriate key words: + + - `--disable-cookies` !cookies + - `--disable-manual` !--manual + - `--disable-proxy` !HTTP\ proxy !proxytunnel !SOCKS4 !SOCKS5 + +# PORTS + +This is a probably incomplete list of known hardware and operating systems +that curl has been compiled for. If you know a system curl compiles and +runs on, that isn't listed, please let us know! + + - Alpha DEC OSF 4 + - Alpha Digital UNIX v3.2 + - Alpha FreeBSD 4.1, 4.5 + - Alpha Linux 2.2, 2.4 + - Alpha NetBSD 1.5.2 + - Alpha OpenBSD 3.0 + - Alpha OpenVMS V7.1-1H2 + - Alpha Tru64 v5.0 5.1 + - AVR32 Linux + - ARM Android 1.5, 2.1, 2.3, 3.2, 4.x + - ARM INTEGRITY + - ARM iOS + - Cell Linux + - Cell Cell OS + - HP-PA HP-UX 9.X 10.X 11.X + - HP-PA Linux + - HP3000 MPE/iX + - MicroBlaze uClinux + - MIPS IRIX 6.2, 6.5 + - MIPS Linux + - OS/400 + - Pocket PC/Win CE 3.0 + - Power AIX 3.2.5, 4.2, 4.3.1, 4.3.2, 5.1, 5.2 + - PowerPC Darwin 1.0 + - PowerPC INTEGRITY + - PowerPC Linux + - PowerPC Mac OS 9 + - PowerPC Mac OS X + - SH4 Linux 2.6.X + - SH4 OS21 + - SINIX-Z v5 + - Sparc Linux + - Sparc Solaris 2.4, 2.5, 2.5.1, 2.6, 7, 8, 9, 10 + - Sparc SunOS 4.1.X + - StrongARM (and other ARM) RISC OS 3.1, 4.02 + - StrongARM/ARM7/ARM9 Linux 2.4, 2.6 + - StrongARM NetBSD 1.4.1 + - Symbian OS (P.I.P.S.) 
9.x + - TPF + - Ultrix 4.3a + - UNICOS 9.0 + - i386 BeOS + - i386 DOS + - i386 eCos 1.3.1 + - i386 Esix 4.1 + - i386 FreeBSD + - i386 HURD + - i386 Haiku OS + - i386 Linux 1.3, 2.0, 2.2, 2.3, 2.4, 2.6 + - i386 Mac OS X + - i386 MINIX 3.1 + - i386 NetBSD + - i386 Novell NetWare + - i386 OS/2 + - i386 OpenBSD + - i386 QNX 6 + - i386 SCO unix + - i386 Solaris 2.7 + - i386 Windows 95, 98, ME, NT, 2000, XP, 2003 + - i486 ncr-sysv4.3.03 (NCR MP-RAS) + - ia64 Linux 2.3.99 + - m68k AmigaOS 3 + - m68k Linux + - m68k uClinux + - m68k OpenBSD + - m88k dg-dgux5.4R3.00 + - s390 Linux + - x86_64 Linux + - XScale/PXA250 Linux 2.4 + - Nios II uClinux diff --git a/deps-win32/curl-7.54.1/docs/INTERNALS.md b/deps-win32/curl-7.54.1/docs/INTERNALS.md new file mode 100644 index 0000000..a733e1f --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/INTERNALS.md @@ -0,0 +1,1093 @@ +curl internals +============== + + - [Intro](#intro) + - [git](#git) + - [Portability](#Portability) + - [Windows vs Unix](#winvsunix) + - [Library](#Library) + - [`Curl_connect`](#Curl_connect) + - [`Curl_do`](#Curl_do) + - [`Curl_readwrite`](#Curl_readwrite) + - [`Curl_done`](#Curl_done) + - [`Curl_disconnect`](#Curl_disconnect) + - [HTTP(S)](#http) + - [FTP](#ftp) + - [Kerberos](#kerberos) + - [TELNET](#telnet) + - [FILE](#file) + - [SMB](#smb) + - [LDAP](#ldap) + - [E-mail](#email) + - [General](#general) + - [Persistent Connections](#persistent) + - [multi interface/non-blocking](#multi) + - [SSL libraries](#ssl) + - [Library Symbols](#symbols) + - [Return Codes and Informationals](#returncodes) + - [AP/ABI](#abi) + - [Client](#client) + - [Memory Debugging](#memorydebug) + - [Test Suite](#test) + - [Asynchronous name resolves](#asyncdns) + - [c-ares](#cares) + - [`curl_off_t`](#curl_off_t) + - [curlx](#curlx) + - [Content Encoding](#contentencoding) + - [hostip.c explained](#hostip) + - [Track Down Memory Leaks](#memoryleak) + - [`multi_socket`](#multi_socket) + - [Structs in libcurl](#structs) + + +Intro +===== + + This project is split in two. The library and the client. The client part + uses the library, but the library is designed to allow other applications to + use it. + + The largest amount of code and complexity is in the library part. + + + +git +=== + + All changes to the sources are committed to the git repository as soon as + they're somewhat verified to work. Changes shall be committed as independently + as possible so that individual changes can be easily spotted and tracked + afterwards. + + Tagging shall be used extensively, and by the time we release new archives we + should tag the sources with a name similar to the released version number. + + +Portability +=========== + + We write curl and libcurl to compile with C89 compilers. On 32bit and up + machines. Most of libcurl assumes more or less POSIX compliance but that's + not a requirement. + + We write libcurl to build and work with lots of third party tools, and we + want it to remain functional and buildable with these and later versions + (older versions may still work but is not what we work hard to maintain): + +Dependencies +------------ + + - OpenSSL 0.9.7 + - GnuTLS 1.2 + - zlib 1.1.4 + - libssh2 0.16 + - c-ares 1.6.0 + - libidn 0.4.1 + - cyassl 2.0.0 + - openldap 2.0 + - MIT Kerberos 1.2.4 + - GSKit V5R3M0 + - NSS 3.14.x + - axTLS 2.1.0 + - PolarSSL 1.3.0 + - Heimdal ? + - nghttp2 1.0.0 + +Operating Systems +----------------- + + On systems where configure runs, we aim at working on them all - if they have + a suitable C compiler. 
On systems that don't run configure, we strive to keep + curl running correctly on: + + - Windows 98 + - AS/400 V5R3M0 + - Symbian 9.1 + - Windows CE ? + - TPF ? + +Build tools +----------- + + When writing code (mostly for generating stuff included in release tarballs) + we use a few "build tools" and we make sure that we remain functional with + these versions: + + - GNU Libtool 1.4.2 + - GNU Autoconf 2.57 + - GNU Automake 1.7 + - GNU M4 1.4 + - perl 5.004 + - roffit 0.5 + - groff ? (any version that supports "groff -Tps -man [in] [out]") + - ps2pdf (gs) ? + + +Windows vs Unix +=============== + + There are a few differences in how to program curl the Unix way compared to + the Windows way. Perhaps the four most notable details are: + + 1. Different function names for socket operations. + + In curl, this is solved with defines and macros, so that the source looks + the same in all places except for the header file that defines them. The + macros in use are sclose(), sread() and swrite(). + + 2. Windows requires a couple of init calls for the socket stuff. + + That's taken care of by the `curl_global_init()` call, but if other libs + also do it etc there might be reasons for applications to alter that + behaviour. + + 3. The file descriptors for network communication and file operations are + not as easily interchangeable as in Unix. + + We avoid this by not trying any funny tricks on file descriptors. + + 4. When writing data to stdout, Windows makes end-of-lines the DOS way, thus + destroying binary data, although you do want that conversion if it is + text coming through... (sigh) + + We set stdout to binary under windows + + Inside the source code, We make an effort to avoid `#ifdef [Your OS]`. All + conditionals that deal with features *should* instead be in the format + `#ifdef HAVE_THAT_WEIRD_FUNCTION`. Since Windows can't run configure scripts, + we maintain a `curl_config-win32.h` file in lib directory that is supposed to + look exactly like a `curl_config.h` file would have looked like on a Windows + machine! + + Generally speaking: always remember that this will be compiled on dozens of + operating systems. Don't walk on the edge! + + +Library +======= + + (See [Structs in libcurl](#structs) for the separate section describing all + major internal structs and their purposes.) + + There are plenty of entry points to the library, namely each publicly defined + function that libcurl offers to applications. All of those functions are + rather small and easy-to-follow. All the ones prefixed with `curl_easy` are + put in the lib/easy.c file. + + `curl_global_init()` and `curl_global_cleanup()` should be called by the + application to initialize and clean up global stuff in the library. As of + today, it can handle the global SSL initing if SSL is enabled and it can init + the socket layer on windows machines. libcurl itself has no "global" scope. + + All printf()-style functions use the supplied clones in lib/mprintf.c. This + makes sure we stay absolutely platform independent. + + [ `curl_easy_init()`][2] allocates an internal struct and makes some + initializations. The returned handle does not reveal internals. This is the + `Curl_easy` struct which works as an "anchor" struct for all `curl_easy` + functions. All connections performed will get connect-specific data allocated + that should be used for things related to particular connections/requests. 
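 As a minimal application-side sketch of these entry points (not from the
 curl sources; the URL is a placeholder and error checking is omitted), the
 typical call sequence looks like this:

    #include <curl/curl.h>

    int main(void)
    {
      CURL *easy;

      /* global init: handles SSL and (on Windows) socket layer setup */
      curl_global_init(CURL_GLOBAL_DEFAULT);

      easy = curl_easy_init();              /* allocates the Curl_easy "anchor" */
      curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
      curl_easy_perform(easy);              /* drives the whole transfer */

      curl_easy_cleanup(easy);
      curl_global_cleanup();
      return 0;
    }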
+ + [`curl_easy_setopt()`][1] takes three arguments, where the option stuff must + be passed in pairs: the parameter-ID and the parameter-value. The list of + options is documented in the man page. This function mainly sets things in + the `Curl_easy` struct. + + `curl_easy_perform()` is just a wrapper function that makes use of the multi + API. It basically calls `curl_multi_init()`, `curl_multi_add_handle()`, + `curl_multi_wait()`, and `curl_multi_perform()` until the transfer is done + and then returns. + + Some of the most important key functions in url.c are called from multi.c + when certain key steps are to be made in the transfer operation. + + +Curl_connect() +-------------- + + Analyzes the URL, it separates the different components and connects to the + remote host. This may involve using a proxy and/or using SSL. The + `Curl_resolv()` function in lib/hostip.c is used for looking up host names + (it does then use the proper underlying method, which may vary between + platforms and builds). + + When `Curl_connect` is done, we are connected to the remote site. Then it + is time to tell the server to get a document/file. `Curl_do()` arranges + this. + + This function makes sure there's an allocated and initiated 'connectdata' + struct that is used for this particular connection only (although there may + be several requests performed on the same connect). A bunch of things are + inited/inherited from the `Curl_easy` struct. + + +Curl_do() +--------- + + `Curl_do()` makes sure the proper protocol-specific function is called. The + functions are named after the protocols they handle. + + The protocol-specific functions of course deal with protocol-specific + negotiations and setup. They have access to the `Curl_sendf()` (from + lib/sendf.c) function to send printf-style formatted data to the remote + host and when they're ready to make the actual file transfer they call the + `Curl_Transfer()` function (in lib/transfer.c) to setup the transfer and + returns. + + If this DO function fails and the connection is being re-used, libcurl will + then close this connection, setup a new connection and re-issue the DO + request on that. This is because there is no way to be perfectly sure that + we have discovered a dead connection before the DO function and thus we + might wrongly be re-using a connection that was closed by the remote peer. + + Some time during the DO function, the `Curl_setup_transfer()` function must + be called with some basic info about the upcoming transfer: what socket(s) + to read/write and the expected file transfer sizes (if known). + + +Curl_readwrite() +---------------- + + Called during the transfer of the actual protocol payload. + + During transfer, the progress functions in lib/progress.c are called at + frequent intervals (or at the user's choice, a specified callback might get + called). The speedcheck functions in lib/speedcheck.c are also used to + verify that the transfer is as fast as required. + + +Curl_done() +----------- + + Called after a transfer is done. This function takes care of everything + that has to be done after a transfer. This function attempts to leave + matters in a state so that `Curl_do()` should be possible to call again on + the same connection (in a persistent connection case). It might also soon + be closed with `Curl_disconnect()`. 
+ + +Curl_disconnect() +----------------- + + When doing normal connections and transfers, no one ever tries to close any + connections so this is not normally called when `curl_easy_perform()` is + used. This function is only used when we are certain that no more transfers + are going to be made on the connection. It can be also closed by force, or + it can be called to make sure that libcurl doesn't keep too many + connections alive at the same time. + + This function cleans up all resources that are associated with a single + connection. + + +HTTP(S) +======= + + HTTP offers a lot and is the protocol in curl that uses the most lines of + code. There is a special file (lib/formdata.c) that offers all the multipart + post functions. + + base64-functions for user+password stuff (and more) is in (lib/base64.c) and + all functions for parsing and sending cookies are found in (lib/cookie.c). + + HTTPS uses in almost every case the same procedure as HTTP, with only two + exceptions: the connect procedure is different and the function used to read + or write from the socket is different, although the latter fact is hidden in + the source by the use of `Curl_read()` for reading and `Curl_write()` for + writing data to the remote server. + + `http_chunks.c` contains functions that understands HTTP 1.1 chunked transfer + encoding. + + An interesting detail with the HTTP(S) request, is the `Curl_add_buffer()` + series of functions we use. They append data to one single buffer, and when + the building is finished the entire request is sent off in one single write. This is done this way to overcome problems with flawed firewalls and lame servers. + + +FTP +=== + + The `Curl_if2ip()` function can be used for getting the IP number of a + specified network interface, and it resides in lib/if2ip.c. + + `Curl_ftpsendf()` is used for sending FTP commands to the remote server. It + was made a separate function to prevent us programmers from forgetting that + they must be CRLF terminated. They must also be sent in one single write() to + make firewalls and similar happy. + + +Kerberos +-------- + + Kerberos support is mainly in lib/krb5.c and lib/security.c but also + `curl_sasl_sspi.c` and `curl_sasl_gssapi.c` for the email protocols and + `socks_gssapi.c` and `socks_sspi.c` for SOCKS5 proxy specifics. + + +TELNET +====== + + Telnet is implemented in lib/telnet.c. + + +FILE +==== + + The file:// protocol is dealt with in lib/file.c. + + +SMB +=== + + The smb:// protocol is dealt with in lib/smb.c. + + +LDAP +==== + + Everything LDAP is in lib/ldap.c and lib/openldap.c + + +E-mail +====== + + The e-mail related source code is in lib/imap.c, lib/pop3.c and lib/smtp.c. + + +General +======= + + URL encoding and decoding, called escaping and unescaping in the source code, + is found in lib/escape.c. + + While transferring data in Transfer() a few functions might get used. + `curl_getdate()` in lib/parsedate.c is for HTTP date comparisons (and more). + + lib/getenv.c offers `curl_getenv()` which is for reading environment + variables in a neat platform independent way. That's used in the client, but + also in lib/url.c when checking the proxy environment variables. Note that + contrary to the normal unix getenv(), this returns an allocated buffer that + must be free()ed after use. + + lib/netrc.c holds the .netrc parser + + lib/timeval.c features replacement functions for systems that don't have + gettimeofday() and a few support functions for timeval conversions. 
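 To illustrate the `curl_getenv()` note above, a small sketch (not from the
 curl sources; the variable name is just an example): unlike the normal
 getenv(), the returned string is an allocated copy that the caller must
 release.

    #include <stdio.h>
    #include <stdlib.h>
    #include <curl/curl.h>

    int main(void)
    {
      /* returns an allocated copy, or NULL if the variable isn't set */
      char *proxy = curl_getenv("http_proxy");
      if(proxy) {
        printf("http_proxy=%s\n", proxy);
        free(proxy);      /* must be free()ed after use, as noted above */
      }
      return 0;
    }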
+ + A function named `curl_version()` that returns the full curl version string + is found in lib/version.c. + + +Persistent Connections +====================== + + The persistent connection support in libcurl requires some considerations on + how to do things inside of the library. + + - The `Curl_easy` struct returned in the [`curl_easy_init()`][2] call + must never hold connection-oriented data. It is meant to hold the root data + as well as all the options etc that the library-user may choose. + + - The `Curl_easy` struct holds the "connection cache" (an array of + pointers to 'connectdata' structs). + + - This enables the 'curl handle' to be reused on subsequent transfers. + + - When libcurl is told to perform a transfer, it first checks for an already + existing connection in the cache that we can use. Otherwise it creates a + new one and adds that to the cache. If the cache is full already when a new + connection is added, it will first close the oldest unused one. + + - When the transfer operation is complete, the connection is left + open. Particular options may tell libcurl not to, and protocols may signal + closure on connections and then they won't be kept open, of course. + + - When `curl_easy_cleanup()` is called, we close all still opened connections, + unless of course the multi interface "owns" the connections. + + The curl handle must be re-used in order for the persistent connections to + work. + + +multi interface/non-blocking +============================ + + The multi interface is a non-blocking interface to the library. To make that + interface work as well as possible, no low-level functions within libcurl + must be written to work in a blocking manner. (There are still a few spots + violating this rule.) + + One of the primary reasons we introduced c-ares support was to allow the name + resolve phase to be perfectly non-blocking as well. + + The FTP and the SFTP/SCP protocols are examples of how we adapt and adjust + the code to allow non-blocking operations even on multi-stage command- + response protocols. They are built around state machines that return when + they would otherwise block waiting for data. The DICT, LDAP and TELNET + protocols are crappy examples and they are subject for rewrite in the future + to better fit the libcurl protocol family. + + +SSL libraries +============= + + Originally libcurl supported SSLeay for SSL/TLS transports, but that was then + extended to its successor OpenSSL but has since also been extended to several + other SSL/TLS libraries and we expect and hope to further extend the support + in future libcurl versions. + + To deal with this internally in the best way possible, we have a generic SSL + function API as provided by the vtls/vtls.[ch] system, and they are the only + SSL functions we must use from within libcurl. vtls is then crafted to use + the appropriate lower-level function calls to whatever SSL library that is in + use. For example vtls/openssl.[ch] for the OpenSSL library. + + +Library Symbols +=============== + + All symbols used internally in libcurl must use a `Curl_` prefix if they're + used in more than a single file. Single-file symbols must be made static. + Public ("exported") symbols must use a `curl_` prefix. (There are exceptions, + but they are to be changed to follow this pattern in future versions.) Public + API functions are marked with `CURL_EXTERN` in the public header files so + that all others can be hidden on platforms where this is possible. 
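 As a purely illustrative sketch of this naming convention (the
 `Curl_example_helper` and `example_local_helper` names below are made up;
 `curl_easy_perform()` is a real public symbol):

    #include <stdio.h>
    #include <curl/curl.h>

    struct connectdata;  /* internal libcurl struct, named only for illustration */

    /* public symbol: curl_ prefix, exported with CURL_EXTERN in the public
       headers (this mirrors the declaration already made by curl/curl.h) */
    CURL_EXTERN CURLcode curl_easy_perform(CURL *curl);

    /* hypothetical internal symbol: Curl_ prefix, because it would be used
       from more than one libcurl source file; it is never exported */
    CURLcode Curl_example_helper(struct connectdata *conn);

    /* hypothetical single-file helper: static, so no prefix rule applies */
    static int example_local_helper(int value)
    {
      return value + 1;
    }

    int main(void)
    {
      printf("%d\n", example_local_helper(41));
      return 0;
    }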
+ + +Return Codes and Informationals +=============================== + + I've made things simple. Almost every function in libcurl returns a CURLcode, + that must be `CURLE_OK` if everything is OK or otherwise a suitable error + code as the curl/curl.h include file defines. The very spot that detects an + error must use the `Curl_failf()` function to set the human-readable error + description. + + In aiding the user to understand what's happening and to debug curl usage, we + must supply a fair number of informational messages by using the + `Curl_infof()` function. Those messages are only displayed when the user + explicitly asks for them. They are best used when revealing information that + isn't otherwise obvious. + + +API/ABI +======= + + We make an effort to not export or show internals or how internals work, as + that makes it easier to keep a solid API/ABI over time. See docs/libcurl/ABI + for our promise to users. + + +Client +====== + + main() resides in `src/tool_main.c`. + + `src/tool_hugehelp.c` is automatically generated by the mkhelp.pl perl script + to display the complete "manual" and the `src/tool_urlglob.c` file holds the + functions used for the URL-"globbing" support. Globbing in the sense that the + {} and [] expansion stuff is there. + + The client mostly sets up its 'config' struct properly, then + it calls the `curl_easy_*()` functions of the library and when it gets back + control after the `curl_easy_perform()` it cleans up the library, checks + status and exits. + + When the operation is done, the ourWriteOut() function in src/writeout.c may + be called to report about the operation. That function is using the + `curl_easy_getinfo()` function to extract useful information from the curl + session. + + It may loop and do all this several times if many URLs were specified on the + command line or config file. + + +Memory Debugging +================ + + The file lib/memdebug.c contains debug-versions of a few functions. Functions + such as malloc, free, fopen, fclose, etc that somehow deal with resources + that might give us problems if we "leak" them. The functions in the memdebug + system do nothing fancy, they do their normal function and then log + information about what they just did. The logged data can then be analyzed + after a complete session, + + memanalyze.pl is the perl script present in tests/ that analyzes a log file + generated by the memory tracking system. It detects if resources are + allocated but never freed and other kinds of errors related to resource + management. + + Internally, definition of preprocessor symbol DEBUGBUILD restricts code which + is only compiled for debug enabled builds. And symbol CURLDEBUG is used to + differentiate code which is _only_ used for memory tracking/debugging. + + Use -DCURLDEBUG when compiling to enable memory debugging, this is also + switched on by running configure with --enable-curldebug. Use -DDEBUGBUILD + when compiling to enable a debug build or run configure with --enable-debug. + + curl --version will list 'Debug' feature for debug enabled builds, and + will list 'TrackMemory' feature for curl debug memory tracking capable + builds. These features are independent and can be controlled when running + the configure script. When --enable-debug is given both features will be + enabled, unless some restriction prevents memory tracking from being used. 
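 As a rough sketch of how an application can feed this system (an assumption
 on our part, not a public API: curl_memdebug() is an internal,
 debug-build-only function from lib/memdebug.h, so the prototype below is
 declared by hand and the program must be linked against a libcurl built with
 --enable-curldebug; see also "Track Down Memory Leaks" further down):

    #include <curl/curl.h>

    /* debug-build-only symbol from lib/memdebug.h; assumed prototype */
    extern void curl_memdebug(const char *logname);

    int main(void)
    {
      CURL *easy;

      curl_memdebug("dump");   /* log all tracked resources to the file "dump" */

      curl_global_init(CURL_GLOBAL_DEFAULT);
      easy = curl_easy_init();
      curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
      curl_easy_perform(easy);
      curl_easy_cleanup(easy);
      curl_global_cleanup();

      /* afterwards: tests/memanalyze.pl dump */
      return 0;
    }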
+ + +Test Suite +========== + + The test suite is placed in its own subdirectory directly off the root in the + curl archive tree, and it contains a bunch of scripts and a lot of test case + data. + + The main test script is runtests.pl that will invoke test servers like + httpserver.pl and ftpserver.pl before all the test cases are performed. The + test suite currently only runs on Unix-like platforms. + + You'll find a description of the test suite in the tests/README file, and the + test case data files in the tests/FILEFORMAT file. + + The test suite automatically detects if curl was built with the memory + debugging enabled, and if it was, it will detect memory leaks, too. + + +Asynchronous name resolves +========================== + + libcurl can be built to do name resolves asynchronously, using either the + normal resolver in a threaded manner or by using c-ares. + + +[c-ares][3] +------ + +### Build libcurl to use a c-ares + +1. ./configure --enable-ares=/path/to/ares/install +2. make + +### c-ares on win32 + + First I compiled c-ares. I changed the default C runtime library to be the + single-threaded rather than the multi-threaded (this seems to be required to + prevent linking errors later on). Then I simply build the areslib project + (the other projects adig/ahost seem to fail under MSVC). + + Next was libcurl. I opened lib/config-win32.h and I added a: + `#define USE_ARES 1` + + Next thing I did was I added the path for the ares includes to the include + path, and the libares.lib to the libraries. + + Lastly, I also changed libcurl to be single-threaded rather than + multi-threaded, again this was to prevent some duplicate symbol errors. I'm + not sure why I needed to change everything to single-threaded, but when I + didn't I got redefinition errors for several CRT functions (malloc, stricmp, + etc.) + + +`curl_off_t` +========== + + `curl_off_t` is a data type provided by the external libcurl include + headers. It is the type meant to be used for the [`curl_easy_setopt()`][1] + options that end with LARGE. The type is 64bit large on most modern + platforms. + +curlx +===== + + The libcurl source code offers a few functions by source only. They are not + part of the official libcurl API, but the source files might be useful for + others so apps can optionally compile/build with these sources to gain + additional functions. + + We provide them through a single header file for easy access for apps: + "curlx.h" + +`curlx_strtoofft()` +------------------- + A macro that converts a string containing a number to a `curl_off_t` number. + This might use the `curlx_strtoll()` function which is provided as source + code in strtoofft.c. Note that the function is only provided if no + strtoll() (or equivalent) function exist on your platform. If `curl_off_t` + is only a 32 bit number on your platform, this macro uses strtol(). + +`curlx_tvnow()` +--------------- + returns a struct timeval for the current time. + +`curlx_tvdiff()` +-------------- + returns the difference between two timeval structs, in number of + milliseconds. + +`curlx_tvdiff_secs()` +--------------------- + returns the same as `curlx_tvdiff` but with full usec resolution (as a + double) + +Future +------ + + Several functions will be removed from the public `curl_` name space in a + future libcurl release. They will then only become available as `curlx_` + functions instead. 
To make the transition easier, we already today provide
+ these functions with the `curlx_` prefix to allow sources to be built
+ properly with the new function names. The concerned functions are:
+
+ - `curlx_getenv`
+ - `curlx_strequal`
+ - `curlx_strnequal`
+ - `curlx_mvsnprintf`
+ - `curlx_msnprintf`
+ - `curlx_maprintf`
+ - `curlx_mvaprintf`
+ - `curlx_msprintf`
+ - `curlx_mprintf`
+ - `curlx_mfprintf`
+ - `curlx_mvsprintf`
+ - `curlx_mvprintf`
+ - `curlx_mvfprintf`
+
+
+Content Encoding
+================
+
+## About content encodings
+
+ [HTTP/1.1][4] specifies that a client may request that a server encode its
+ response. This is usually used to compress a response using one of a set of
+ commonly available compression techniques. These schemes are 'deflate' (the
+ zlib algorithm), 'gzip' and 'compress'. A client requests that the server
+ perform an encoding by including an Accept-Encoding header in the request
+ document. The value of the header should be one of the recognized tokens
+ 'deflate', ... (there's a way to register new schemes/tokens, see sec 3.5 of
+ the spec). A server MAY honor the client's encoding request. When a response
+ is encoded, the server includes a Content-Encoding header in the
+ response. The value of the Content-Encoding header indicates which scheme was
+ used to encode the data.
+
+ A client may tell a server that it can understand several different encoding
+ schemes. In this case the server may choose any one of those and use it to
+ encode the response (indicating which one using the Content-Encoding header).
+ It's also possible for a client to attach priorities to different schemes so
+ that the server knows which it prefers. See sec 14.3 of RFC 2616 for more
+ information on the Accept-Encoding header.
+
+## Supported content encodings
+
+ The 'deflate' and 'gzip' content encodings are supported by libcurl. Both
+ regular and chunked transfers work fine. The zlib library is required for
+ this feature.
+
+## The libcurl interface
+
+ To cause libcurl to request a content encoding use:
+
+ [`curl_easy_setopt`][1](curl, [`CURLOPT_ACCEPT_ENCODING`][5], string)
+
+ where string is the intended value of the Accept-Encoding header.
+
+ Currently, libcurl only understands how to process responses that use the
+ "deflate" or "gzip" Content-Encoding, so the only values for
+ [`CURLOPT_ACCEPT_ENCODING`][5] that will work (besides "identity," which does
+ nothing) are "deflate" and "gzip". If a response is encoded using the
+ "compress" or another method, libcurl will return an error indicating that the
+ response could not be decoded. If the string is NULL, no Accept-Encoding header
+ is generated. If the string is zero-length, then an Accept-Encoding
+ header containing all supported encodings will be generated.
+
+ The [`CURLOPT_ACCEPT_ENCODING`][5] option must be set to a non-NULL value for
+ content to be automatically decoded. If it is not set and the server still
+ sends encoded content (despite not having been asked), the data is returned
+ in its raw form and the Content-Encoding type is not checked.
+
+## The curl interface
+
+ Use the [--compressed][6] option with curl to cause it to ask servers to
+ compress responses using any format supported by curl.
+
+
+hostip.c explained
+==================
+
+ The main compile-time defines to keep in mind when reading the host*.c source
+ file are these:
+
+## `CURLRES_IPV6`
+
+ this host has getaddrinfo() and family, and thus we use that. 
The host may + not be able to resolve IPv6, but we don't really have to take that into + account. Hosts that aren't IPv6-enabled have `CURLRES_IPV4` defined. + +## `CURLRES_ARES` + + is defined if libcurl is built to use c-ares for asynchronous name + resolves. This can be Windows or *nix. + +## `CURLRES_THREADED` + + is defined if libcurl is built to use threading for asynchronous name + resolves. The name resolve will be done in a new thread, and the supported + asynch API will be the same as for ares-builds. This is the default under + (native) Windows. + + If any of the two previous are defined, `CURLRES_ASYNCH` is defined too. If + libcurl is not built to use an asynchronous resolver, `CURLRES_SYNCH` is + defined. + +## host*.c sources + + The host*.c sources files are split up like this: + + - hostip.c - method-independent resolver functions and utility functions + - hostasyn.c - functions for asynchronous name resolves + - hostsyn.c - functions for synchronous name resolves + - asyn-ares.c - functions for asynchronous name resolves using c-ares + - asyn-thread.c - functions for asynchronous name resolves using threads + - hostip4.c - IPv4 specific functions + - hostip6.c - IPv6 specific functions + + The hostip.h is the single united header file for all this. It defines the + `CURLRES_*` defines based on the config*.h and `curl_setup.h` defines. + + +Track Down Memory Leaks +======================= + +## Single-threaded + + Please note that this memory leak system is not adjusted to work in more + than one thread. If you want/need to use it in a multi-threaded app. Please + adjust accordingly. + + +## Build + + Rebuild libcurl with -DCURLDEBUG (usually, rerunning configure with + --enable-debug fixes this). 'make clean' first, then 'make' so that all + files are actually rebuilt properly. It will also make sense to build + libcurl with the debug option (usually -g to the compiler) so that debugging + it will be easier if you actually do find a leak in the library. + + This will create a library that has memory debugging enabled. + +## Modify Your Application + + Add a line in your application code: + + `curl_memdebug("dump");` + + This will make the malloc debug system output a full trace of all resource + using functions to the given file name. Make sure you rebuild your program + and that you link with the same libcurl you built for this purpose as + described above. + +## Run Your Application + + Run your program as usual. Watch the specified memory trace file grow. + + Make your program exit and use the proper libcurl cleanup functions etc. So + that all non-leaks are returned/freed properly. + +## Analyze the Flow + + Use the tests/memanalyze.pl perl script to analyze the dump file: + + tests/memanalyze.pl dump + + This now outputs a report on what resources that were allocated but never + freed etc. This report is very fine for posting to the list! + + If this doesn't produce any output, no leak was detected in libcurl. Then + the leak is mostly likely to be in your code. + + +`multi_socket` +============== + + Implementation of the `curl_multi_socket` API + + The main ideas of this API are simply: + + 1 - The application can use whatever event system it likes as it gets info + from libcurl about what file descriptors libcurl waits for what action + on. (The previous API returns `fd_sets` which is very select()-centric). 
+ + 2 - When the application discovers action on a single socket, it calls + libcurl and informs that there was action on this particular socket and + libcurl can then act on that socket/transfer only and not care about + any other transfers. (The previous API always had to scan through all + the existing transfers.) + + The idea is that [`curl_multi_socket_action()`][7] calls a given callback + with information about what socket to wait for what action on, and the + callback only gets called if the status of that socket has changed. + + We also added a timer callback that makes libcurl call the application when + the timeout value changes, and you set that with [`curl_multi_setopt()`][9] + and the [`CURLMOPT_TIMERFUNCTION`][10] option. To get this to work, + Internally, there's an added struct to each easy handle in which we store + an "expire time" (if any). The structs are then "splay sorted" so that we + can add and remove times from the linked list and yet somewhat swiftly + figure out both how long there is until the next nearest timer expires + and which timer (handle) we should take care of now. Of course, the upside + of all this is that we get a [`curl_multi_timeout()`][8] that should also + work with old-style applications that use [`curl_multi_perform()`][11]. + + We created an internal "socket to easy handles" hash table that given + a socket (file descriptor) returns the easy handle that waits for action on + that socket. This hash is made using the already existing hash code + (previously only used for the DNS cache). + + To make libcurl able to report plain sockets in the socket callback, we had + to re-organize the internals of the [`curl_multi_fdset()`][12] etc so that + the conversion from sockets to `fd_sets` for that function is only done in + the last step before the data is returned. I also had to extend c-ares to + get a function that can return plain sockets, as that library too returned + only `fd_sets` and that is no longer good enough. The changes done to c-ares + are available in c-ares 1.3.1 and later. + + +Structs in libcurl +================== + +This section should cover 7.32.0 pretty accurately, but will make sense even +for older and later versions as things don't change drastically that often. + +## Curl_easy + + The `Curl_easy` struct is the one returned to the outside in the external API + as a "CURL *". This is usually known as an easy handle in API documentations + and examples. + + Information and state that is related to the actual connection is in the + 'connectdata' struct. When a transfer is about to be made, libcurl will + either create a new connection or re-use an existing one. The particular + connectdata that is used by this handle is pointed out by + `Curl_easy->easy_conn`. + + Data and information that regard this particular single transfer is put in + the SingleRequest sub-struct. + + When the `Curl_easy` struct is added to a multi handle, as it must be in + order to do any transfer, the ->multi member will point to the `Curl_multi` + struct it belongs to. The ->prev and ->next members will then be used by the + multi code to keep a linked list of `Curl_easy` structs that are added to + that same multi handle. libcurl always uses multi so ->multi *will* point to + a `Curl_multi` when a transfer is in progress. + + ->mstate is the multi state of this particular `Curl_easy`. When + `multi_runsingle()` is called, it will act on this handle according to which + state it is in. 
The mstate is also what tells which sockets to return for a + specific `Curl_easy` when [`curl_multi_fdset()`][12] is called etc. + + The libcurl source code generally use the name 'data' for the variable that + points to the `Curl_easy`. + + When doing multiplexed HTTP/2 transfers, each `Curl_easy` is associated with + an individual stream, sharing the same connectdata struct. Multiplexing + makes it even more important to keep things associated with the right thing! + +## connectdata + + A general idea in libcurl is to keep connections around in a connection + "cache" after they have been used in case they will be used again and then + re-use an existing one instead of creating a new as it creates a significant + performance boost. + + Each 'connectdata' identifies a single physical connection to a server. If + the connection can't be kept alive, the connection will be closed after use + and then this struct can be removed from the cache and freed. + + Thus, the same `Curl_easy` can be used multiple times and each time select + another connectdata struct to use for the connection. Keep this in mind, as + it is then important to consider if options or choices are based on the + connection or the `Curl_easy`. + + Functions in libcurl will assume that connectdata->data points to the + `Curl_easy` that uses this connection (for the moment). + + As a special complexity, some protocols supported by libcurl require a + special disconnect procedure that is more than just shutting down the + socket. It can involve sending one or more commands to the server before + doing so. Since connections are kept in the connection cache after use, the + original `Curl_easy` may no longer be around when the time comes to shut down + a particular connection. For this purpose, libcurl holds a special dummy + `closure_handle` `Curl_easy` in the `Curl_multi` struct to use when needed. + + FTP uses two TCP connections for a typical transfer but it keeps both in + this single struct and thus can be considered a single connection for most + internal concerns. + + The libcurl source code generally use the name 'conn' for the variable that + points to the connectdata. + +## Curl_multi + + Internally, the easy interface is implemented as a wrapper around multi + interface functions. This makes everything multi interface. + + `Curl_multi` is the multi handle struct exposed as "CURLM *" in external + APIs. + + This struct holds a list of `Curl_easy` structs that have been added to this + handle with [`curl_multi_add_handle()`][13]. The start of the list is + `->easyp` and `->num_easy` is a counter of added `Curl_easy`s. + + `->msglist` is a linked list of messages to send back when + [`curl_multi_info_read()`][14] is called. Basically a node is added to that + list when an individual `Curl_easy`'s transfer has completed. + + `->hostcache` points to the name cache. It is a hash table for looking up + name to IP. The nodes have a limited life time in there and this cache is + meant to reduce the time for when the same name is wanted within a short + period of time. + + `->timetree` points to a tree of `Curl_easy`s, sorted by the remaining time + until it should be checked - normally some sort of timeout. Each `Curl_easy` + has one node in the tree. + + `->sockhash` is a hash table to allow fast lookups of socket descriptor for + which `Curl_easy` uses that descriptor. This is necessary for the + `multi_socket` API. + + `->conn_cache` points to the connection cache. 
It keeps track of all + connections that are kept after use. The cache has a maximum size. + + `->closure_handle` is described in the 'connectdata' section. + + The libcurl source code generally use the name 'multi' for the variable that + points to the `Curl_multi` struct. + +## Curl_handler + + Each unique protocol that is supported by libcurl needs to provide at least + one `Curl_handler` struct. It defines what the protocol is called and what + functions the main code should call to deal with protocol specific issues. + In general, there's a source file named [protocol].c in which there's a + "struct `Curl_handler` `Curl_handler_[protocol]`" declared. In url.c there's + then the main array with all individual `Curl_handler` structs pointed to + from a single array which is scanned through when a URL is given to libcurl + to work with. + + `->scheme` is the URL scheme name, usually spelled out in uppercase. That's + "HTTP" or "FTP" etc. SSL versions of the protocol need their own `Curl_handler` setup so HTTPS separate from HTTP. + + `->setup_connection` is called to allow the protocol code to allocate + protocol specific data that then gets associated with that `Curl_easy` for + the rest of this transfer. It gets freed again at the end of the transfer. + It will be called before the 'connectdata' for the transfer has been + selected/created. Most protocols will allocate its private + 'struct [PROTOCOL]' here and assign `Curl_easy->req.protop` to point to it. + + `->connect_it` allows a protocol to do some specific actions after the TCP + connect is done, that can still be considered part of the connection phase. + + Some protocols will alter the `connectdata->recv[]` and + `connectdata->send[]` function pointers in this function. + + `->connecting` is similarly a function that keeps getting called as long as + the protocol considers itself still in the connecting phase. + + `->do_it` is the function called to issue the transfer request. What we call + the DO action internally. If the DO is not enough and things need to be kept + getting done for the entire DO sequence to complete, `->doing` is then + usually also provided. Each protocol that needs to do multiple commands or + similar for do/doing need to implement their own state machines (see SCP, + SFTP, FTP). Some protocols (only FTP and only due to historical reasons) has + a separate piece of the DO state called `DO_MORE`. + + `->doing` keeps getting called while issuing the transfer request command(s) + + `->done` gets called when the transfer is complete and DONE. That's after the + main data has been transferred. + + `->do_more` gets called during the `DO_MORE` state. The FTP protocol uses + this state when setting up the second connection. + + ->`proto_getsock` + ->`doing_getsock` + ->`domore_getsock` + ->`perform_getsock` + Functions that return socket information. Which socket(s) to wait for which + action(s) during the particular multi state. + + ->disconnect is called immediately before the TCP connection is shutdown. + + ->readwrite gets called during transfer to allow the protocol to do extra + reads/writes + + ->defport is the default report TCP or UDP port this protocol uses + + ->protocol is one or more bits in the `CURLPROTO_*` set. The SSL versions + have their "base" protocol set and then the SSL variation. Like + "HTTP|HTTPS". 
+ + ->flags is a bitmask with additional information about the protocol that will + make it get treated differently by the generic engine: + + - `PROTOPT_SSL` - will make it connect and negotiate SSL + + - `PROTOPT_DUAL` - this protocol uses two connections + + - `PROTOPT_CLOSEACTION` - this protocol has actions to do before closing the + connection. This flag is no longer used by code, yet still set for a bunch + of protocol handlers. + + - `PROTOPT_DIRLOCK` - "direction lock". The SSH protocols set this bit to + limit which "direction" of socket actions that the main engine will + concern itself with. + + - `PROTOPT_NONETWORK` - a protocol that doesn't use network (read file:) + + - `PROTOPT_NEEDSPWD` - this protocol needs a password and will use a default + one unless one is provided + + - `PROTOPT_NOURLQUERY` - this protocol can't handle a query part on the URL + (?foo=bar) + +## conncache + + Is a hash table with connections for later re-use. Each `Curl_easy` has a + pointer to its connection cache. Each multi handle sets up a connection + cache that all added `Curl_easy`s share by default. + +## Curl_share + + The libcurl share API allocates a `Curl_share` struct, exposed to the + external API as "CURLSH *". + + The idea is that the struct can have a set of its own versions of caches and + pools and then by providing this struct in the `CURLOPT_SHARE` option, those + specific `Curl_easy`s will use the caches/pools that this share handle + holds. + + Then individual `Curl_easy` structs can be made to share specific things + that they otherwise wouldn't, such as cookies. + + The `Curl_share` struct can currently hold cookies, DNS cache and the SSL + session cache. + +## CookieInfo + + This is the main cookie struct. It holds all known cookies and related + information. Each `Curl_easy` has its own private CookieInfo even when + they are added to a multi handle. They can be made to share cookies by using + the share API. + + +[1]: https://curl.haxx.se/libcurl/c/curl_easy_setopt.html +[2]: https://curl.haxx.se/libcurl/c/curl_easy_init.html +[3]: http://c-ares.haxx.se/ +[4]: https://tools.ietf.org/html/rfc7230 "RFC 7230" +[5]: https://curl.haxx.se/libcurl/c/CURLOPT_ACCEPT_ENCODING.html +[6]: https://curl.haxx.se/docs/manpage.html#--compressed +[7]: https://curl.haxx.se/libcurl/c/curl_multi_socket_action.html +[8]: https://curl.haxx.se/libcurl/c/curl_multi_timeout.html +[9]: https://curl.haxx.se/libcurl/c/curl_multi_setopt.html +[10]: https://curl.haxx.se/libcurl/c/CURLMOPT_TIMERFUNCTION.html +[11]: https://curl.haxx.se/libcurl/c/curl_multi_perform.html +[12]: https://curl.haxx.se/libcurl/c/curl_multi_fdset.html +[13]: https://curl.haxx.se/libcurl/c/curl_multi_add_handle.html +[14]: https://curl.haxx.se/libcurl/c/curl_multi_info_read.html diff --git a/deps-win32/curl-7.54.1/docs/KNOWN_BUGS b/deps-win32/curl-7.54.1/docs/KNOWN_BUGS new file mode 100644 index 0000000..517cd14 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/KNOWN_BUGS @@ -0,0 +1,593 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + + Known Bugs + +These are problems and bugs known to exist at the time of this release. Feel +free to join in and help us correct one or more of these! Also be sure to +check the changelog of the current development status, as one or more of these +problems may have been fixed or changed somewhat since this was written! + + 1. 
HTTP + 1.1 CURLFORM_CONTENTLEN in an array + 1.2 Disabling HTTP Pipelining + 1.3 STARTTRANSFER time is wrong for HTTP POSTs + 1.4 multipart formposts file name encoding + 1.5 Expect-100 meets 417 + 1.6 Unnecessary close when 401 received waiting for 100 + 1.8 DNS timing is wrong for HTTP redirects + 1.9 HTTP/2 frames while in the connection pool kill reuse + 1.10 Strips trailing dot from host name + 1.11 CURLOPT_SEEKFUNCTION not called with CURLFORM_STREAM + + 2. TLS + 2.1 CURLINFO_SSL_VERIFYRESULT has limited support + 2.2 DER in keychain + 2.3 GnuTLS backend skips really long certificate fields + 2.4 DarwinSSL won't import PKCS#12 client certificates without a password + + 3. Email protocols + 3.1 IMAP SEARCH ALL truncated response + 3.2 No disconnect command + 3.3 SMTP to multiple recipients + 3.4 POP3 expects "CRLF.CRLF" eob for some single-line responses + + 4. Command line + 4.1 -J with %-encoded file nameas + 4.2 -J with -C - fails + 4.3 --retry and transfer timeouts + + 5. Build and portability issues + 5.1 Windows Borland compiler + 5.2 curl-config --libs contains private details + 5.4 AIX shared build with c-ares fails + 5.5 can't handle Unicode arguments in Windows + 5.6 cmake support gaps + 5.7 Visual Studio project gaps + 5.8 configure finding libs in wrong directory + 5.9 Utilize Requires.private directives in libcurl.pc + + 6. Authentication + 6.1 NTLM authentication and unicode + 6.2 MIT Kerberos for Windows build + 6.3 NTLM in system context uses wrong name + 6.4 Negotiate and Kerberos V5 need a fake user name + + 7. FTP + 7.1 FTP without or slow 220 response + 7.2 FTP with CONNECT and slow server + 7.3 FTP with NOBODY and FAILONERROR + 7.4 FTP with ACCT + 7.5 ASCII FTP + 7.6 FTP with NULs in URL parts + 7.7 FTP and empty path parts in the URL + 7.8 Premature transfer end but healthy control channel + + 8. TELNET + 8.1 TELNET and time limtiations don't work + 8.2 Microsoft telnet server + + 9. SFTP and SCP + 9.1 SFTP doesn't do CURLOPT_POSTQUOTE correct + + 10. SOCKS + 10.1 SOCKS proxy connections are done blocking + 10.2 SOCKS don't support timeouts + 10.3 FTPS over SOCKS + 10.4 active FTP over a SOCKS + + 11. Internals + 11.1 Curl leaks .onion hostnames in DNS + 11.2 error buffer not set if connection to multiple addresses fails + 11.3 c-ares deviates from stock resolver on http://1346569778 + + 12. LDAP and OpenLDAP + 12.1 OpenLDAP hangs after returning results + + 13. TCP/IP + 13.1 --interface for ipv6 binds to unusable IP address + + +============================================================================== + +1. HTTP + +1.1 CURLFORM_CONTENTLEN in an array + + It is not possible to pass a 64-bit value using CURLFORM_CONTENTLEN with + CURLFORM_ARRAY, when compiled on 32-bit platforms that support 64-bit + integers. This is because the underlying structure 'curl_forms' uses a dual + purpose char* for storing these values in via casting. For more information + see the now closed related issue: + https://github.com/curl/curl/issues/608 + +1.2 Disabling HTTP Pipelining + + Disabling HTTP Pipelining when there are ongoing transfers can lead to + heap corruption and crash. https://curl.haxx.se/bug/view.cgi?id=1411 + +1.3 STARTTRANSFER time is wrong for HTTP POSTs + + Wrong STARTTRANSFER timer accounting for POST requests Timer works fine with + GET requests, but while using POST the time for CURLINFO_STARTTRANSFER_TIME + is wrong. While using POST CURLINFO_STARTTRANSFER_TIME minus + CURLINFO_PRETRANSFER_TIME is near to zero every time. 
+ + https://github.com/curl/curl/issues/218 + https://curl.haxx.se/bug/view.cgi?id=1213 + +1.4 multipart formposts file name encoding + + When creating multipart formposts. The file name part can be encoded with + something beyond ascii but currently libcurl will only pass in the verbatim + string the app provides. There are several browsers that already do this + encoding. The key seems to be the updated draft to RFC2231: + https://tools.ietf.org/html/draft-reschke-rfc2231-in-http-02 + +1.5 Expect-100 meets 417 + + If an upload using Expect: 100-continue receives an HTTP 417 response, it + ought to be automatically resent without the Expect:. A workaround is for + the client application to redo the transfer after disabling Expect:. + https://curl.haxx.se/mail/archive-2008-02/0043.html + +1.6 Unnecessary close when 401 received waiting for 100 + + libcurl closes the connection if an HTTP 401 reply is received while it is + waiting for the the 100-continue response. + https://curl.haxx.se/mail/lib-2008-08/0462.html + +1.8 DNS timing is wrong for HTTP redirects + + When extracting timing information after HTTP redirects, only the last + transfer's results are returned and not the totals: + https://github.com/curl/curl/issues/522 + +1.9 HTTP/2 frames while in the connection pool kill reuse + + If the server sends HTTP/2 frames (like for example an HTTP/2 PING frame) to + curl while the connection is held in curl's connection pool, the socket will + be found readable when considered for reuse and that makes curl think it is + dead and then it will be closed and a new connection gets created instead. + + This is *best* fixed by adding monitoring to connections while they are kept + in the pool so that pings can be responded to appropriately. + +1.10 Strips trailing dot from host name + + When given a URL with a trailing dot for the host name part: + "https://example.com./", libcurl will strip off the dot and use the name + without a dot internally and send it dot-less in HTTP Host: headers and in + the TLS SNI field. + + The HTTP part violates RFC 7230 section 5.4 but the SNI part is accordance + with RFC 6066 section 3. + + URLs using these trailing dots are very rare in the wild and we have not seen + or gotten any real-world problems with such URLs reported. The popular + browsers seem to have stayed with not stripping the dot for both uses (thus + they violate RFC 6066 instead of RFC 7230). + + Daniel took the discussion to the HTTPbis mailing list in March 2016: + https://lists.w3.org/Archives/Public/ietf-http-wg/2016JanMar/0430.html but + there was not major rush or interest to fix this. The impression I get is + that most HTTP people rather not rock the boat now and instead prioritize web + compatibility rather than to strictly adhere to these RFCs. + + Our current approach allows a knowing client to send a custom HTTP header + with the dot added. + + It can also be noted that while adding a trailing dot to the host name in + most (all?) cases will make the name resolve to the same set of IP addresses, + many HTTP servers will not happily accept the trailing dot there unless that + has been specifically configured to be a fine virtual host. + + If URLs with trailing dots for host names become more popular or even just + used more than for just plain fun experiments, I'm sure we will have reason + to go back and reconsider. + + See https://github.com/curl/curl/issues/716 for the discussion. 
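+
+ A rough sketch of that custom-header approach (example.com is only a
+ placeholder; this affects the Host: header that is sent, not the TLS SNI
+ field):
+
+    #include <curl/curl.h>
+
+    /* assumes curl_global_init() has already been called */
+    static void fetch_with_trailing_dot(void)
+    {
+      CURL *curl = curl_easy_init();
+      struct curl_slist *hdrs = NULL;
+
+      if(curl) {
+        /* keep the trailing dot in the Host: header we provide ourselves */
+        hdrs = curl_slist_append(hdrs, "Host: example.com.");
+        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com./");
+        curl_easy_perform(curl);
+        curl_slist_free_all(hdrs);
+        curl_easy_cleanup(curl);
+      }
+    }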
+ +1.11 CURLOPT_SEEKFUNCTION not called with CURLFORM_STREAM + + I'm using libcurl to POST form data using a FILE* with the CURLFORM_STREAM + option of curl_formadd(). I've noticed that if the connection drops at just + the right time, the POST is reattempted without the data from the file. It + seems like the file stream position isn't getting reset to the beginning of + the file. I found the CURLOPT_SEEKFUNCTION option and set that with a + function that performs an fseek() on the FILE*. However, setting that didn't + seem to fix the issue or even get called. See + https://github.com/curl/curl/issues/768 + + +2. TLS + +2.1 CURLINFO_SSL_VERIFYRESULT has limited support + + CURLINFO_SSL_VERIFYRESULT is only implemented for the OpenSSL and NSS + backends, so relying on this information in a generic app is flaky. + +2.2 DER in keychain + + Curl doesn't recognize certificates in DER format in keychain, but it works + with PEM. https://curl.haxx.se/bug/view.cgi?id=1065 + +2.3 GnuTLS backend skips really long certificate fields + + libcurl calls gnutls_x509_crt_get_dn() with a fixed buffer size and if the + field is too long in the cert, it'll just return an error and the field will + be displayed blank. + +2.4 DarwinSSL won't import PKCS#12 client certificates without a password + + libcurl calls SecPKCS12Import with the PKCS#12 client certificate, but that + function rejects certificates that do not have a password. + https://github.com/curl/curl/issues/1308 + + +3. Email protocols + +3.1 IMAP SEARCH ALL truncated response + + IMAP "SEARCH ALL" truncates output on large boxes. "A quick search of the + code reveals that pingpong.c contains some truncation code, at line 408, when + it deems the server response to be too large truncating it to 40 characters" + https://curl.haxx.se/bug/view.cgi?id=1366 + +3.2 No disconnect command + + The disconnect commands (LOGOUT and QUIT) may not be sent by IMAP, POP3 and + SMTP if a failure occurs during the authentication phase of a connection. + +3.3 SMTP to multiple recipients + + When sending data to multiple recipients, curl will abort and return failure + if one of the recipients indicate failure (on the "RCPT TO" + command). Ordinary mail programs would proceed and still send to the ones + that can receive data. This is subject for change in the future. + https://curl.haxx.se/bug/view.cgi?id=1116 + +3.4 POP3 expects "CRLF.CRLF" eob for some single-line responses + + You have to tell libcurl not to expect a body, when dealing with one line + response commands. Please see the POP3 examples and test cases which show + this for the NOOP and DELE commands. https://curl.haxx.se/bug/?i=740 + + +4. Command line + +4.1 -J with %-encoded file nameas + + -J/--remote-header-name doesn't decode %-encoded file names. RFC6266 details + how it should be done. The can of worm is basically that we have no charset + handling in curl and ascii >=128 is a challenge for us. Not to mention that + decoding also means that we need to check for nastiness that is attempted, + like "../" sequences and the like. Probably everything to the left of any + embedded slashes should be cut off. + https://curl.haxx.se/bug/view.cgi?id=1294 + +4.2 -J with -C - fails + + When using -J (with -O), automatically resumed downloading together with "-C + -" fails. Without -J the same command line works! This happens because the + resume logic is worked out before the target file name (and thus its + pre-transfer size) has been figured out! 
+ https://curl.haxx.se/bug/view.cgi?id=1169 + +4.3 --retry and transfer timeouts + + If using --retry and the transfer timeouts (possibly due to using -m or + -y/-Y) the next attempt doesn't resume the transfer properly from what was + downloaded in the previous attempt but will truncate and restart at the + original position where it was at before the previous failed attempt. See + https://curl.haxx.se/mail/lib-2008-01/0080.html and Mandriva bug report + https://qa.mandriva.com/show_bug.cgi?id=22565 + + +5. Build and portability issues + +5.1 Windows Borland compiler + + When building with the Windows Borland compiler, it fails because the "tlib" + tool doesn't support hyphens (minus signs) in file names and we have such in + the build. https://curl.haxx.se/bug/view.cgi?id=1222 + +5.2 curl-config --libs contains private details + + "curl-config --libs" will include details set in LDFLAGS when configure is + run that might be needed only for building libcurl. Further, curl-config + --cflags suffers from the same effects with CFLAGS/CPPFLAGS. + +5.4 AIX shared build with c-ares fails + + curl version 7.12.2 fails on AIX if compiled with --enable-ares. The + workaround is to combine --enable-ares with --disable-shared + +5.5 can't handle Unicode arguments in Windows + + If a URL or filename can't be encoded using the user's current codepage then + it can only be encoded properly in the Unicode character set. Windows uses + UTF-16 encoding for Unicode and stores it in wide characters, however curl + and libcurl are not equipped for that at the moment. And, except for Cygwin, + Windows can't use UTF-8 as a locale. + + https://curl.haxx.se/bug/?i=345 + https://curl.haxx.se/bug/?i=731 + +5.6 cmake support gaps + + The cmake build setup lacks several features that the autoconf build + offers. This includes: + + - symbol hiding when the shared library is built + - use of correct soname for the shared library build + - support for several TLS backends are missing + - the unit tests cause link failures in regular non-static builds + - no nghttp2 check + +5.7 Visual Studio project gaps + + The Visual Studio projects lack some features that the autoconf and nmake + builds offer, such as the following: + + - support for zlib and nghttp2 + - use of static runtime libraries + - add the test suite components + + In addition to this the following could be implemented: + + - support for other development IDEs + - add PATH environment variables for third-party DLLs + +5.8 configure finding libs in wrong directory + + When the configure script checks for third-party libraries, it adds those + directories to the LDFLAGS variable and then tries linking to see if it + works. When successful, the found directory is kept in the LDFLAGS variable + when the script continues to execute and do more tests and possibly check for + more libraries. + + This can make subsequent checks for libraries wrongly detect another + installation in a directory that was previously added to LDFLAGS by another + library check! + + A possibly better way to do these checks would be to keep the pristine LDFLAGS + even after successful checks and instead add those verified paths to a + separate variable that only after all library checks have been performed gets + appended to LDFLAGS. + +5.9 Utilize Requires.private directives in libcurl.pc + + https://github.com/curl/curl/issues/864 + +6. 
Authentication + +6.1 NTLM authentication and unicode + + NTLM authentication involving unicode user name or password only works + properly if built with UNICODE defined together with the WinSSL/schannel + backend. The original problem was mentioned in: + https://curl.haxx.se/mail/lib-2009-10/0024.html + https://curl.haxx.se/bug/view.cgi?id=896 + + The WinSSL/schannel version verified to work as mentioned in + https://curl.haxx.se/mail/lib-2012-07/0073.html + +6.2 MIT Kerberos for Windows build + + libcurl fails to build with MIT Kerberos for Windows (KfW) due to KfW's + library header files exporting symbols/macros that should be kept private to + the KfW library. See ticket #5601 at http://krbdev.mit.edu/rt/ + +6.3 NTLM in system context uses wrong name + + NTLM authentication using SSPI (on Windows) when (lib)curl is running in + "system context" will make it use wrong(?) user name - at least when compared + to what winhttp does. See https://curl.haxx.se/bug/view.cgi?id=535 + +6.4 Negotiate and Kerberos V5 need a fake user name + + In order to get Negotiate (SPNEGO) authentication to work in HTTP or Kerberos + V5 in the e-mail protocols, you need to provide a (fake) user name (this + concerns both curl and the lib) because the code wrongly only considers + authentication if there's a user name provided by setting + conn->bits.user_passwd in url.c https://curl.haxx.se/bug/view.cgi?id=440 How? + https://curl.haxx.se/mail/lib-2004-08/0182.html A possible solution is to + either modify this variable to be set or introduce a variable such as + new conn->bits.want_authentication which is set when any of the authentication + options are set. + + +7. FTP + +7.1 FTP without or slow 220 response + + If a connection is made to a FTP server but the server then just never sends + the 220 response or otherwise is dead slow, libcurl will not acknowledge the + connection timeout during that phase but only the "real" timeout - which may + surprise users as it is probably considered to be the connect phase to most + people. Brought up (and is being misunderstood) in: + https://curl.haxx.se/bug/view.cgi?id=856 + +7.2 FTP with CONNECT and slow server + + When doing FTP over a socks proxy or CONNECT through HTTP proxy and the multi + interface is used, libcurl will fail if the (passive) TCP connection for the + data transfer isn't more or less instant as the code does not properly wait + for the connect to be confirmed. See test case 564 for a first shot at a test + case. + +7.3 FTP with NOBODY and FAILONERROR + + It seems sensible to be able to use CURLOPT_NOBODY and CURLOPT_FAILONERROR + with FTP to detect if a file exists or not, but it is not working: + https://curl.haxx.se/mail/lib-2008-07/0295.html + +7.4 FTP with ACCT + + When doing an operation over FTP that requires the ACCT command (but not when + logging in), the operation will fail since libcurl doesn't detect this and + thus fails to issue the correct command: + https://curl.haxx.se/bug/view.cgi?id=635 + +7.5 ASCII FTP + + FTP ASCII transfers do not follow RFC959. They don't convert the data + accordingly (not for sending nor for receiving). RFC 959 section 3.1.1.1 + clearly describes how this should be done: + + The sender converts the data from an internal character representation to + the standard 8-bit NVT-ASCII representation (see the Telnet + specification). The receiver will convert the data from the standard + form to his own internal form. + + Since 7.15.4 at least line endings are converted. 
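+
+ For reference, this is how an application asks libcurl for such an ASCII
+ (type A) transfer - a rough sketch only, with a placeholder URL, and per the
+ note above only the line-ending conversion should be expected:
+
+    #include <curl/curl.h>
+
+    /* assumes curl_global_init() has already been called */
+    static void ascii_ftp_get(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/README");
+        /* request a type A (ASCII) FTP transfer, like 'curl -B' */
+        curl_easy_setopt(curl, CURLOPT_TRANSFERTEXT, 1L);
+        curl_easy_perform(curl);
+        curl_easy_cleanup(curl);
+      }
+    }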
+
+7.6 FTP with NULs in URL parts
+
+ FTP URLs passed to curl may contain NUL (0x00) in the RFC 1738 user,
+ password, and path components, encoded as "%00". The problem is that
+ curl_unescape does not detect this, but instead returns a shortened C string.
+ From a strict FTP protocol standpoint, NUL is a valid character within RFC
+ 959 strings, so the way to handle this correctly in curl would be to use a
+ data structure other than a plain C string, one that can handle embedded NUL
+ characters. From a practical standpoint, most FTP servers would not
+ meaningfully support NUL characters within RFC 959 strings, anyway (e.g.,
+ Unix pathnames may not contain NUL).
+
+7.7 FTP and empty path parts in the URL
+
+ libcurl ignores empty path parts in FTP URLs, whereas RFC1738 states that
+ such parts should be sent to the server as 'CWD ' (without an argument). The
+ only exception to this rule is that we knowingly break this if the empty
+ part is first in the path, as then we use the double slashes to indicate that
+ the user wants to reach the root dir (this exception SHALL remain even when
+ this bug is fixed).
+
+7.8 Premature transfer end but healthy control channel
+
+ When 'multi_done' is called before the transfer has been completed the normal
+ way, it is considered a "premature" transfer end. In this situation, libcurl
+ closes the connection assuming it doesn't know the state of the connection so
+ it can't be reused for subsequent requests.
+
+ With FTP however, this isn't necessarily true but there are a bunch of
+ situations (listed in the ftp_done code) where it *could* keep the connection
+ alive even in this situation - but the current code doesn't. Fixing this would
+ allow libcurl to reuse FTP connections better.
+
+8. TELNET
+
+8.1 TELNET and time limitations don't work
+
+ When using telnet, the time limitation options don't work.
+ https://curl.haxx.se/bug/view.cgi?id=846
+
+8.2 Microsoft telnet server
+
+ There seems to be a problem when connecting to the Microsoft telnet server.
+ https://curl.haxx.se/bug/view.cgi?id=649
+
+
+9. SFTP and SCP
+
+9.1 SFTP doesn't do CURLOPT_POSTQUOTE correct
+
+ When libcurl sends CURLOPT_POSTQUOTE commands when connected to an SFTP server
+ using the multi interface, the commands are not being sent correctly and
+ instead the connection is "cancelled" (the operation is considered done)
+ prematurely. There is a half-baked (busy-looping) patch provided in the bug
+ report but it cannot be accepted as-is. See
+ https://curl.haxx.se/bug/view.cgi?id=748
+
+
+10. SOCKS
+
+10.1 SOCKS proxy connections are done blocking
+
+ Both SOCKS5 and SOCKS4 proxy connections are done blocking, which is very bad
+ when used with the multi interface.
+
+10.2 SOCKS don't support timeouts
+
+ The SOCKS4 connection codes don't properly acknowledge (connect) timeouts.
+ According to bug #1556528, even the SOCKS5 connect code does not do it right:
+ https://curl.haxx.se/bug/view.cgi?id=604
+
+ When connecting to a SOCKS proxy, the (connect) timeout is not properly
+ acknowledged after the actual TCP connect (during the SOCKS "negotiate"
+ phase).
+
+10.3 FTPS over SOCKS
+
+ libcurl doesn't support FTPS over a SOCKS proxy.
+
+10.4 active FTP over a SOCKS
+
+ libcurl doesn't support active FTP over a SOCKS proxy.
+
+
+11. Internals
+
+11.1 Curl leaks .onion hostnames in DNS
+
+ Curl sends DNS requests for hostnames with a .onion TLD. 
This leaks + information about what the user is attempting to access, and violates this + requirement of RFC7686: https://tools.ietf.org/html/rfc7686 + + Issue: https://github.com/curl/curl/issues/543 + +11.2 error buffer not set if connection to multiple addresses fails + + If you ask libcurl to resolve a hostname like example.com to IPv6 addresses + only. But you only have IPv4 connectivity. libcurl will correctly fail with + CURLE_COULDNT_CONNECT. But the error buffer set by CURLOPT_ERRORBUFFER + remains empty. Issue: https://github.com/curl/curl/issues/544 + +11.3 c-ares deviates from stock resolver on http://1346569778 + + When using the socket resolvers, that URL becomes: + + * Rebuilt URL to: http://1346569778/ + * Trying 80.67.6.50... + + but with c-ares it instead says "Could not resolve: 1346569778 (Domain name + not found)" + + See https://github.com/curl/curl/issues/893 + + +12. LDAP and OpenLDAP + +12.1 OpenLDAP hangs after returning results + + By configuration defaults, openldap automatically chase referrals on + secondary socket descriptors. The OpenLDAP backend is asynchronous and thus + should monitor all socket descriptors involved. Currently, these secondary + descriptors are not monitored, causing openldap library to never receive + data from them. + + As a temporary workaround, disable referrals chasing by configuration. + + The fix is not easy: proper automatic referrals chasing requires a + synchronous bind callback and monitoring an arbitrary number of socket + descriptors for a single easy handle (currently limited to 5). + + Generic LDAP is synchronous: OK. + + See https://github.com/curl/curl/issues/622 and + https://curl.haxx.se/mail/lib-2016-01/0101.html + + +13. TCP/IP + +13.1 --interface for ipv6 binds to unusable IP address + + Since IPv6 provides a lot of addresses with different scope, binding to an + IPv6 address needs to take the proper care so that it doesn't bind to a + locally scoped address as that is bound to fail. + + https://github.com/curl/curl/issues/686 diff --git a/deps-win32/curl-7.54.1/docs/LICENSE-MIXING.md b/deps-win32/curl-7.54.1/docs/LICENSE-MIXING.md new file mode 100644 index 0000000..5376bdb --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/LICENSE-MIXING.md @@ -0,0 +1,127 @@ +License Mixing +============== + +libcurl can be built to use a fair amount of various third party libraries, +libraries that are written and provided by other parties that are distributed +using their own licenses. Even libcurl itself contains code that may cause +problems to some. This document attempts to describe what licenses libcurl and +the other libraries use and what possible dilemmas linking and mixing them all +can lead to for end users. + +I am not a lawyer and this is not legal advice! + +One common dilemma is that [GPL](https://www.gnu.org/licenses/gpl.html) +licensed code is not allowed to be linked with code licensed under the +[Original BSD license](https://spdx.org/licenses/BSD-4-Clause.html) (with the +announcement clause). You may still build your own copies that use them all, +but distributing them as binaries would be to violate the GPL license - unless +you accompany your license with an +[exception](https://www.gnu.org/licenses/gpl-faq.html#GPLIncompatibleLibs). This +particular problem was addressed when the [Modified BSD +license](https://opensource.org/licenses/BSD-3-Clause) was created, which does +not have the announcement clause that collides with GPL. 
+ +## libcurl + + Uses an [MIT style license](https://curl.haxx.se/docs/copyright.html) that is + very liberal. + +## OpenSSL + + (May be used for SSL/TLS support) Uses an Original BSD-style license with an + announcement clause that makes it "incompatible" with GPL. You are not + allowed to ship binaries that link with OpenSSL that includes GPL code + (unless that specific GPL code includes an exception for OpenSSL - a habit + that is growing more and more common). If OpenSSL's licensing is a problem + for you, consider using another TLS library. + +## GnuTLS + + (May be used for SSL/TLS support) Uses the + [LGPL](https://www.gnu.org/licenses/lgpl.html) license. If this is a problem + for you, consider using another TLS library. Also note that GnuTLS itself + depends on and uses other libs (libgcrypt and libgpg-error) and they too are + LGPL- or GPL-licensed. + +## WolfSSL + + (May be used for SSL/TLS support) Uses the GPL license or a proprietary + license. If this is a problem for you, consider using another TLS library. + +## NSS + + (May be used for SSL/TLS support) Is covered by the + [MPL](https://www.mozilla.org/MPL/) license, the GPL license and the LGPL + license. You may choose to license the code under MPL terms, GPL terms, or + LGPL terms. These licenses grant you different permissions and impose + different obligations. You should select the license that best meets your + needs. + +## axTLS + + (May be used for SSL/TLS support) Uses a Modified BSD-style license. + +## mbedTLS + + (May be used for SSL/TLS support) Uses the [Apache 2.0 + license](https://opensource.org/licenses/Apache-2.0) or the GPL license. + You may choose to license the code under Apache 2.0 terms or GPL terms. + These licenses grant you different permissions and impose different + obligations. You should select the license that best meets your needs. + +## BoringSSL + + (May be used for SSL/TLS support) As an OpenSSL fork, it has the same + license as that. + +## libressl + + (May be used for SSL/TLS support) As an OpenSSL fork, it has the same + license as that. + +## c-ares + + (Used for asynchronous name resolves) Uses an MIT license that is very + liberal and imposes no restrictions on any other library or part you may link + with. + +## zlib + + (Used for compressed Transfer-Encoding support) Uses an MIT-style license + that shouldn't collide with any other library. + +## MIT Kerberos + + (May be used for GSS support) MIT licensed, that shouldn't collide with any + other parts. + +## Heimdal + + (May be used for GSS support) Heimdal is Original BSD licensed with the + announcement clause. + +## GNU GSS + + (May be used for GSS support) GNU GSS is GPL licensed. Note that you may not + distribute binary curl packages that uses this if you build curl to also link + and use any Original BSD licensed libraries! + +## libidn + + (Used for IDNA support) Uses the GNU Lesser General Public License [3]. LGPL + is a variation of GPL with slightly less aggressive "copyleft". This license + requires more requirements to be met when distributing binaries, see the + license for details. Also note that if you distribute a binary that includes + this library, you must also include the full LGPL license text. Please + properly point out what parts of the distributed package that the license + addresses. + +## OpenLDAP + + (Used for LDAP support) Uses a Modified BSD-style license. Since libcurl uses + OpenLDAP as a shared library only, I have not heard of anyone that ships + OpenLDAP linked with libcurl in an app. 
+ +## libssh2 + + (Used for scp and sftp support) libssh2 uses a Modified BSD-style license. diff --git a/deps-win32/curl-7.54.1/docs/MAIL-ETIQUETTE b/deps-win32/curl-7.54.1/docs/MAIL-ETIQUETTE new file mode 100644 index 0000000..897fc9f --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/MAIL-ETIQUETTE @@ -0,0 +1,264 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +MAIL ETIQUETTE + + 1. About the lists + 1.1 Mailing Lists + 1.2 Netiquette + 1.3 Do Not Mail a Single Individual + 1.4 Subscription Required + 1.5 Moderation of new posters + 1.6 Handling trolls and spam + 1.7 How to unsubscribe + 1.8 I posted, now what? + + 2. Sending mail + 2.1 Reply or New Mail + 2.2 Reply to the List + 2.3 Use a Sensible Subject + 2.4 Do Not Top-Post + 2.5 HTML is not for mails + 2.6 Quoting + 2.7 Digest + 2.8 Please Tell Us How You Solved The Problem! + +============================================================================== + +1. About the lists + + 1.1 Mailing Lists + + The mailing lists we have are all listed and described at + https://curl.haxx.se/mail/ + + Each mailing list is targeted to a specific set of users and subjects, + please use the one or the ones that suit you the most. + + Each mailing list has hundreds up to thousands of readers, meaning that + each mail sent will be received and read by a very large number of people. + People from various cultures, regions, religions and continents. + + 1.2 Netiquette + + Netiquette is a common term for how to behave on the internet. Of course, in + each particular group and subculture there will be differences in what is + acceptable and what is considered good manners. + + This document outlines what we in the curl project consider to be good + etiquette, and primarily this focus on how to behave on and how to use our + mailing lists. + + 1.3 Do Not Mail a Single Individual + + Many people send one question to one person. One person gets many mails, and + there is only one person who can give you a reply. The question may be + something that other people would also like to ask. These other people have + no way to read the reply, but to ask the one person the question. The one + person consequently gets overloaded with mail. + + If you really want to contact an individual and perhaps pay for his or her + services, by all means go ahead, but if it's just another curl question, + take it to a suitable list instead. + + 1.4 Subscription Required + + All curl mailing lists require that you are subscribed to allow a mail to go + through to all the subscribers. + + If you post without being subscribed (or from a different mail address than + the one you are subscribed with), your mail will simply be silently + discarded. You have to subscribe first, then post. + + The reason for this unfortunate and strict subscription policy is of course + to stop spam from pestering the lists. + + 1.5 Moderation of new posters + + Several of the curl mailing lists automatically make all posts from new + subscribers be moderated. This means that after you've subscribed and + sent your first mail to a list, that mail will not be let through to the + list until a mailing list administrator has verified that it is OK and + permits it to get posted. + + Once a first post has been made that proves the sender is actually talking + about curl-related subjects, the moderation "flag" will be switched off and + future posts will go through without being moderated. 
+ + The reason for this moderation policy is that we do suffer from spammers who + actually subscribe and send spam to our lists. + + 1.6 Handling trolls and spam + + Despite our good intentions and hard work to keep spam off the lists and to + maintain a friendly and positive atmosphere, there will be times when spam + and or trolls get through. + + Troll - "someone who posts inflammatory, extraneous, or off-topic messages + in an online community" + + Spam - "use of electronic messaging systems to send unsolicited bulk + messages" + + No matter what, we NEVER EVER respond to trolls or spammers on the list. If + you believe the list admin should do something in particular, contact him/her + off-list. The subject will be taken care of as much as possible to prevent + repeated offenses, but responding on the list to such messages never leads to + anything good and only puts the light even more on the offender: which was + the entire purpose of it getting sent to the list in the first place. + + Don't feed the trolls! + + 1.7 How to unsubscribe + + You can unsubscribe the same way you subscribed in the first place. You go + to the page for the particular mailing list you're subscribed to and you enter + your email address and password and press the unsubscribe button. + + Also, the instructions to unsubscribe are included in the headers of every + mail that is sent out to all curl related mailing lists and there's a footer + in each mail that links to the "admin" page on which you can unsubscribe and + change other options. + + You NEVER EVER email the mailing list requesting someone else to take you off + the list. + + 1.8 I posted, now what? + + If you aren't subscribed with the exact same email address that you used to + send the email, your post will just be silently discarded. + + If you posted for the first time to the mailing list, you first need to wait + for an administrator to allow your email to go through (moderated). This normally + happens very quickly but in case we're asleep, you may have to wait a few + hours. + + Once your email goes through it is sent out to several hundred or even + thousands of recipients. Your email may cover an area that not that many people + know about or are interested in. Or possibly the person who knows about it + is on vacation or under a very heavy work load right now. You may have to wait + for a response and you should not expect to get a response at all, but + hopefully you get an answer within a couple of days. + + You do yourself and all of us a service when you include as many details as + possible already in your first email. Mention your operating system and + environment. Tell us which curl version you're using and tell us what you + did, what happened and what you expected would happen. Preferably, show us + what you did with details enough to allow others to help point out the problem + or repeat the same steps in their locations. + + Failing to include details will only delay responses and make people respond + and ask for more details and you will have to send a follow-up email that + includes them. + + Expect the responses to primarily help YOU debug the issue, or ask YOU + questions that can lead you or others towards a solution or explanation to + whatever you experience. + + If you are a repeat offender to the guidelines outlined in this document, + chances are that people will ignore you at will and your chances to get + responses in the future will greatly diminish. + + +2. 
Sending mail + + 2.1 Reply or New Mail + + Please do not reply to an existing message as a short-cut to post a message + to the lists. + + Many mail programs and web archivers use information within mails to keep + them together as "threads", as collections of posts that discuss a certain + subject. If you don't intend to reply on the same or similar subject, don't + just hit reply on an existing mail and change subject, create a new mail. + + 2.2 Reply to the List + + When replying to a message from the list, make sure that you do "group + reply" or "reply to all", and not just reply to the author of the single + mail you reply to. + + We're actively discouraging replying back to the single person by setting + the Reply-To: field in outgoing mails back to the mailing list address, + making it harder for people to mail the author directly, if only by mistake. + + 2.3 Use a Sensible Subject + + Please use a subject of the mail that makes sense and that is related to the + contents of your mail. It makes it a lot easier to find your mail afterwards + and it makes it easier to track mail threads and topics. + + 2.4 Do Not Top-Post + + If you reply to a message, don't use top-posting. Top-posting is when you + write the new text at the top of a mail and you insert the previous quoted + mail conversation below. It forces users to read the mail in a backwards + order to properly understand it. + + This is why top posting is so bad (in top posting order): + + A: Because it messes up the order in which people normally read text. + Q: Why is top-posting such a bad thing? + A: Top-posting. + Q: What is the most annoying thing in e-mail? + + Apart from the screwed up read order (especially when mixed together in a + thread when someone responds using the mandated bottom-posting style), it + also makes it impossible to quote only parts of the original mail. + + When you reply to a mail. You let the mail client insert the previous mail + quoted. Then you put the cursor on the first line of the mail and you move + down through the mail, deleting all parts of the quotes that don't add + context for your comments. When you want to add a comment you do so, inline, + right after the quotes that relate to your comment. Then you continue + downwards again. + + When most of the quotes have been removed and you've added your own words, + you're done! + + 2.5 HTML is not for mails + + Please switch off those HTML encoded messages. You can mail all those funny + mails to your friends. We speak plain text mails. + + 2.6 Quoting + + Quote as little as possible. Just enough to provide the context you cannot + leave out. A lengthy description can be found here: + + https://www.netmeister.org/news/learn2quote.html + + 2.7 Digest + + We allow subscribers to subscribe to the "digest" version of the mailing + lists. A digest is a collection of mails lumped together in one single mail. + + Should you decide to reply to a mail sent out as a digest, there are two + things you MUST consider if you really really cannot subscribe normally + instead: + + Cut off all mails and chatter that is not related to the mail you want to + reply to. + + Change the subject name to something sensible and related to the subject, + preferably even the actual subject of the single mail you wanted to reply to + + 2.8 Please Tell Us How You Solved The Problem! + + Many people mail questions to the list, people spend some of their time and + make an effort in providing good answers to these questions. 
+ + If you are the one who asks, please consider responding once more in case + one of the hints was what solved your problems. The guys who write answers + feel good to know that they provided a good answer and that you fixed the + problem. Far too often, the person who asked the question is never heard from + again, and we never get to know if he/she is gone because the problem was + solved or perhaps because the problem was unsolvable! + + Getting the solution posted also helps other users that experience the same + problem(s). They get to see (possibly in the web archives) that the + suggested fixes actually has helped at least one person. + diff --git a/deps-win32/curl-7.54.1/docs/MANUAL b/deps-win32/curl-7.54.1/docs/MANUAL new file mode 100644 index 0000000..0e3db0f --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/MANUAL @@ -0,0 +1,1059 @@ +LATEST VERSION + + You always find news about what's going on as well as the latest versions + from the curl web pages, located at: + + https://curl.haxx.se + +SIMPLE USAGE + + Get the main page from Netscape's web-server: + + curl http://www.netscape.com/ + + Get the README file the user's home directory at funet's ftp-server: + + curl ftp://ftp.funet.fi/README + + Get a web page from a server using port 8000: + + curl http://www.weirdserver.com:8000/ + + Get a directory listing of an FTP site: + + curl ftp://cool.haxx.se/ + + Get the definition of curl from a dictionary: + + curl dict://dict.org/m:curl + + Fetch two documents at once: + + curl ftp://cool.haxx.se/ http://www.weirdserver.com:8000/ + + Get a file off an FTPS server: + + curl ftps://files.are.secure.com/secrets.txt + + or use the more appropriate FTPS way to get the same file: + + curl --ftp-ssl ftp://files.are.secure.com/secrets.txt + + Get a file from an SSH server using SFTP: + + curl -u username sftp://example.com/etc/issue + + Get a file from an SSH server using SCP using a private key + (not password-protected) to authenticate: + + curl -u username: --key ~/.ssh/id_rsa \ + scp://example.com/~/file.txt + + Get a file from an SSH server using SCP using a private key + (password-protected) to authenticate: + + curl -u username: --key ~/.ssh/id_rsa --pass private_key_password \ + scp://example.com/~/file.txt + + Get the main page from an IPv6 web server: + + curl "http://[2001:1890:1112:1::20]/" + + Get a file from an SMB server: + + curl -u "domain\username:passwd" smb://server.example.com/share/file.txt + +DOWNLOAD TO A FILE + + Get a web page and store in a local file with a specific name: + + curl -o thatpage.html http://www.netscape.com/ + + Get a web page and store in a local file, make the local file get the name + of the remote document (if no file name part is specified in the URL, this + will fail): + + curl -O http://www.netscape.com/index.html + + Fetch two files and store them with their remote names: + + curl -O www.haxx.se/index.html -O curl.haxx.se/download.html + +USING PASSWORDS + + FTP + + To ftp files using name+passwd, include them in the URL like: + + curl ftp://name:passwd@machine.domain:port/full/path/to/file + + or specify them with the -u flag like + + curl -u name:passwd ftp://machine.domain:port/full/path/to/file + + FTPS + + It is just like for FTP, but you may also want to specify and use + SSL-specific options for certificates etc. + + Note that using FTPS:// as prefix is the "implicit" way as described in the + standards while the recommended "explicit" way is done by using FTP:// and + the --ftp-ssl option. 
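+
+  As an illustrative sketch combining the options shown above (the host,
+  port and credentials are placeholders), the explicit style could look
+  like:
+
+    curl --ftp-ssl -u name:passwd ftp://machine.domain:port/full/path/to/file
+
+  while the implicit style would simply use the ftps:// prefix instead:
+
+    curl -u name:passwd ftps://machine.domain:port/full/path/to/file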
+ + SFTP / SCP + + This is similar to FTP, but you can use the --key option to specify a + private key to use instead of a password. Note that the private key may + itself be protected by a password that is unrelated to the login password + of the remote system; this password is specified using the --pass option. + Typically, curl will automatically extract the public key from the private + key file, but in cases where curl does not have the proper library support, + a matching public key file must be specified using the --pubkey option. + + HTTP + + Curl also supports user and password in HTTP URLs, thus you can pick a file + like: + + curl http://name:passwd@machine.domain/full/path/to/file + + or specify user and password separately like in + + curl -u name:passwd http://machine.domain/full/path/to/file + + HTTP offers many different methods of authentication and curl supports + several: Basic, Digest, NTLM and Negotiate (SPNEGO). Without telling which + method to use, curl defaults to Basic. You can also ask curl to pick the + most secure ones out of the ones that the server accepts for the given URL, + by using --anyauth. + + NOTE! According to the URL specification, HTTP URLs can not contain a user + and password, so that style will not work when using curl via a proxy, even + though curl allows it at other times. When using a proxy, you _must_ use + the -u style for user and password. + + HTTPS + + Probably most commonly used with private certificates, as explained below. + +PROXY + + curl supports both HTTP and SOCKS proxy servers, with optional authentication. + It does not have special support for FTP proxy servers since there are no + standards for those, but it can still be made to work with many of them. You + can also use both HTTP and SOCKS proxies to transfer files to and from FTP + servers. + + Get an ftp file using an HTTP proxy named my-proxy that uses port 888: + + curl -x my-proxy:888 ftp://ftp.leachsite.com/README + + Get a file from an HTTP server that requires user and password, using the + same proxy as above: + + curl -u user:passwd -x my-proxy:888 http://www.get.this/ + + Some proxies require special authentication. Specify by using -U as above: + + curl -U user:passwd -x my-proxy:888 http://www.get.this/ + + A comma-separated list of hosts and domains which do not use the proxy can + be specified as: + + curl --noproxy localhost,get.this -x my-proxy:888 http://www.get.this/ + + If the proxy is specified with --proxy1.0 instead of --proxy or -x, then + curl will use HTTP/1.0 instead of HTTP/1.1 for any CONNECT attempts. + + curl also supports SOCKS4 and SOCKS5 proxies with --socks4 and --socks5. + + See also the environment variables Curl supports that offer further proxy + control. + + Most FTP proxy servers are set up to appear as a normal FTP server from the + client's perspective, with special commands to select the remote FTP server. + curl supports the -u, -Q and --ftp-account options that can be used to + set up transfers through many FTP proxies. For example, a file can be + uploaded to a remote FTP server using a Blue Coat FTP proxy with the + options: + + curl -u "Remote-FTP-Username@remote.ftp.server Proxy-Username:Remote-Pass" \ + --ftp-account Proxy-Password --upload-file local-file \ + ftp://my-ftp.proxy.server:21/remote/upload/path/ + + See the manual for your FTP proxy to determine the form it expects to set up + transfers, and curl's -v option to see exactly what curl is sending. + +RANGES + + HTTP 1.1 introduced byte-ranges. 
Using this, a client can request + to get only one or more subparts of a specified document. Curl supports + this with the -r flag. + + Get the first 100 bytes of a document: + + curl -r 0-99 http://www.get.this/ + + Get the last 500 bytes of a document: + + curl -r -500 http://www.get.this/ + + Curl also supports simple ranges for FTP files as well. Then you can only + specify start and stop position. + + Get the first 100 bytes of a document using FTP: + + curl -r 0-99 ftp://www.get.this/README + +UPLOADING + + FTP / FTPS / SFTP / SCP + + Upload all data on stdin to a specified server: + + curl -T - ftp://ftp.upload.com/myfile + + Upload data from a specified file, login with user and password: + + curl -T uploadfile -u user:passwd ftp://ftp.upload.com/myfile + + Upload a local file to the remote site, and use the local file name at the remote + site too: + + curl -T uploadfile -u user:passwd ftp://ftp.upload.com/ + + Upload a local file to get appended to the remote file: + + curl -T localfile -a ftp://ftp.upload.com/remotefile + + Curl also supports ftp upload through a proxy, but only if the proxy is + configured to allow that kind of tunneling. If it does, you can run curl in + a fashion similar to: + + curl --proxytunnel -x proxy:port -T localfile ftp.upload.com + +SMB / SMBS + + curl -T file.txt -u "domain\username:passwd" + smb://server.example.com/share/ + + HTTP + + Upload all data on stdin to a specified HTTP site: + + curl -T - http://www.upload.com/myfile + + Note that the HTTP server must have been configured to accept PUT before + this can be done successfully. + + For other ways to do HTTP data upload, see the POST section below. + +VERBOSE / DEBUG + + If curl fails where it isn't supposed to, if the servers don't let you in, + if you can't understand the responses: use the -v flag to get verbose + fetching. Curl will output lots of info and what it sends and receives in + order to let the user see all client-server interaction (but it won't show + you the actual data). + + curl -v ftp://ftp.upload.com/ + + To get even more details and information on what curl does, try using the + --trace or --trace-ascii options with a given file name to log to, like + this: + + curl --trace trace.txt www.haxx.se + + +DETAILED INFORMATION + + Different protocols provide different ways of getting detailed information + about specific files/documents. To get curl to show detailed information + about a single file, you should use -I/--head option. It displays all + available info on a single file for HTTP and FTP. The HTTP information is a + lot more extensive. + + For HTTP, you can get the header information (the same as -I would show) + shown before the data by using -i/--include. Curl understands the + -D/--dump-header option when getting files from both FTP and HTTP, and it + will then store the headers in the specified file. + + Store the HTTP headers in a separate file (headers.txt in the example): + + curl --dump-header headers.txt curl.haxx.se + + Note that headers stored in a separate file can be very useful at a later + time if you want curl to use cookies sent by the server. More about that in + the cookies section. + +POST (HTTP) + + It's easy to post data using curl. This is done using the -d + option. The post data must be urlencoded. + + Post a simple "name" and "phone" guestbook. + + curl -d "name=Rafael%20Sagula&phone=3320780" \ + http://www.where.com/guest.cgi + + How to post a form with curl, lesson #1: + + Dig out all the tags in the form that you want to fill in. 
(There's
+ a perl program called formfind.pl on the curl site that helps with this).
+
+ If there's a "normal" post, you use -d to post. -d takes a full "post
+ string", which is in the format
+
+   <variable1>=<data1>&<variable2>=<data2>&...
+
+ The 'variable' names are the names set with "name=" in the <input> tags, and
+ the data is the contents you want to fill in for the inputs. The data *must*
+ be properly URL encoded. That means you replace space with + and that you
+ replace weird letters with %XX where XX is the hexadecimal representation of
+ the letter's ASCII code.
+
+ Example:
+
+ (page located at http://www.formpost.com/getthis/
+
+   <form action="post.cgi" method="post">
+   <input name=user size=10>
+   <input name=pass type=password size=10>
+   <input name=id type=hidden value="blablabla">
+   <input name=ding value="submit">
+   </form>
+ + We want to enter user 'foobar' with password '12345'. + + To post to this, you enter a curl command line like: + + curl -d "user=foobar&pass=12345&id=blablabla&ding=submit" (continues) + http://www.formpost.com/getthis/post.cgi + + + While -d uses the application/x-www-form-urlencoded mime-type, generally + understood by CGI's and similar, curl also supports the more capable + multipart/form-data type. This latter type supports things like file upload. + + -F accepts parameters like -F "name=contents". If you want the contents to + be read from a file, use <@filename> as contents. When specifying a file, + you can also specify the file content type by appending ';type=' + to the file name. You can also post the contents of several files in one + field. For example, the field name 'coolfiles' is used to send three files, + with different content types using the following syntax: + + curl -F "coolfiles=@fil1.gif;type=image/gif,fil2.txt,fil3.html" \ + http://www.post.com/postit.cgi + + If the content-type is not specified, curl will try to guess from the file + extension (it only knows a few), or use the previously specified type (from + an earlier file if several files are specified in a list) or else it will + use the default type 'application/octet-stream'. + + Emulate a fill-in form with -F. Let's say you fill in three fields in a + form. One field is a file name which to post, one field is your name and one + field is a file description. We want to post the file we have written named + "cooltext.txt". To let curl do the posting of this data instead of your + favourite browser, you have to read the HTML source of the form page and + find the names of the input fields. In our example, the input field names + are 'file', 'yourname' and 'filedescription'. + + curl -F "file=@cooltext.txt" -F "yourname=Daniel" \ + -F "filedescription=Cool text file with cool text inside" \ + http://www.post.com/postit.cgi + + To send two files in one post you can do it in two ways: + + 1. Send multiple files in a single "field" with a single field name: + + curl -F "pictures=@dog.gif,cat.gif" + + 2. Send two fields with two field names: + + curl -F "docpicture=@dog.gif" -F "catpicture=@cat.gif" + + To send a field value literally without interpreting a leading '@' + or '<', or an embedded ';type=', use --form-string instead of + -F. This is recommended when the value is obtained from a user or + some other unpredictable source. Under these circumstances, using + -F instead of --form-string would allow a user to trick curl into + uploading a file. + +REFERRER + + An HTTP request has the option to include information about which address + referred it to the actual page. Curl allows you to specify the + referrer to be used on the command line. It is especially useful to + fool or trick stupid servers or CGI scripts that rely on that information + being available or contain certain data. + + curl -e www.coolsite.com http://www.showme.com/ + + NOTE: The Referer: [sic] field is defined in the HTTP spec to be a full URL. + +USER AGENT + + An HTTP request has the option to include information about the browser + that generated the request. Curl allows it to be specified on the command + line. It is especially useful to fool or trick stupid servers or CGI + scripts that only accept certain browsers. 
+ + Example: + + curl -A 'Mozilla/3.0 (Win95; I)' http://www.nationsbank.com/ + + Other common strings: + 'Mozilla/3.0 (Win95; I)' Netscape Version 3 for Windows 95 + 'Mozilla/3.04 (Win95; U)' Netscape Version 3 for Windows 95 + 'Mozilla/2.02 (OS/2; U)' Netscape Version 2 for OS/2 + 'Mozilla/4.04 [en] (X11; U; AIX 4.2; Nav)' NS for AIX + 'Mozilla/4.05 [en] (X11; U; Linux 2.0.32 i586)' NS for Linux + + Note that Internet Explorer tries hard to be compatible in every way: + 'Mozilla/4.0 (compatible; MSIE 4.01; Windows 95)' MSIE for W95 + + Mozilla is not the only possible User-Agent name: + 'Konqueror/1.0' KDE File Manager desktop client + 'Lynx/2.7.1 libwww-FM/2.14' Lynx command line browser + +COOKIES + + Cookies are generally used by web servers to keep state information at the + client's side. The server sets cookies by sending a response line in the + headers that looks like 'Set-Cookie: ' where the data part then + typically contains a set of NAME=VALUE pairs (separated by semicolons ';' + like "NAME1=VALUE1; NAME2=VALUE2;"). The server can also specify for what + path the "cookie" should be used for (by specifying "path=value"), when the + cookie should expire ("expire=DATE"), for what domain to use it + ("domain=NAME") and if it should be used on secure connections only + ("secure"). + + If you've received a page from a server that contains a header like: + Set-Cookie: sessionid=boo123; path="/foo"; + + it means the server wants that first pair passed on when we get anything in + a path beginning with "/foo". + + Example, get a page that wants my name passed in a cookie: + + curl -b "name=Daniel" www.sillypage.com + + Curl also has the ability to use previously received cookies in following + sessions. If you get cookies from a server and store them in a file in a + manner similar to: + + curl --dump-header headers www.example.com + + ... you can then in a second connect to that (or another) site, use the + cookies from the 'headers' file like: + + curl -b headers www.example.com + + While saving headers to a file is a working way to store cookies, it is + however error-prone and not the preferred way to do this. Instead, make curl + save the incoming cookies using the well-known netscape cookie format like + this: + + curl -c cookies.txt www.example.com + + Note that by specifying -b you enable the "cookie awareness" and with -L + you can make curl follow a location: (which often is used in combination + with cookies). So that if a site sends cookies and a location, you can + use a non-existing file to trigger the cookie awareness like: + + curl -L -b empty.txt www.example.com + + The file to read cookies from must be formatted using plain HTTP headers OR + as netscape's cookie file. Curl will determine what kind it is based on the + file contents. In the above command, curl will parse the header and store + the cookies received from www.example.com. curl will send to the server the + stored cookies which match the request as it follows the location. The + file "empty.txt" may be a nonexistent file. + + To read and write cookies from a netscape cookie file, you can set both -b + and -c to use the same file: + + curl -b cookies.txt -c cookies.txt www.example.com + +PROGRESS METER + + The progress meter exists to show a user that something actually is + happening. The different fields in the output have the following meaning: + + % Total % Received % Xferd Average Speed Time Curr. 
+ Dload Upload Total Current Left Speed + 0 151M 0 38608 0 0 9406 0 4:41:43 0:00:04 4:41:39 9287 + + From left-to-right: + % - percentage completed of the whole transfer + Total - total size of the whole expected transfer + % - percentage completed of the download + Received - currently downloaded amount of bytes + % - percentage completed of the upload + Xferd - currently uploaded amount of bytes + Average Speed + Dload - the average transfer speed of the download + Average Speed + Upload - the average transfer speed of the upload + Time Total - expected time to complete the operation + Time Current - time passed since the invoke + Time Left - expected time left to completion + Curr.Speed - the average transfer speed the last 5 seconds (the first + 5 seconds of a transfer is based on less time of course.) + + The -# option will display a totally different progress bar that doesn't + need much explanation! + +SPEED LIMIT + + Curl allows the user to set the transfer speed conditions that must be met + to let the transfer keep going. By using the switch -y and -Y you + can make curl abort transfers if the transfer speed is below the specified + lowest limit for a specified time. + + To have curl abort the download if the speed is slower than 3000 bytes per + second for 1 minute, run: + + curl -Y 3000 -y 60 www.far-away-site.com + + This can very well be used in combination with the overall time limit, so + that the above operation must be completed in whole within 30 minutes: + + curl -m 1800 -Y 3000 -y 60 www.far-away-site.com + + Forcing curl not to transfer data faster than a given rate is also possible, + which might be useful if you're using a limited bandwidth connection and you + don't want your transfer to use all of it (sometimes referred to as + "bandwidth throttle"). + + Make curl transfer data no faster than 10 kilobytes per second: + + curl --limit-rate 10K www.far-away-site.com + + or + + curl --limit-rate 10240 www.far-away-site.com + + Or prevent curl from uploading data faster than 1 megabyte per second: + + curl -T upload --limit-rate 1M ftp://uploadshereplease.com + + When using the --limit-rate option, the transfer rate is regulated on a + per-second basis, which will cause the total transfer speed to become lower + than the given number. Sometimes of course substantially lower, if your + transfer stalls during periods. + +CONFIG FILE + + Curl automatically tries to read the .curlrc file (or _curlrc file on win32 + systems) from the user's home dir on startup. + + The config file could be made up with normal command line switches, but you + can also specify the long options without the dashes to make it more + readable. You can separate the options and the parameter with spaces, or + with = or :. Comments can be used within the file. If the first letter on a + line is a '#'-symbol the rest of the line is treated as a comment. + + If you want the parameter to contain spaces, you must enclose the entire + parameter within double quotes ("). Within those quotes, you specify a + quote as \". + + NOTE: You must specify options and their arguments on the same line. + + Example, set default time out and proxy in a config file: + + # We want a 30 minute timeout: + -m 1800 + # ... and we use a proxy for all accesses: + proxy = proxy.our.domain.com:8080 + + White spaces ARE significant at the end of lines, but all white spaces + leading up to the first characters of each line are ignored. 
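+
+  As a small sketch of the quoting rules above (the value is only an
+  example), a parameter that contains spaces could be written in the config
+  file like this:
+
+  # values with spaces need the surrounding double quotes;
+  # a double quote inside such a value would be written as \"
+  user-agent = "Mozilla/3.0 (Win95; I)"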
+ + Prevent curl from reading the default file by using -q as the first command + line parameter, like: + + curl -q www.thatsite.com + + Force curl to get and display a local help page in case it is invoked + without URL by making a config file similar to: + + # default url to get + url = "http://help.with.curl.com/curlhelp.html" + + You can specify another config file to be read by using the -K/--config + flag. If you set config file name to "-" it'll read the config from stdin, + which can be handy if you want to hide options from being visible in process + tables etc: + + echo "user = user:passwd" | curl -K - http://that.secret.site.com + +EXTRA HEADERS + + When using curl in your own very special programs, you may end up needing + to pass on your own custom headers when getting a web page. You can do + this by using the -H flag. + + Example, send the header "X-you-and-me: yes" to the server when getting a + page: + + curl -H "X-you-and-me: yes" www.love.com + + This can also be useful in case you want curl to send a different text in a + header than it normally does. The -H header you specify then replaces the + header curl would normally send. If you replace an internal header with an + empty one, you prevent that header from being sent. To prevent the Host: + header from being used: + + curl -H "Host:" www.server.com + +FTP and PATH NAMES + + Do note that when getting files with the ftp:// URL, the given path is + relative the directory you enter. To get the file 'README' from your home + directory at your ftp site, do: + + curl ftp://user:passwd@my.site.com/README + + But if you want the README file from the root directory of that very same + site, you need to specify the absolute file name: + + curl ftp://user:passwd@my.site.com//README + + (I.e with an extra slash in front of the file name.) + +SFTP and SCP and PATH NAMES + + With sftp: and scp: URLs, the path name given is the absolute name on the + server. To access a file relative to the remote user's home directory, + prefix the file with /~/ , such as: + + curl -u $USER sftp://home.example.com/~/.bashrc + +FTP and firewalls + + The FTP protocol requires one of the involved parties to open a second + connection as soon as data is about to get transferred. There are two ways to + do this. + + The default way for curl is to issue the PASV command which causes the + server to open another port and await another connection performed by the + client. This is good if the client is behind a firewall that doesn't allow + incoming connections. + + curl ftp.download.com + + If the server, for example, is behind a firewall that doesn't allow connections + on ports other than 21 (or if it just doesn't support the PASV command), the + other way to do it is to use the PORT command and instruct the server to + connect to the client on the given IP number and port (as parameters to the + PORT command). + + The -P flag to curl supports a few different options. Your machine may have + several IP-addresses and/or network interfaces and curl allows you to select + which of them to use. 
Default address can also be used: + + curl -P - ftp.download.com + + Download with PORT but use the IP address of our 'le0' interface (this does + not work on windows): + + curl -P le0 ftp.download.com + + Download with PORT but use 192.168.0.10 as our IP address to use: + + curl -P 192.168.0.10 ftp.download.com + +NETWORK INTERFACE + + Get a web page from a server using a specified port for the interface: + + curl --interface eth0:1 http://www.netscape.com/ + + or + + curl --interface 192.168.1.10 http://www.netscape.com/ + +HTTPS + + Secure HTTP requires SSL libraries to be installed and used when curl is + built. If that is done, curl is capable of retrieving and posting documents + using the HTTPS protocol. + + Example: + + curl https://www.secure-site.com + + Curl is also capable of using your personal certificates to get/post files + from sites that require valid certificates. The only drawback is that the + certificate needs to be in PEM-format. PEM is a standard and open format to + store certificates with, but it is not used by the most commonly used + browsers (Netscape and MSIE both use the so called PKCS#12 format). If you + want curl to use the certificates you use with your (favourite) browser, you + may need to download/compile a converter that can convert your browser's + formatted certificates to PEM formatted ones. This kind of converter is + included in recent versions of OpenSSL, and for older versions Dr Stephen + N. Henson has written a patch for SSLeay that adds this functionality. You + can get his patch (that requires an SSLeay installation) from his site at: + http://www.drh-consultancy.demon.co.uk/ + + Example on how to automatically retrieve a document using a certificate with + a personal password: + + curl -E /path/to/cert.pem:password https://secure.site.com/ + + If you neglect to specify the password on the command line, you will be + prompted for the correct password before any data can be received. + + Many older SSL-servers have problems with SSLv3 or TLS, which newer versions + of OpenSSL etc use, therefore it is sometimes useful to specify what + SSL-version curl should use. Use -3, -2 or -1 to specify that exact SSL + version to use (for SSLv3, SSLv2 or TLSv1 respectively): + + curl -2 https://secure.site.com/ + + Otherwise, curl will first attempt to use v3 and then v2. + + To use OpenSSL to convert your favourite browser's certificate into a PEM + formatted one that curl can use, do something like this: + + In Netscape, you start with hitting the 'Security' menu button. + + Select 'certificates->yours' and then pick a certificate in the list + + Press the 'Export' button + + enter your PIN code for the certs + + select a proper place to save it + + Run the 'openssl' application to convert the certificate. If you cd to the + openssl installation, you can do it like: + + # ./apps/openssl pkcs12 -in [file you saved] -clcerts -out [PEMfile] + + In Firefox, select Options, then Advanced, then the Encryption tab, + View Certificates. This opens the Certificate Manager, where you can + Export. Be sure to select PEM for the Save as type. + + In Internet Explorer, select Internet Options, then the Content tab, then + Certificates. Then you can Export, and depending on the format you may + need to convert to PEM. + + In Chrome, select Settings, then Show Advanced Settings. Under HTTPS/SSL + select Manage Certificates. 
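+
+  As a rough end-to-end sketch (the file names and password are
+  placeholders), converting an exported PKCS#12 certificate and then using
+  it with curl could be combined like:
+
+    openssl pkcs12 -in mycert.p12 -clcerts -out mycert.pem
+    curl -E mycert.pem:password https://secure.site.com/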
+ +RESUMING FILE TRANSFERS + + To continue a file transfer where it was previously aborted, curl supports + resume on HTTP(S) downloads as well as FTP uploads and downloads. + + Continue downloading a document: + + curl -C - -o file ftp://ftp.server.com/path/file + + Continue uploading a document(*1): + + curl -C - -T file ftp://ftp.server.com/path/file + + Continue downloading a document from a web server(*2): + + curl -C - -o file http://www.server.com/ + + (*1) = This requires that the FTP server supports the non-standard command + SIZE. If it doesn't, curl will say so. + + (*2) = This requires that the web server supports at least HTTP/1.1. If it + doesn't, curl will say so. + +TIME CONDITIONS + + HTTP allows a client to specify a time condition for the document it + requests. It is If-Modified-Since or If-Unmodified-Since. Curl allows you to + specify them with the -z/--time-cond flag. + + For example, you can easily make a download that only gets performed if the + remote file is newer than a local copy. It would be made like: + + curl -z local.html http://remote.server.com/remote.html + + Or you can download a file only if the local file is newer than the remote + one. Do this by prepending the date string with a '-', as in: + + curl -z -local.html http://remote.server.com/remote.html + + You can specify a "free text" date as condition. Tell curl to only download + the file if it was updated since January 12, 2012: + + curl -z "Jan 12 2012" http://remote.server.com/remote.html + + Curl will then accept a wide range of date formats. You always make the date + check the other way around by prepending it with a dash '-'. + +DICT + + For fun try + + curl dict://dict.org/m:curl + curl dict://dict.org/d:heisenbug:jargon + curl dict://dict.org/d:daniel:web1913 + + Aliases for 'm' are 'match' and 'find', and aliases for 'd' are 'define' + and 'lookup'. For example, + + curl dict://dict.org/find:curl + + Commands that break the URL description of the RFC (but not the DICT + protocol) are + + curl dict://dict.org/show:db + curl dict://dict.org/show:strat + + Authentication is still missing (but this is not required by the RFC) + +LDAP + + If you have installed the OpenLDAP library, curl can take advantage of it + and offer ldap:// support. + On Windows, curl will use WinLDAP from Platform SDK by default. + + Default protocol version used by curl is LDAPv3. LDAPv2 will be used as + fallback mechanism in case if LDAPv3 will fail to connect. + + LDAP is a complex thing and writing an LDAP query is not an easy task. I do + advise you to dig up the syntax description for that elsewhere. One such + place might be: + + RFC 2255, "The LDAP URL Format" https://curl.haxx.se/rfc/rfc2255.txt + + To show you an example, this is how I can get all people from my local LDAP + server that has a certain sub-domain in their email address: + + curl -B "ldap://ldap.frontec.se/o=frontec??sub?mail=*sth.frontec.se" + + If I want the same info in HTML format, I can get it by not using the -B + (enforce ASCII) flag. + + You also can use authentication when accessing LDAP catalog: + + curl -u user:passwd "ldap://ldap.frontec.se/o=frontec??sub?mail=*" + curl "ldap://user:passwd@ldap.frontec.se/o=frontec??sub?mail=*" + + By default, if user and password provided, OpenLDAP/WinLDAP will use basic + authentication. 
On Windows you can control this behavior by providing + one of --basic, --ntlm or --digest option in curl command line + + curl --ntlm "ldap://user:passwd@ldap.frontec.se/o=frontec??sub?mail=*" + + On Windows, if no user/password specified, auto-negotiation mechanism will + be used with current logon credentials (SSPI/SPNEGO). + +ENVIRONMENT VARIABLES + + Curl reads and understands the following environment variables: + + http_proxy, HTTPS_PROXY, FTP_PROXY + + They should be set for protocol-specific proxies. General proxy should be + set with + + ALL_PROXY + + A comma-separated list of host names that shouldn't go through any proxy is + set in (only an asterisk, '*' matches all hosts) + + NO_PROXY + + If the host name matches one of these strings, or the host is within the + domain of one of these strings, transactions with that node will not be + proxied. When a domain is used, it needs to start with a period. A user can + specify that both www.example.com and foo.example.com should not uses a + proxy by setting NO_PROXY to ".example.com". By including the full name you + can exclude specific host names, so to make www.example.com not use a proxy + but still have foo.example.com do it, set NO_PROXY to "www.example.com" + + The usage of the -x/--proxy flag overrides the environment variables. + +NETRC + + Unix introduced the .netrc concept a long time ago. It is a way for a user + to specify name and password for commonly visited FTP sites in a file so + that you don't have to type them in each time you visit those sites. You + realize this is a big security risk if someone else gets hold of your + passwords, so therefore most unix programs won't read this file unless it is + only readable by yourself (curl doesn't care though). + + Curl supports .netrc files if told to (using the -n/--netrc and + --netrc-optional options). This is not restricted to just FTP, + so curl can use it for all protocols where authentication is used. + + A very simple .netrc file could look something like: + + machine curl.haxx.se login iamdaniel password mysecret + +CUSTOM OUTPUT + + To better allow script programmers to get to know about the progress of + curl, the -w/--write-out option was introduced. Using this, you can specify + what information from the previous transfer you want to extract. + + To display the amount of bytes downloaded together with some text and an + ending newline: + + curl -w 'We downloaded %{size_download} bytes\n' www.download.com + +KERBEROS FTP TRANSFER + + Curl supports kerberos4 and kerberos5/GSSAPI for FTP transfers. You need + the kerberos package installed and used at curl build time for it to be + available. + + First, get the krb-ticket the normal way, like with the kinit/kauth tool. + Then use curl in way similar to: + + curl --krb private ftp://krb4site.com -u username:fakepwd + + There's no use for a password on the -u switch, but a blank one will make + curl ask for one and you already entered the real password to kinit/kauth. + +TELNET + + The curl telnet support is basic and very easy to use. Curl passes all data + passed to it on stdin to the remote server. Connect to a remote telnet + server using a command line similar to: + + curl telnet://remote.server.com + + And enter the data to pass to the server on stdin. The result will be sent + to stdout or to the file you specify with -o. + + You might want the -N/--no-buffer option to switch off the buffered output + for slow connections or similar. 
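+
+  For example (the host name is a placeholder), an unbuffered interactive
+  session could be started with:
+
+    curl -N telnet://remote.server.com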
+ + Pass options to the telnet protocol negotiation, by using the -t option. To + tell the server we use a vt100 terminal, try something like: + + curl -tTTYPE=vt100 telnet://remote.server.com + + Other interesting options for it -t include: + + - XDISPLOC= Sets the X display location. + + - NEW_ENV= Sets an environment variable. + + NOTE: The telnet protocol does not specify any way to login with a specified + user and password so curl can't do that automatically. To do that, you need + to track when the login prompt is received and send the username and + password accordingly. + +PERSISTENT CONNECTIONS + + Specifying multiple files on a single command line will make curl transfer + all of them, one after the other in the specified order. + + libcurl will attempt to use persistent connections for the transfers so that + the second transfer to the same host can use the same connection that was + already initiated and was left open in the previous transfer. This greatly + decreases connection time for all but the first transfer and it makes a far + better use of the network. + + Note that curl cannot use persistent connections for transfers that are used + in subsequence curl invokes. Try to stuff as many URLs as possible on the + same command line if they are using the same host, as that'll make the + transfers faster. If you use an HTTP proxy for file transfers, practically + all transfers will be persistent. + +MULTIPLE TRANSFERS WITH A SINGLE COMMAND LINE + + As is mentioned above, you can download multiple files with one command line + by simply adding more URLs. If you want those to get saved to a local file + instead of just printed to stdout, you need to add one save option for each + URL you specify. Note that this also goes for the -O option (but not + --remote-name-all). + + For example: get two files and use -O for the first and a custom file + name for the second: + + curl -O http://url.com/file.txt ftp://ftp.com/moo.exe -o moo.jpg + + You can also upload multiple files in a similar fashion: + + curl -T local1 ftp://ftp.com/moo.exe -T local2 ftp://ftp.com/moo2.txt + +IPv6 + + curl will connect to a server with IPv6 when a host lookup returns an IPv6 + address and fall back to IPv4 if the connection fails. The --ipv4 and --ipv6 + options can specify which address to use when both are available. IPv6 + addresses can also be specified directly in URLs using the syntax: + + http://[2001:1890:1112:1::20]/overview.html + + When this style is used, the -g option must be given to stop curl from + interpreting the square brackets as special globbing characters. Link local + and site local addresses including a scope identifier, such as fe80::1234%1, + may also be used, but the scope portion must be numeric or match an existing + network interface on Linux and the percent character must be URL escaped. The + previous example in an SFTP URL might look like: + + sftp://[fe80::1234%251]/ + + IPv6 addresses provided other than in URLs (e.g. to the --proxy, --interface + or --ftp-port options) should not be URL encoded. + +METALINK + + Curl supports Metalink (both version 3 and 4 (RFC 5854) are supported), a way + to list multiple URIs and hashes for a file. Curl will make use of the mirrors + listed within for failover if there are errors (such as the file or server not + being available). It will also verify the hash of the file after the download + completes. The Metalink file itself is downloaded and processed in memory and + not stored in the local file system. 
+ + Example to use a remote Metalink file: + + curl --metalink http://www.example.com/example.metalink + + To use a Metalink file in the local file system, use FILE protocol (file://): + + curl --metalink file://example.metalink + + Please note that if FILE protocol is disabled, there is no way to use a local + Metalink file at the time of this writing. Also note that if --metalink and + --include are used together, --include will be ignored. This is because including + headers in the response will break Metalink parser and if the headers are included + in the file described in Metalink file, hash check will fail. + +MAILING LISTS + + For your convenience, we have several open mailing lists to discuss curl, + its development and things relevant to this. Get all info at + https://curl.haxx.se/mail/. Some of the lists available are: + + curl-users + + Users of the command line tool. How to use it, what doesn't work, new + features, related tools, questions, news, installations, compilations, + running, porting etc. + + curl-library + + Developers using or developing libcurl. Bugs, extensions, improvements. + + curl-announce + + Low-traffic. Only receives announcements of new public versions. At worst, + that makes something like one or two mails per month, but usually only one + mail every second month. + + curl-and-php + + Using the curl functions in PHP. Everything curl with a PHP angle. Or PHP + with a curl angle. + + curl-and-python + + Python hackers using curl with or without the python binding pycurl. + + Please direct curl questions, feature requests and trouble reports to one of + these mailing lists instead of mailing any individual. diff --git a/deps-win32/curl-7.54.1/docs/Makefile.am b/deps-win32/curl-7.54.1/docs/Makefile.am new file mode 100644 index 0000000..21c1be0 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/Makefile.am @@ -0,0 +1,83 @@ +#*************************************************************************** +# _ _ ____ _ +# Project ___| | | | _ \| | +# / __| | | | |_) | | +# | (__| |_| | _ <| |___ +# \___|\___/|_| \_\_____| +# +# Copyright (C) 1998 - 2017, Daniel Stenberg, , et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +########################################################################### + +AUTOMAKE_OPTIONS = foreign no-dependencies + +# EXTRA_DIST breaks with $(abs_builddir) so build it using this variable +# but distribute it (using the relative file name) in the next variable +man_MANS = $(abs_builddir)/curl.1 +noinst_man_MANS = curl.1 mk-ca-bundle.1 +dist_man_MANS = curl-config.1 +GENHTMLPAGES = curl.html curl-config.html mk-ca-bundle.html +PDFPAGES = curl.pdf curl-config.pdf mk-ca-bundle.pdf +MANDISTPAGES = curl.1.dist curl-config.1.dist + +HTMLPAGES = $(GENHTMLPAGES) index.html + +# Build targets in this file (.) before cmdline-opts to ensure that +# the curl.1 rule below runs first +SUBDIRS = libcurl . 
cmdline-opts +DIST_SUBDIRS = $(SUBDIRS) examples + +CLEANFILES = $(GENHTMLPAGES) $(PDFPAGES) $(MANDISTPAGES) curl.1 + +EXTRA_DIST = MANUAL BUGS CONTRIBUTE.md FAQ FEATURES INTERNALS.md SSLCERTS.md \ + README.win32 RESOURCES TODO TheArtOfHttpScripting THANKS VERSIONS KNOWN_BUGS \ + BINDINGS.md HISTORY.md INSTALL INSTALL.md LICENSE-MIXING.md \ + README.netware MAIL-ETIQUETTE HTTP-COOKIES.md SECURITY.md RELEASE-PROCEDURE \ + SSL-PROBLEMS.md HTTP2.md ROADMAP.md CODE_OF_CONDUCT.md CODE_STYLE.md \ + CHECKSRC.md CMakeLists.txt README.md CIPHERS.md INSTALL.cmake README.cmake \ + $(noinst_man_MANS) + +MAN2HTML= roffit $< >$@ + +SUFFIXES = .1 .html .pdf + +# $(abs_builddir) is to disable VPATH when searching for this file, which +# would otherwise find the copy in $(srcdir) which breaks the $(HUGE) +# rule in src/Makefile.am in out-of-tree builds that references the file in the +# build directory. +# +# First, seed the used copy of curl.1 with the prebuilt copy (in an out-of-tree +# build), then run make recursively to rebuild it only if its dependencies +# have changed. +$(abs_builddir)/curl.1: + if test "$(top_builddir)x" != "$(top_srcdir)x" -a -e "$(srcdir)/curl.1"; then \ + cp -fp "$(srcdir)/curl.1" $@; fi + cd cmdline-opts && $(MAKE) + +html: $(HTMLPAGES) + cd libcurl && $(MAKE) html + +pdf: $(PDFPAGES) + cd libcurl && $(MAKE) pdf + +.1.html: + $(MAN2HTML) + +.1.pdf: + @(foo=`echo $@ | sed -e 's/\.[0-9]$$//g'`; \ + groff -Tps -man $< >$$foo.ps; \ + ps2pdf $$foo.ps $@; \ + rm $$foo.ps; \ + echo "converted $< to $@") + diff --git a/deps-win32/curl-7.54.1/docs/README.cmake b/deps-win32/curl-7.54.1/docs/README.cmake new file mode 100644 index 0000000..084c1de --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/README.cmake @@ -0,0 +1,16 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +README.cmake + Read the README file first. + + Curl contains CMake build files that provide a way to build Curl with the + CMake build tool (www.cmake.org). CMake is a cross platform meta build tool + that generates native makefiles and IDE project files. The CMake build + system can be used to build Curl on any of its supported platforms. + + Read the INSTALL.cmake file for instructions on how to compile curl with + CMake. diff --git a/deps-win32/curl-7.54.1/docs/README.md b/deps-win32/curl-7.54.1/docs/README.md new file mode 100644 index 0000000..56691fc --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/README.md @@ -0,0 +1,12 @@ +![curl logo](https://cdn.rawgit.com/curl/curl-www/master/logo/curl-logo.svg) + +# Documentation + +You'll find a mix of various documentation in this directory and +subdirectories, using several different formats. Some of them are not ideal +for reading directly in your browser. + +If you'd rather see the rendered version of the documentation, check out the +curl web site's [documentation section](https://curl.haxx.se/docs/) for +general curl stuff or the [libcurl section](https://curl.haxx.se/libcurl/) for +libcurl related documentation. diff --git a/deps-win32/curl-7.54.1/docs/README.netware b/deps-win32/curl-7.54.1/docs/README.netware new file mode 100644 index 0000000..9028963 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/README.netware @@ -0,0 +1,26 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +README.netware + + Read the README file first. 
+ + Curl has been successfully compiled with gcc / nlmconv on different flavours + of Linux as well as with the official Metrowerks CodeWarrior compiler. + While not being the main development target, a continuously growing share of + curl users are NetWare-based, especially also consuming the lib from PHP. + + The unix-style man pages are tricky to read on windows, so therefore all + those pages are also provided as web pages on the curl web site. + + The main curl.1 man page is also "built-in" in the command line tool. Use a + command line similar to this in order to extract a separate text file: + + curl -M >manual.txt + + Read the INSTALL file for instructions on how to compile curl self. + + diff --git a/deps-win32/curl-7.54.1/docs/README.win32 b/deps-win32/curl-7.54.1/docs/README.win32 new file mode 100644 index 0000000..00ca197 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/README.win32 @@ -0,0 +1,25 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +README.win32 + + Read the README file first. + + Curl has been compiled, built and run on all sorts of Windows and win32 + systems. While not being the main develop target, a fair share of curl users + are win32-based. + + The unix-style man pages are tricky to read on windows, so therefore all + those pages are also provided as web pages on the curl web site. + + The main curl.1 man page is also "built-in" in the command line tool. Use a + command line similar to this in order to extract a separate text file: + + curl -M >manual.txt + + Read the INSTALL file for instructions on how to compile curl self. + + diff --git a/deps-win32/curl-7.54.1/docs/RELEASE-PROCEDURE b/deps-win32/curl-7.54.1/docs/RELEASE-PROCEDURE new file mode 100644 index 0000000..5137f55 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/RELEASE-PROCEDURE @@ -0,0 +1,92 @@ +curl release procedure - how to do a release +============================================ + +in the source code repo +----------------------- + +- edit `RELEASE-NOTES` to be accurate + +- update `docs/THANKS` + +- make sure all relevant changes are committed on the master branch + +- tag the git repo in this style: `git tag -a curl-7_34_0`. -a annotates the + tag and we use underscores instead of dots in the version number. + +- run "./maketgz 7.34.0" to build the release tarballs. It is important that + you run this on a machine with the correct set of autotools etc installed + as this is what then will be shipped and used by most users on *nix like + systems. + +- push the git commits and the new tag + +- gpg sign the 4 tarballs as maketgz suggests + +- upload the 8 resulting files to the primary download directory + +in the curl-www repo +-------------------- + +- edit `Makefile` (version number and date), + +- edit `_newslog.html` (announce the new release) and + +- edit `_changes.html` (insert changes+bugfixes from RELEASE-NOTES) + +- commit all local changes + +- tag the repo with the same tag as used for the source repo + +- make sure all relevant changes are committed and pushed on the master branch + + (the web site then updates its contents automatically) + +on github +--------- + +- edit the newly made release tag so that it is listed as the latest release + +inform +------ + +- send an email to curl-users, curl-announce and curl-library. Insert the + RELEASE-NOTES into the mail. 
+ +celebrate +--------- + +- suitable beverage intake is encouraged for the festivities + +curl release scheduling +======================= + +Basics +------ + +We do releases every 8 weeks on Wednesdays. If critical problems arise, we can +insert releases outside of the schedule or we can move the release date - but +this is very rare. + +Each 8 week release cycle is split in two 4-week periods. + +- During the first 4 weeks after a release, we allow new features and changes + to curl and libcurl. If we accept any such changes, we bump the minor number + used for the next release. + +- During the second 4-week period we do not merge any features or changes, we + then only focus on fixing bugs and polishing things to make a solid coming + release. + +Coming dates +------------ + +Based on the description above, here are some planned release dates (at the +time of this writing): + +- June 14, 2017 (version 7.54.1) +- August 9, 2017 +- October 4, 2017 +- November 29, 2017 +- January 24, 2018 +- March 21, 2018 +- May 16, 2018 diff --git a/deps-win32/curl-7.54.1/docs/RESOURCES b/deps-win32/curl-7.54.1/docs/RESOURCES new file mode 100644 index 0000000..1ad8aac --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/RESOURCES @@ -0,0 +1,83 @@ + _ _ ____ _ + Project ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + + +This document lists documents and standards used by curl. + + RFC 959 - The FTP protocol + + RFC 1635 - How to Use Anonymous FTP + + RFC 1738 - Uniform Resource Locators + + RFC 1777 - defines the LDAP protocol + + RFC 1808 - Relative Uniform Resource Locators + + RFC 1867 - Form-based File Upload in HTML + + RFC 1950 - ZLIB Compressed Data Format Specification + + RFC 1951 - DEFLATE Compressed Data Format Specification + + RFC 1952 - gzip compression format + + RFC 1959 - LDAP URL syntax + + RFC 2045-2049 - Everything you need to know about MIME! (needed for form + based upload) + + RFC 2068 - HTTP 1.1 (obsoleted by RFC 2616) + + RFC 2104 - Keyed-Hashing for Message Authentication + + RFC 2109 - HTTP State Management Mechanism (cookie stuff) + - Also, read Netscape's specification at + https://curl.haxx.se/rfc/cookie_spec.html + + RFC 2183 - The Content-Disposition Header Field + + RFC 2195 - CRAM-MD5 authentication + + RFC 2229 - A Dictionary Server Protocol + + RFC 2255 - Newer LDAP URL syntax document. + + RFC 2231 - MIME Parameter Value and Encoded Word Extensions: + Character Sets, Languages, and Continuations + + RFC 2388 - "Returning Values from Forms: multipart/form-data" + Use this as an addition to the RFC1867 + + RFC 2396 - "Uniform Resource Identifiers: Generic Syntax and Semantics" This + one obsoletes RFC 1738, but since RFC 1738 is often mentioned + I've left it in this list. + + RFC 2428 - FTP Extensions for IPv6 and NATs + + RFC 2577 - FTP Security Considerations + + RFC 2616 - HTTP 1.1, the latest + + RFC 2617 - HTTP Authentication + + RFC 2718 - Guidelines for new URL Schemes + + RFC 2732 - Format for Literal IPv6 Addresses in URL's + + RFC 2818 - HTTP Over TLS (TLS is the successor to SSL) + + RFC 2821 - SMTP protocol + + RFC 2964 - Use of HTTP State Management + + RFC 2965 - HTTP State Management Mechanism. Cookies. 
Obsoletes RFC2109 + + RFC 3207 - SMTP over TLS + + RFC 4616 - PLAIN authentication + + RFC 4954 - SMTP Authentication diff --git a/deps-win32/curl-7.54.1/docs/ROADMAP.md b/deps-win32/curl-7.54.1/docs/ROADMAP.md new file mode 100644 index 0000000..1007ccb --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/ROADMAP.md @@ -0,0 +1,118 @@ +curl the next few years - perhaps +================================= + +Roadmap of things Daniel Stenberg and Steve Holme want to work on next. It is +intended to serve as a guideline for others for information, feedback and +possible participation. + +QUIC +---- + +The standardization process of QUIC has been taken to the IETF and can be +followed on the [IETF QUIC Mailing +list](https://www.ietf.org/mailman/listinfo/quic). I'd like us to get on the +bandwagon. Ideally, this would be done with a separate library/project to +handle the binary/framing layer in a similar fashion to how HTTP/2 is +implemented. This, to allow other projects to benefit from the work and to +thus broaden the interest and chance of others to participate. + +HTTP cookies +------------ + +Two cookie drafts have been adopted by the httpwg in IETF and we should +support them as the popular browsers will as well: + +[Deprecate modification of 'secure' cookies from non-secure +origins](https://tools.ietf.org/html/draft-ietf-httpbis-cookie-alone-00) + +[Cookie Prefixes](https://tools.ietf.org/html/draft-ietf-httpbis-cookie-prefixes-00) + +[Firefox bug report about secure cookies](https://bugzilla.mozilla.org/show_bug.cgi?id=976073) + +SRV records +----------- + +How to find services for specific domains/hosts. + +curl_formadd() +-------------- + +make sure there's an easy handle passed in to `curl_formadd()`, +`curl_formget()` and `curl_formfree()` by adding replacement functions and +deprecating the old ones to allow custom mallocs and more. + +Or perhaps even better: revamp the formpost API completely while we're at it +and making something that is easier to use and understand: + + https://github.com/curl/curl/wiki/formpost-API-redesigned + +Third-party SASL +---------------- + +Add support for third-party SASL libraries such as Cyrus SASL. + +SASL authentication in LDAP +--------------------------- + +... + +Simplify the SMTP email +----------------------- + +Simplify the SMTP email interface so that programmers don't have to +construct the body of an email that contains all the headers, alternative +content, images and attachments - maintain raw interface so that +programmers that want to do this can + +email capabilities +------------------ + +Allow the email protocols to return the capabilities before +authenticating. This will allow an application to decide on the best +authentication mechanism + +Win32 pthreads +-------------- + +Allow Windows threading model to be replaced by Win32 pthreads port + +dynamic buffer size +------------------- + +Implement a dynamic buffer size to allow SFTP to use much larger buffers and +possibly allow the size to be customizable by applications. Use less memory +when handles are not in use? + +New stuff - curl +---------------- + +1. Embed a language interpreter (lua?). For that middle ground where curl + isn’t enough and a libcurl binding feels “too much”. Build-time conditional + of course. + +2. Simplify the SMTP command line so that the headers and multi-part content + don't have to be constructed before calling curl + +Improve +------- + +1. build for windows (considered hard by many users) + +2. 
curl -h output (considered overwhelming to users) + +3. we have > 200 command line options, is there a way to redo things to + simplify or improve the situation as we are likely to keep adding + features/options in the future too + +4. authentication framework (consider merging HTTP and SASL authentication to + give one API for protocols to call) + +5. Perform some of the clean up from the TODO document, removing old + definitions and such like that are currently earmarked to be removed years + ago + +Remove +------ + +1. makefile.vc files as there is no point in maintaining two sets of Windows + makefiles. Note: These are currently being used by the Windows autobuilds diff --git a/deps-win32/curl-7.54.1/docs/SECURITY.md b/deps-win32/curl-7.54.1/docs/SECURITY.md new file mode 100644 index 0000000..c88cc9c --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/SECURITY.md @@ -0,0 +1,116 @@ +curl security for developers +============================ + +This document is intended to provide guidance to curl developers on how +security vulnerabilities should be handled. + +Publishing Information +---------------------- + +All known and public curl or libcurl related vulnerabilities are listed on +[the curl web site security page](https://curl.haxx.se/docs/security.html). + +Security vulnerabilities should not be entered in the project's public bug +tracker unless the necessary configuration is in place to limit access to the +issue to only the reporter and the project's security team. + +Vulnerability Handling +---------------------- + +The typical process for handling a new security vulnerability is as follows. + +No information should be made public about a vulnerability until it is +formally announced at the end of this process. That means, for example that a +bug tracker entry must NOT be created to track the issue since that will make +the issue public and it should not be discussed on any of the project's public +mailing lists. Also messages associated with any commits should not make +any reference to the security nature of the commit if done prior to the public +announcement. + +- The person discovering the issue, the reporter, reports the vulnerability + privately to `curl-security@haxx.se`. That's an email alias that reaches a + handful of selected and trusted people. + +- Messages that do not relate to the reporting or managing of an undisclosed + security vulnerability in curl or libcurl are ignored and no further action + is required. + +- A person in the security team sends an e-mail to the original reporter to + acknowledge the report. + +- The security team investigates the report and either rejects it or accepts + it. + +- If the report is rejected, the team writes to the reporter to explain why. + +- If the report is accepted, the team writes to the reporter to let him/her + know it is accepted and that they are working on a fix. + +- The security team discusses the problem, works out a fix, considers the + impact of the problem and suggests a release schedule. This discussion + should involve the reporter as much as possible. + +- The release of the information should be "as soon as possible" and is most + often synced with an upcoming release that contains the fix. If the + reporter, or anyone else, thinks the next planned release is too far away + then a separate earlier release for security reasons should be considered. 
+ +- Write a security advisory draft about the problem that explains what the + problem is, its impact, which versions it affects, solutions or + workarounds, when the release is out and make sure to credit all + contributors properly. + +- Request a CVE number from + [distros@openwall](http://oss-security.openwall.org/wiki/mailing-lists/distros) + when also informing and preparing them for the upcoming public security + vulnerability announcement - attach the advisory draft for information. Note + that 'distros' won't accept an embargo longer than 19 days and they do not + care for Windows-specific flaws. For windows-specific flaws, request CVE + directly from MITRE. + +- Update the "security advisory" with the CVE number. + +- The security team commits the fix in a private branch. The commit message + should ideally contain the CVE number. This fix is usually also distributed + to the 'distros' mailing list to allow them to use the fix prior to the + public announcement. + +- No more than 48 hours before the release, the private branch is merged into + the master branch and pushed. Once pushed, the information is accessible to + the public and the actual release should follow suit immediately afterwards. + The time between the push and the release is used for final tests and + reviews. + +- The project team creates a release that includes the fix. + +- The project team announces the release and the vulnerability to the world in + the same manner we always announce releases. It gets sent to the + curl-announce, curl-library and curl-users mailing lists. + +- The security web page on the web site should get the new vulnerability + mentioned. + +Pre-notification +---------------- + +If you think you are or should be eligible for a pre-notification about +upcoming security announcements for curl, we urge OS distros and similar +vendors to primarily join the distros@openwall list as that is one of the +purposes of that list - and not just for curl of course. + +If you are not a distro or otherwise not suitable for distros@openwall and yet +want pre-notifications from us, contact the curl security team with a detailed +and clear explanation why this is the case. + +curl-security (at haxx dot se) +------------------------------ + +Who is on this list? There are a couple of criteria you must meet, and then we +might ask you to join the list or you can ask to join it. It really isn't very +formal. We basically only require that you have a long-term presence in the +curl project and you have shown an understanding for the project and its way +of working. You must've been around for a good while and you should have no +plans in vanishing in the near future. + +We do not make the list of participants public mostly because it tends to vary +somewhat over time and a list somewhere will only risk getting outdated. diff --git a/deps-win32/curl-7.54.1/docs/SSL-PROBLEMS.md b/deps-win32/curl-7.54.1/docs/SSL-PROBLEMS.md new file mode 100644 index 0000000..91803e2 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/SSL-PROBLEMS.md @@ -0,0 +1,87 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + +# SSL problems + + First, let's establish that we often refer to TLS and SSL interchangeably as + SSL here. The current protocol is called TLS, it was called SSL a long time + ago. + + There are several known reasons why a connection that involves SSL might + fail. This is a document that attempts to details the most common ones and + how to mitigate them. 
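+
+ Whichever of the failure classes below you run into, a good first step is
+ to ask libcurl what actually happened during the handshake. A minimal
+ sketch (the URL is a placeholder):
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      char errbuf[CURL_ERROR_SIZE] = "";
+      CURLcode rc;
+      CURL *h = curl_easy_init();
+
+      if(!h)
+        return 1;
+      curl_easy_setopt(h, CURLOPT_URL, "https://example.com/");
+      curl_easy_setopt(h, CURLOPT_VERBOSE, 1L);         /* trace the handshake */
+      curl_easy_setopt(h, CURLOPT_ERRORBUFFER, errbuf); /* readable reason */
+      rc = curl_easy_perform(h);
+      if(rc != CURLE_OK)
+        fprintf(stderr, "failed: %s (%s)\n", curl_easy_strerror(rc), errbuf);
+      curl_easy_cleanup(h);
+      return (int)rc;
+    }
+
+ The command line tool prints the same handshake trace with -v.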
+ +## CA certs + + CA certs are used to digitally verify the server's certificate. You need a + "ca bundle" for this. See lots of more details on this in the SSLCERTS + document. + +## CA bundle missing intermediate certificates + + When using said CA bundle to verify a server cert, you will experience + problems if your CA cert does not have the certificates for the + intermediates in the whole trust chain. + +## Protocol version + + Some broken servers fail to support the protocol negotiation properly that + SSL servers are supposed to handle. This may cause the connection to fail + completely. Sometimes you may need to explicitly select a SSL version to use + when connecting to make the connection succeed. + + An additional complication can be that modern SSL libraries sometimes are + built with support for older SSL and TLS versions disabled! + + All versions of SSL are considered insecure and should be avoided. Use TLS. + +## Ciphers + + Clients give servers a list of ciphers to select from. If the list doesn't + include any ciphers the server wants/can use, the connection handshake + fails. + + curl has recently disabled the user of a whole bunch of seriously insecure + ciphers from its default set (slightly depending on SSL backend in use). + + You may have to explicitly provide an alternative list of ciphers for curl + to use to allow the server to use a WEAK cipher for you. + + Note that these weak ciphers are identified as flawed. For example, this + includes symmetric ciphers with less than 128 bit keys and RC4. + + WinSSL in Windows XP is not able to connect to servers that no longer + support the legacy handshakes and algorithms used by those versions, so we + advice against building curl to use WinSSL on really old Windows versions. + + References: + + https://tools.ietf.org/html/draft-popov-tls-prohibiting-rc4-01 + +## Allow BEAST + + BEAST is the name of a TLS 1.0 attack that surfaced 2011. When adding means + to mitigate this attack, it turned out that some broken servers out there in + the wild didn't work properly with the BEAST mitigation in place. + + To make such broken servers work, the --ssl-allow-beast option was + introduced. Exactly as it sounds, it re-introduces the BEAST vulnerability + but on the other hand it allows curl to connect to that kind of strange + servers. + +## Disabling certificate revocation checks + + Some SSL backends may do certificate revocation checks (CRL, OCSP, etc) + depending on the OS or build configuration. The --ssl-no-revoke option was + introduced in 7.44.0 to disable revocation checking but currently is only + supported for WinSSL (the native Windows SSL library), with an exception in + the case of Windows' Untrusted Publishers blacklist which it seems can't be + bypassed. This option may have broader support to accommodate other SSL + backends in the future. + + References: + + https://curl.haxx.se/docs/ssl-compared.html diff --git a/deps-win32/curl-7.54.1/docs/SSLCERTS.md b/deps-win32/curl-7.54.1/docs/SSLCERTS.md new file mode 100644 index 0000000..3fcd345 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/SSLCERTS.md @@ -0,0 +1,173 @@ +SSL Certificate Verification +============================ + +SSL is TLS +---------- + +SSL is the old name. It is called TLS these days. + + +Native SSL +---------- + +If libcurl was built with Schannel or Secure Transport support (the native SSL +libraries included in Windows and Mac OS X), then this does not apply to +you. 
Scroll down for details on how the OS-native engines handle SSL +certificates. If you're not sure, then run "curl -V" and read the results. If +the version string says "WinSSL" in it, then it was built with Schannel +support. + +It is about trust +----------------- + +This system is about trust. In your local CA certificate store you have certs +from *trusted* Certificate Authorities that you then can use to verify that the +server certificates you see are valid. They're signed by one of the CAs you +trust. + +Which CAs do you trust? You can decide to trust the same set of companies your +operating system trusts, or the set one of the known browsers trust. That's +basically trust via someone else you trust. You should just be aware that +modern operating systems and browsers are setup to trust *hundreds* of +companies and recent years several such CAs have been found untrustworthy. + +Certificate Verification +------------------------ + +libcurl performs peer SSL certificate verification by default. This is done +by using a CA certificate store that the SSL library can use to make sure the +peer's server certificate is valid. + +If you communicate with HTTPS, FTPS or other TLS-using servers using +certificates that are signed by CAs present in the store, you can be sure +that the remote server really is the one it claims to be. + +If the remote server uses a self-signed certificate, if you don't install a CA +cert store, if the server uses a certificate signed by a CA that isn't +included in the store you use or if the remote host is an impostor +impersonating your favorite site, and you want to transfer files from this +server, do one of the following: + + 1. Tell libcurl to *not* verify the peer. With libcurl you disable this with + `curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);` + + With the curl command line tool, you disable this with -k/--insecure. + + 2. Get a CA certificate that can verify the remote server and use the proper + option to point out this CA cert for verification when connecting. For + libcurl hackers: `curl_easy_setopt(curl, CURLOPT_CAPATH, capath);` + + With the curl command line tool: --cacert [file] + + 3. Add the CA cert for your server to the existing default CA certificate + store. The default CA certificate store can changed at compile time with the + following configure options: + + --with-ca-bundle=FILE: use the specified file as CA certificate store. CA + certificates need to be concatenated in PEM format into this file. + + --with-ca-path=PATH: use the specified path as CA certificate store. CA + certificates need to be stored as individual PEM files in this directory. + You may need to run c_rehash after adding files there. + + If neither of the two options is specified, configure will try to auto-detect + a setting. It's also possible to explicitly not hardcode any default store + but rely on the built in default the crypto library may provide instead. + You can achieve that by passing both --without-ca-bundle and + --without-ca-path to the configure script. 
+ + If you use Internet Explorer, this is one way to get extract the CA cert + for a particular server: + + - View the certificate by double-clicking the padlock + - Find out where the CA certificate is kept (Certificate> + Authority Information Access>URL) + - Get a copy of the crt file using curl + - Convert it from crt to PEM using the openssl tool: + openssl x509 -inform DES -in yourdownloaded.crt \ + -out outcert.pem -text + - Add the 'outcert.pem' to the CA certificate store or use it stand-alone + as described below. + + If you use the 'openssl' tool, this is one way to get extract the CA cert + for a particular server: + + - `openssl s_client -connect xxxxx.com:443 |tee logfile` + - type "QUIT", followed by the "ENTER" key + - The certificate will have "BEGIN CERTIFICATE" and "END CERTIFICATE" + markers. + - If you want to see the data in the certificate, you can do: "openssl + x509 -inform PEM -in certfile -text -out certdata" where certfile is + the cert you extracted from logfile. Look in certdata. + - If you want to trust the certificate, you can add it to your CA + certificate store or use it stand-alone as described. Just remember that + the security is no better than the way you obtained the certificate. + + 4. If you're using the curl command line tool, you can specify your own CA + cert path by setting the environment variable `CURL_CA_BUNDLE` to the path + of your choice. + + If you're using the curl command line tool on Windows, curl will search + for a CA cert file named "curl-ca-bundle.crt" in these directories and in + this order: + 1. application's directory + 2. current working directory + 3. Windows System directory (e.g. C:\windows\system32) + 4. Windows Directory (e.g. C:\windows) + 5. all directories along %PATH% + + 5. Get a better/different/newer CA cert bundle! One option is to extract the + one a recent Firefox browser uses by running 'make ca-bundle' in the curl + build tree root, or possibly download a version that was generated this + way for you: [CA Extract](https://curl.haxx.se/docs/caextract.html) + +Neglecting to use one of the above methods when dealing with a server using a +certificate that isn't signed by one of the certificates in the installed CA +certificate store, will cause SSL to report an error ("certificate verify +failed") during the handshake and SSL will then refuse further communication +with that server. + +Certificate Verification with NSS +--------------------------------- + +If libcurl was built with NSS support, then depending on the OS distribution, +it is probably required to take some additional steps to use the system-wide +CA cert db. RedHat ships with an additional module, libnsspem.so, which +enables NSS to read the OpenSSL PEM CA bundle. On openSUSE you can install +p11-kit-nss-trust which makes NSS use the system wide CA certificate store. NSS +also has a new [database format](https://wiki.mozilla.org/NSS_Shared_DB). + +Starting with version 7.19.7, libcurl automatically adds the 'sql:' prefix to +the certdb directory (either the hardcoded default /etc/pki/nssdb or the +directory configured with SSL_DIR environment variable). To check which certdb +format your distribution provides, examine the default certdb location: +/etc/pki/nssdb; the new certdb format can be identified by the filenames +cert9.db, key4.db, pkcs11.txt; filenames of older versions are cert8.db, +key3.db, secmod.db. 
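+
+Whichever certdb or bundle format your system provides, the libcurl side of
+options 1 and 2 above is small. A minimal sketch; the bundle path is only an
+example and CURLOPT_CAINFO is the library counterpart of --cacert:
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURLcode rc;
+      CURL *h = curl_easy_init();
+
+      if(!h)
+        return 1;
+      curl_easy_setopt(h, CURLOPT_URL, "https://example.com/");
+
+      /* peer verification is on by default, shown here for clarity */
+      curl_easy_setopt(h, CURLOPT_SSL_VERIFYPEER, 1L);
+
+      /* option 2: point libcurl at a CA bundle file (path is an example)... */
+      curl_easy_setopt(h, CURLOPT_CAINFO, "/etc/ssl/certs/ca-bundle.crt");
+      /* ...or at a c_rehash'ed directory of individual PEM files */
+      /* curl_easy_setopt(h, CURLOPT_CAPATH, "/etc/ssl/certs"); */
+
+      /* option 1, last resort only: switch verification off entirely */
+      /* curl_easy_setopt(h, CURLOPT_SSL_VERIFYPEER, 0L); */
+
+      rc = curl_easy_perform(h);
+      curl_easy_cleanup(h);
+      return (int)rc;
+    }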
+ +Certificate Verification with Schannel and Secure Transport +----------------------------------------------------------- + +If libcurl was built with Schannel (Microsoft's native TLS engine) or Secure +Transport (Apple's native TLS engine) support, then libcurl will still perform +peer certificate verification, but instead of using a CA cert bundle, it will +use the certificates that are built into the OS. These are the same +certificates that appear in the Internet Options control panel (under Windows) +or Keychain Access application (under OS X). Any custom security rules for +certificates will be honored. + +Schannel will run CRL checks on certificates unless peer verification is +disabled. Secure Transport on iOS will run OCSP checks on certificates unless +peer verification is disabled. Secure Transport on OS X will run either OCSP +or CRL checks on certificates if those features are enabled, and this behavior +can be adjusted in the preferences of Keychain Access. + +HTTPS proxy +----------- + +Since version 7.52.0, curl can do HTTPS to the proxy separately from the +connection to the server. This TLS connection is handled separately from the +server connection so instead of `--insecure` and `--cacert` to control the +certificate verification, you use `--proxy-insecure` and `--proxy-cacert`. +With these options, you make sure that the TLS connection and the trust of the +proxy can be kept totally separate from the TLS connection to the server. diff --git a/deps-win32/curl-7.54.1/docs/THANKS b/deps-win32/curl-7.54.1/docs/THANKS new file mode 100644 index 0000000..9d8d124 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/THANKS @@ -0,0 +1,1578 @@ + This project has been alive for many years. Countless people have provided + feedback that have improved curl. Here follows a list of people that have + contributed (a-z order). + + If you have contributed but are missing here, please let us know! + +"Captain Basil" +"Spoon Man" +Aaro Koskinen +Aaron Oneal +Aaron Orenstein +Abram Pousada +Adam D. 
Moss +Adam Langley +Adam Light +Adam Piggott +Adam Sampson +Adam Tkac +Adrian Schuur +Adriano Meirelles +Ajit Dhumale +Akhil Kedia +Aki Koskinen +Akos Pasztory +Akshay Vernekar +Alain Danteny +Alan Jenkins +Alan Pinstein +Albert Chin-A-Young +Albert Choy +Ale Vesely +Alejandro Alvarez Ayllon +Aleksandar Milivojevic +Aleksey Tulinov +Ales Mlakar +Ales Novak +Alessandro Ghedini +Alessandro Vesely +Alex Bligh +Alex Chan +Alex Fishman +Alex Gruz +Alex McLellan +Alex Neblett +Alex Rousskov +Alex Suykov +Alex Vinnik +Alex aka WindEagle +Alexander Beedie +Alexander Dyagilev +Alexander Elgert +Alexander Klauer +Alexander Kourakos +Alexander Krasnostavsky +Alexander Lazic +Alexander Pepper +Alexander Peslyak +Alexander Sinditskiy +Alexander Traud +Alexander Zhuravlev +Alexey Borzov +Alexey Pesternikov +Alexey Simak +Alexey Zakhlestin +Alexis Carvalho +Alexis La Goutte +Alfred Gebert +Allen Pulsifer +Alona Rossen +Amol Pattekar +Amr Shahin +Anatol Belski +Anatoli Tubman +Anders Bakken +Anders Gustafsson +Anders Havn +Anders Roxell +Andi Jahja +Andre Guibert de Bruet +Andre Heinecke +Andreas Damm +Andreas Faerber +Andreas Farber +Andreas Malzahn +Andreas Ntaflos +Andreas Olsson +Andreas Rieke +Andreas Roth +Andreas Schuldei +Andreas Streichardt +Andreas Wurf +Andrei Benea +Andrei Cipu +Andrei Kurushin +Andrei Sedoi +Andrej E Baranov +Andrew Benham +Andrew Biggs +Andrew Bushnell +Andrew Francis +Andrew Fuller +Andrew Krieger +Andrew Kurushin +Andrew Moise +Andrew Robbins +Andrew Wansink +Andrew de los Reyes +Andrey Labunets +Andrii Moiseiev +Andrés García +Andy Cedilnik +Andy Serpa +Andy Tsouladze +Angus Mackay +Anthon Pang +Anthony Avina +Anthony Bryan +Anthony G. Basile +Antoine Aubert +Antoine Calando +Anton Bychkov +Anton Kalmykov +Anton Malov +Anton Yabchinskiy +Antonio Larrosa +Antony74 on github +Antti Hätälä +Arkadiusz Miskiewicz +Armel Asselin +Arnaud Compan +Arnaud Ebalard +Arthur Murray +Arve Knudsen +Arvid Norberg +Ashish Shukla +Ask Bjørn Hansen +Askar Safin +Ates Goral +Augustus Saunders +Avery Fay +Axel Tillequin +Balaji Parasuram +Balaji Salunke +Balint Szilakszi +Barry Abrahamson +Bart Whiteley +Bas Mevissen +Ben Boeckel +Ben Darnell +Ben Greear +Ben Madsen +Ben Noordhuis +Ben Van Hof +Ben Winslow +Benbuck Nason +Benjamin Gerard +Benjamin Gilbert +Benjamin Johnson +Benjamin Kircher +Benoit Neil +Benoit Sigoure +Bernard Leak +Bernard Spil +Bernhard M. Wiedemann +Bernhard Reutner-Fischer +Bert Huijben +Bertrand Demiddelaer +Bertrand Simonnet +Bill Doyle +Bill Egert +Bill Hoffman +Bill Middlecamp +Bill Nagel +Bjoern Sikora +Bjorn Augustsson +Bjorn Reese +Björn Stenberg +Blaise Potard +Bob Relyea +Bob Richmond +Bob Schader +Bogdan Nicula +Brad Burdick +Brad Fitzpatrick +Brad Harder +Brad Hards +Brad King +Brad Spencer +Bradford Bruce +Brandon Casey +Brandon Wang +Brendan Jurd +Brent Beardsley +Brian Akins +Brian Carpenter +Brian Childs +Brian Chrisman +Brian Dessent +Brian J. 
Murrell +Brian Prodoehl +Brian R Duffy +Brian Ulm +Brock Noland +Bru Rom +Bruce Mitchener +Bruce Stephens +Bruno Thomsen +Bruno de Carvalho +Bryan Henderson +Bryan Kemp +Byrial Jensen +Cameron Kaiser +Cameron MacMinn +Camille Moncelier +Caolan McNamara +Carlo Cannas +Carlo Teubner +Carlo Wood +Carsten Lange +Casey O'Donnell +Catalin Patulea +Chad Monroe +Chandrakant Bagul +Charles Kerr +Charles Romestant +Chen Prog +Chih-Chung Chang +Chris "Bob Bob" +Chris Araman +Chris Carlmar +Chris Combes +Chris Conlon +Chris Deidun +Chris Faherty +Chris Flerackers +Chris Gaukroger +Chris Maltby +Chris Mumford +Chris Smowton +Chris Young +Christian Fillion +Christian Grothoff +Christian Heimes +Christian Hägele +Christian Krause +Christian Kurz +Christian Robottom Reis +Christian Schmitz +Christian Stewart +Christian Vogt +Christian Weisgerber +Christophe Demory +Christophe Legry +Christopher Conroy +Christopher Palow +Christopher R. Palmer +Christopher Stone +Chungtsun Li +Ciprian Badescu +Claes Jakobsson +Clarence Gardner +Clemens Gruber +Clifford Wolf +Clint Clayton +Cody Jones +Cody Mack +Colby Ranger +Colin Blair +Colin Hogben +Colin Watson +Colm Buckley +Constantine Sapuntzakis +Cory Benfield +Cory Nelson +Craig A West +Craig Davison +Craig Markwardt +Cris Bailiff +Cristian Rodríguez +Curt Bogmine +Cyrill Osterwalder +Cédric Connes +Cédric Deltheil +D. Flinkmann +Da-Yoon Chung +Dag Ekengren +Dagobert Michelsen +Dambaev Alexander +Damian Dixon +Damien Adant +Damien Vielpeau +Dan Becker +Dan C +Dan Cristian +Dan Donahue +Dan Fandrich +Dan Jacobson +Dan Locks +Dan McNulty +Dan Nelson +Dan Petitt +Dan Torop +Dan Zitter +Daniel Black +Daniel Cater +Daniel Egger +Daniel Gustafsson +Daniel Hwang +Daniel Johnson +Daniel Kahn Gillmor +Daniel Lee Hwang +Daniel Melani +Daniel Mentz +Daniel Romero +Daniel Schauenberg +Daniel Seither +Daniel Shahaf +Daniel Steinberg +Daniel Stenberg +Daniel Theron +Daniel at touchtunes +Daphne Luong +Darryl House +Darshan Mody +Darío Hereñú +Dave Dribin +Dave Halbakken +Dave Hamilton +Dave May +Dave Reisner +Dave Thompson +Dave Vasilevsky +Davey Shafik +David Bau +David Benjamin +David Binderman +David Blaikie +David Byron +David Cohen +David Eriksson +David Houlder +David Hull +David J Meyer +David James +David Kalnischkies +David Kierznowski +David Kimdon +David Lang +David LeBlanc +David McCreedy +David Meyer +David Odin +David Phillips +David Rosenstrauch +David Ryskalczyk +David Schweikert +David Shaw +David Strauss +David Tarendash +David Thiel +David Walser +David Woodhouse +David Wright +David Yan +Dengminwen +Denis Feklushkin +Dennis Clarke +Derek Higgins +Desmond O. Chang +Detlef Schmier +Didier Brisebourg +Diego Bes +Diego Casorran +Dilyan Palauzov +Dima Barsky +Dima Tisnek +Dimitar Boevski +Dimitre Dimitrov +Dimitrios Siganos +Dimitris Sarris +Dinar +Dirk Eddelbuettel +Dirk Manske +Dmitri Shubin +Dmitriy Sergeyev +Dmitry Bartsevich +Dmitry Eremin-Solenikov +Dmitry Falko +Dmitry Kurochkin +Dmitry Popov +Dmitry Rechkin +Dmitry S. Baikov +Dolbneff A.V +Domenico Andreoli +Dominick Meglio +Dominik Hölzl +Dominique Leuenberger +Doug Kaufman +Doug Porter +Douglas Creager +Douglas E. Wegscheid +Douglas Kilpatrick +Douglas R. 
Horner +Douglas Steinwand +Dov Murik +Drake Arconis +Duane Cathey +Duncan Mac-Vicar Prett +Dustin Boswell +Dusty Mabe +Dylan Ellicott +Dylan Salisbury +Dániel Bakai +Early Ehlinger +Ebenezer Ikonne +Ed Morley +Edin Kadribasic +Eduard Bloch +Edward Kimmel +Edward Rudd +Edward Sheldrake +Edward Thomson +Eelco Dolstra +Eetu Ojanen +Egon Eckert +Eldar Zaitov +Ellis Pritchard +Elmira A Semenova +Emanuele Bovisio +Emil Lerner +Emil Romanus +Emiliano Ida +Emmanuel Tychon +Enrico Scholz +Enrik Berkhan +Eramoto Masaya +Eric Cooper +Eric Hu +Eric Landes +Eric Lavigne +Eric Lubin +Eric Melville +Eric Mertens +Eric Rautman +Eric Rescorla +Eric Ridge +Eric S. Raymond +Eric Thelin +Eric Vergnaud +Eric Wong +Eric Young +Erick Nuwendam +Erik Janssen +Erik Johansson +Ernest Beinrohr +Erwan Legrand +Erwin Authried +Ethan Glasser Camp +Eugene Kotlyarov +Evan Jordan +Evgeny Grin +Evgeny Turnaev +Eygene Ryabinkin +Fabian Frank +Fabian Hiernaux +Fabian Keil +Fabian Ruff +Fabrizio Ammollo +Fahim Chandurwala +Fedor Karpelevitch +Feist Josselin +Felix Yan +Felix von Leitner +Feng Tu +Fernando Muñoz +Flavio Medeiros +Florian Schoppmann +Florian Weimer +Forrest Cahoon +Francisco Moraes +Francois Petitjean +Frank Gevaerts +Frank Hempel +Frank Keeney +Frank McGeough +Frank Meier +Frank Ticheler +Frank Van Uffelen +František Kučera +François Charlier +Fred Machado +Fred New +Fred Noz +Fred Stluka +Frederic Lepied +Fredrik Thulin +Gabriel Kuri +Gabriel Sjoberg +Garrett Holmstrom +Gary Maxwell +Gaurav Malhotra +Gautam Kachroo +Gautam Mani +Gavrie Philipson +Gaz Iqbal +Gaël Portay +Geoff Beier +Georg Horn +Georg Huettenegger +Georg Lippitsch +Georg Wicherski +Gerd v. Egidy +Gergely Nagy +Gerhard Herre +Gerrit Bruchhäuser +Ghennadi Procopciuc +Giancarlo Formicuccia +Giaslas Georgios +Gil Weber +Gilad +Gilbert Ramirez Jr. +Gilles Blanc +Gisle Vanem +Giuseppe Attardi +Giuseppe D'Ambrosio +Giuseppe Persico +Glen A Johnson Jr. +Glen Nakamura +Glen Scott +Glenn Sheridan +Google Inc. +Gordon Marler +Gorilla Maguila +Gou Lingfeng +Grant Erickson +Grant Pannell +Greg Hewgill +Greg Morse +Greg Onufer +Greg Pratt +Greg Rowe +Greg Zavertnik +Gregory Szorc +Grigory Entin +Guenole Bescon +Guenter Knauf +Guido Berhoerster +Guillaume Arluison +Gunter Knauf +Gustaf Hui +Gustavo Grieco +GwanYeong Kim +Gwenole Beauchesne +Gökhan Şengün +Götz Babin-Ebell +Hamish Mackenzie +Hang Kin Lau +Hang Su +Hanno Böck +Hanno Kranzhoff +Hans Steegers +Hans-Jurgen May +Hardeep Singh +Haris Okanovic +Harold Stuart +Harshal Pradhan +Hauke Duden +He Qin +Heikki Korpela +Heinrich Ko +Heinrich Schaefer +Helmut K. C. 
Tessarek +Helwing Lutz +Hendrik Visage +Henrik Gaßmann +Henrik Storner +Henry Ludemann +Herve Amblard +Hidemoto Nakada +Ho-chi Chen +Hoi-Ho Chan +Hongli Lai +Howard Chu +Hubert Kario +Hzhijun +Ian D Allen +Ian Ford +Ian Gulliver +Ian Lynagh +Ian Turner +Ian Wilkes +Ignacio Vazquez-Abrams +Igor Franchuk +Igor Novoseltsev +Igor Polyakov +Iida Yosiaki +Ilguiz Latypov +Ilja van Sprundel +Immanuel Gregoire +Inca R +Ingmar Runge +Ingo Ralf Blum +Ingo Wilken +Irfan Adilovic +Isaac Boukris +Ishan SinghLevett +Ivan Avdeev +Ivo Bellin Salarin +Jack Zhang +Jacky Lam +Jacob Meuser +Jacob Moshenko +Jactry Zeng +Jad Chamcham +Jaime Fullaondo +Jakub Zakrzewski +James Atwill +James Bursa +James Cheng +James Clancy +James Cone +James Dury +James Gallagher +James Griffiths +James Housley +James MacMillan +Jamie Lokier +Jamie Newton +Jamie Wilkinson +Jan Ehrhardt +Jan Koen Annot +Jan Kunder +Jan Schaumann +Jan Schmidt +Jan Van Boghout +Jared Jennings +Jared Lundell +Jari Aalto +Jari Sundell +Jason Glasgow +Jason Liu +Jason McDonald +Jason S. Priebe +Javier Barroso +Javier G. Sogo +Jay Austin +Jayesh A Shah +Jaz Fresh +Jean Gressmann +Jean Jacques Drouin +Jean-Claude Chauve +Jean-Francois Bertrand +Jean-Francois Durand +Jean-Louis Lemaire +Jean-Marc Ranger +Jean-Noël Rouvignac +Jean-Philippe Barrette-LaPierre +Jeff Connelly +Jeff Hodges +Jeff Johnson +Jeff King +Jeff Lawson +Jeff Phillips +Jeff Pohlmeyer +Jeff Weber +Jeffrey Walton +Jens Rantil +Jeremy Friesner +Jeremy Huddleston +Jeremy Lin +Jeremy Pearson +Jeroen Koekkoek +Jeroen Ooms +Jerome Muffat-Meridol +Jerome Robert +Jerome Vouillon +Jerry Krinock +Jerry Wu +Jes Badwal +Jesper Jensen +Jesse Noller +Jesse Tan +Jie He +Jim Drash +Jim Freeman +Jim Hollinger +Jim Meyering +Jiri Dvorak +Jiri Hruska +Jiri Jaburek +Jiří Malák +Jocelyn Jaubert +Joe Halpin +Joe Malicki +Joe Mason +Joel Chen +Joel Depooter +Jofell Gallardo +Johan Anderson +Johan Lantz +Johan Nilsson +Johan van Selst +Johannes Bauer +Johannes Ernst +Johannes Schindelin +John Bradshaw +John Coffey +John Crow +John Dennis +John Dunn +John E. Malmberg +John Gardiner Myers +John Janssen +John Joseph Bachir +John Kelly +John Kohl +John Lask +John Levon +John Lightsey +John Marino +John Marshall +John McGowan +John P. McCaskey +John Suprock +John Wanghui +John Wilkinson +John-Mark Bell +Johnny Luong +Jon Grubbs +Jon Nelson +Jon Sargeant +Jon Seymour +Jon Spencer +Jon Torrey +Jon Travis +Jon Turner +Jonas Forsman +Jonas Minnberg +Jonas Schnelli +Jonatan Lander +Jonatan Vela +Jonathan Cardoso Machado +Jonathan Cardoso Machado Machado +Jonathan Hseu +Jonathan Nieder +Jongki Suwandi +Joonas Kuorilehto +Jose Alf +Jose Kahan +Josef Wolf +Josh Kapell +Joshua Kwan +Josue Andrade Gomes +Jozef Kralik +Juan Barreto +Juan F. Codagnone +Juan Ignacio Hervás +Juan RP +Judson Bishop +Juergen Wilke +Jukka Pihl +Julian Noble +Julian Ospald +Julian Taylor +Julien Chaffraix +Julien Nabet +Julien Royer +Jun-ichiro itojun Hagino +Jurij Smakov +Justin Clift +Justin Ehlert +Justin Fletcher +Justin Karneges +Justin Maggard +János Fekete +Jörg Mueller-Tolk +Jörn Hartroth +K. R. 
Walker +Kai Engert +Kai Noda +Kai Sommerfeld +Kai-Uwe Rommel +Kalle Vahlman +Kamil Dudka +Kang Lin +Kang-Jin Lee +Karl Moerder +Karol Pietrzak +Kaspar Brand +Katie Wang +Kazuho Oku +Kees Cook +Keith MacDonald +Keith McGuigan +Keith Mok +Ken Hirsch +Ken Rastatter +Kenny To +Kent Boortz +Keshav Krity +Kevin Baughman +Kevin Fisk +Kevin Ji +Kevin Lussier +Kevin Reed +Kevin Roth +Kim Minjoong +Kim Rinnewitz +Kim Vandry +Kimmo Kinnunen +Kjell Ericson +Kjetil Jacobsen +Klevtsov Vadim +Konstantin Isakov +Kris Kennaway +Krishnendu Majumdar +Krister Johansen +Kristian Gunstone +Kristian Köhntopp +Kurt Fankhauser +Kyle J. McKay +Kyle L. Huff +Kyle Sallee +Kyselgov E.N +Lachlan O'Dea +Larry Campbell +Larry Fahnoe +Larry Lin +Larry Stefani +Larry Stone +Lars Buitinck +Lars Gustafsson +Lars J. Aas +Lars Johannesen +Lars Nilsson +Lars Torben Wilson +Lau Hang Kin +Laurent Rabret +Lauri Kasanen +Legoff Vincent +Lehel Bernadt +Leif W +Leith Bade +Len Krause +Lenaic Lefever +Lenny Rachitsky +Leon Winter +Leonardo Rosati +Liam Healy +Lijo Antony +Linas Vepstas +Lindley French +Ling Thio +Linus Nielsen Feltzing +Linus Nordberg +Lior Kaplan +Lisa Xu +Liviu Chircu +Liza Alenchery +Lloyd Fournier +Lluís Batlle i Rossell +Loic Dachary +Loren Kirkby +Luan Cestari +Luca Altea +Lucas Adamski +Lucas Pardue +Ludek Finstrle +Ludovico Cavedon +Ludwig Nussel +Lukas Ruzicka +Lukasz Czekierda +Luke Amery +Luke Call +Luke Dashjr +Luo Jinghua +Luong Dinh Dung +Luật Nguyễn +Lyndon Hill +Maciej Karpiuk +Maciej Puzio +Maciej W. Rozycki +Mahmoud Samir Fayed +Maks Naumov +Maksim Kuzevanov +Maksim Stsepanenka +Mamoru Tasaka +Mandy Wu +Manfred Schwarb +Manuel Massing +Marc Boucher +Marc Deslauriers +Marc Doughty +Marc Hesse +Marc Hörsken +Marc Kleine-Budde +Marc Renault +Marc-Antoine Perennou +Marcel Raad +Marcel Roelofs +Marcelo Echeverria +Marcelo Juchem +Marcin Adamski +Marcin Gryszkalis +Marcin Konicki +Marco Deckel +Marco G. Salvagno +Marco Maggi +Marcus Hoffmann +Marcus Sundberg +Marcus Webster +Mario Schroeder +Mark Brand +Mark Butler +Mark Davies +Mark Eichin +Mark Hamilton +Mark Incley +Mark Karpeles +Mark Lentczner +Mark Nottingham +Mark Salisbury +Mark Snelling +Mark Tully +Markus Duft +Markus Elfring +Markus Koetter +Markus Moeller +Markus Oberhumer +Markus Westerlind +Marquis de Muesli +Martijn Koster +Martin C. 
Martin +Martin Drasar +Martin Frodl +Martin Hager +Martin Hedenfalk +Martin Jansen +Martin Kepplinger +Martin Lemke +Martin Skinner +Martin Storsjö +Martin Vejnár +Marty Kuhrt +Maruko +Massimiliano Ziccardi +Massimo Callegari +Mateusz Loskot +Mathias Axelsson +Mats Lidell +Matt Arsenault +Matt Ford +Matt Kraai +Matt Veenstra +Matt Witherspoon +Matt Wixson +Matteo Rocco +Matthew Blain +Matthew Clarke +Matthew Hall +Matthias Bolte +Maurice Barnum +Mauro Iorio +Mauro Rappa +Max Dymond +Max Katsev +Max Khon +Maxim Ivanov +Maxim Perenesenko +Maxim Prohorov +Maxime Larocque +Mehmet Bozkurt +Mekonikum +Melissa Mears +Mettgut Jamalla +Michael Benedict +Michael Calmer +Michael Cronenworth +Michael Curtis +Michael Day +Michael Goffioul +Michael Jahn +Michael Jerris +Michael Kalinin +Michael Kaufmann +Michael König +Michael Maltese +Michael Mealling +Michael Mueller +Michael Osipov +Michael Smith +Michael Stapelberg +Michael Stillwell +Michael Wallner +Michal Bonino +Michal Marek +Michał Fita +Michał Górny +Michał Kowalczyk +Michał Piechowski +Michel Promonet +Michele Bini +Miguel Angel +Miguel Diaz +Mihai Ionescu +Mikael Johansson +Mikael Sennerholm +Mike Bytnar +Mike Crowe +Mike Dobbs +Mike Giancola +Mike Hasselberg +Mike Henshaw +Mike Hommey +Mike Mio +Mike Power +Mike Protts +Mike Revi +Miklos Nemeth +Miloš Ljumović +Mingliang Zhu +Miroslav Franc +Miroslav Spousta +Mitz Wark +Mohamed Lrhazi +Mohammad AlSaleh +Mohun Biswas +Mostyn Bramley-Moore +Moti Avrahami +Myk Taylor +Nach M. S. +Nagai H +Nathan Coulter +Nathan O'Sullivan +Nathanael Nerode +Nathaniel Waisbrot +Naveen Chandran +Naveen Noel +Neal Poole +Nehal J Wani +Neil Bowers +Neil Dunbar +Neil Spring +Nic Roets +Nicholas Maniscalco +Nick Draffen +Nick Gimbrone +Nick Humfrey +Nick Zitzmann +Nico Baggus +Nicolas Berloquin +Nicolas Croiset +Nicolas François +Niels van Tongeren +Nikita Schmidt +Nikitinskit Dmitriy +Niklas Angebrand +Nikolai Kondrashov +Nikos Mavrogiannopoulos +Ning Dong +Nir Soffer +Nis Jorgensen +Nobuhiro Ban +Nodak Sodak +Norbert Frese +Norbert Kett +Norbert Novotny +Octavio Schroeder +Ofer +Okhin Vasilij +Ola Mork +Olaf Flebbe +Olaf Stüben +Oleg Pudeyev +Oliver Gondža +Oliver Graute +Oliver Kuckertz +Oliver Schindler +Olivier Berger +Olivier Brunel +Orange Tsai +Oren Souroujon +Oren Tirosh +Orgad Shaneh +Ori Avtalion +Oscar Koeroo +Oscar Norlander +P R Schaffner +Palo Markovic +Paolo Piacentini +Paras Sethia +Pascal Gaudette +Pascal Terjan +Pasha Kuznetsov +Pasi Karkkainen +Pat Ray +Patrice Guerin +Patricia Muscalu +Patrick Bihan-Faou +Patrick McManus +Patrick Monnerat +Patrick Rapin +Patrick Scott +Patrick Smith +Patrick Watson +Patrik Thunstrom +Pau Garcia i Quiles +Paul Donohue +Paul Harrington +Paul Harris +Paul Howarth +Paul Joyce +Paul Marks +Paul Marquis +Paul Moore +Paul Nolan +Paul Oliver +Paul Querna +Paul Saab +Pavel Cenek +Pavel Orehov +Pavel Raiskup +Pawel A. 
Gajda +Pawel Kierski +Pedro Larroy +Pedro Neves +Per Malmberg +Peter Bray +Peter Forret +Peter Frühberger +Peter Gal +Peter Heuchert +Peter Hjalmarsson +Peter Korsgaard +Peter Lamberg +Peter Laser +Peter O'Gorman +Peter Pentchev +Peter Silva +Peter Su +Peter Sylvester +Peter Todd +Peter Verhas +Peter Wang +Peter Wu +Peter Wullinger +Peteris Krumins +Petr Bahula +Petr Novak +Petr Pisar +Phil Blundell +Phil Crump +Phil Karn +Phil Lisiecki +Phil Pellouchoud +Philip Craig +Philip Gladstone +Philip Langdale +Philippe Hameau +Philippe Raoult +Philippe Vaucher +Pierre +Pierre Brico +Pierre Chapuis +Pierre Joye +Pierre Ynard +Piotr Dobrogost +Pooyan McSporran +Pramod Sharma +Prash Dush +Praveen Pvs +Priyanka Shah +Puneet Pawaia +Quagmire +Quanah Gibson-Mount +Quinn Slack +R. Dennis Steed +Radu Simionescu +Rafa Muyo +Rafael Antonio +Rafael Sagula +Rafayel Mkrtchyan +Rafaël Carré +Rainer Canavan +Rainer Jung +Rainer Koenig +Rainer Müller +Rajesh Naganathan +Rajkumar Mandal +Ralf S. Engelschall +Ralph Beckmann +Ralph Mitchell +Ramana Mokkapati +Randy Armstrong +Randy McMurchy +Ravi Pratap +Ray Dassen +Ray Pekowski +Ray Satiro +Razvan Cojocaru +Reinhard Max +Reinout van Schouwen +Remi Gacogne +Remo E +Renato Botelho +Renaud Chaillat +Renaud Duhaut +Renaud Guillard +Renaud Lehoux +Rene Bernhardt +Rene Rebe +Reuven Wachtfogel +Reza Arbab +Ricardo Cadime +Rich Burridge +Rich Gray +Rich Rauenzahn +Richard Archer +Richard Atterer +Richard Bramante +Richard Clayton +Richard Cooper +Richard Gorton +Richard Gray +Richard Hosking +Richard Hsu +Richard Michael +Richard Moore +Richard Prescott +Richard Silverman +Richard van den Berg +Richy Kim +Rick Jones +Rick Richardson +Ricki Hirner +Rider Linden +Rob Crittenden +Rob Davies +Rob Jones +Rob Stanzel +Rob Ward +Robert A. Monat +Robert B. Harris +Robert D. Young +Robert Foreman +Robert Iakobashvili +Robert Olson +Robert Schumann +Robert Weaver +Robert Wruck +Robin Cornelius +Robin Johnson +Robin Kay +Robson Braga Araujo +Rod Widdowson +Rodney Simmons +Rodric Glaser +Rodrigo Silva +Roger Leigh +Roland Blom +Roland Krikava +Roland Zimmermann +Rolland Dudemaine +Romain Coltel +Roman Koifman +Roman Mamedov +Romulo A. Ceccon +Ron Eldor +Ron Parker +Ron Zapp +Ronnie Mose +Rosimildo da Silva +Roy Shan +Rune Kleveland +Ruslan Gazizov +Rutger Hofman +Ryan Braud +Ryan Chan +Ryan Nelson +Ryan Schmidt +Ryan Scott +Ryuichi KAWAMATA +Rémy Léone +S. Moonesamy +Salvador Dávila +Salvatore Sorrentino +Sam Deane +Sam Hurst +Sam Roth +Sam Schanken +Sampo Kellomaki +Samuel Díaz García +Samuel Listopad +Samuel Thibault +Sander Gates +Sandor Feldi +Santhana Todatry +Saqib Ali +Sara Golemon +Saran Neti +Sascha Swiercy +Saul good +Saurav Babu +Scott Bailey +Scott Barrett +Scott Cantor +Scott Davis +Scott McCreary +Sean Boudreau +Sean Burford +Sebastian Mundry +Sebastian Pohlschmidt +Sebastian Rasmussen +Senthil Raja Velu +Sergei Kuzmin +Sergei Nikulov +Sergey Tatarincev +Sergii Pylypenko +Sergio Ballestrero +Serj Kalichev +Seshubabu Pasam +Seth Mos +Sh Diao +Shachaf Ben-Kiki +Shao Shuchao +Sharad Gupta +Shard +Shawn Landden +Shawn Poulson +Shine Fan +Shmulik Regev +Siddhartha Prakash Jain +Sidney San Martín +Siegfried Gyuricsko +Simon Dick +Simon H. +Simon Josefsson +Simon Liu +Simon Warta +Song Ma +Sonia Subramanian +Spacen Jasset +Spiridonoff A.V +Spork Schivago +Stadler Stephan +Stan van de Burgt +Stanislav Ivochkin +Stefan Bühler +Stefan Eissing +Stefan Esser +Stefan Kanthak +Stefan Krause +Stefan Neis +Stefan Teleman +Stefan Tomanek +Stefan Ulrich +Steinar H. 
Gunderson +Stephan Bergmann +Stephen Brokenshire +Stephen Collyer +Stephen Kick +Stephen More +Stephen Toub +Sterling Hughes +Steve Brokenshire +Steve Green +Steve H Truong +Steve Havelka +Steve Holme +Steve Lhomme +Steve Little +Steve Marx +Steve Oliphant +Steve Roskowski +Steven Bazyl +Steven G. Johnson +Steven Gu +Steven M. Schweda +Steven Parkes +Stoned Elipot +Stuart Henderson +Sune Ahlgren +Sven Anders +Sven Neuhaus +Sven Wegener +Svyatoslav Mishyn +Sylvestre Ledru +Symeon Paraschoudis +Sébastien Willemijns +T. Bharath +T. Yamada +TJ Saunders +Tae Hyoung Ahn +Taneli Vähäkangas +Tanguy Fautre +Tatsuhiro Tsujikawa +Temprimus +Terri Oda +TheAssassin at github +Theodore Dubois +Thomas Braun +Thomas Glanzmann +Thomas J. Moore +Thomas Klausner +Thomas L. Shinnick +Thomas Lopatic +Thomas Ruecker +Thomas Schwinge +Thomas Tonino +Thorsten Schöning +Tiit Pikma +Till Maas +Tim Ansell +Tim Baker +Tim Bartley +Tim Chen +Tim Costello +Tim Harder +Tim Heckman +Tim Newsome +Tim Rühsen +Tim Sneddon +Tim Stack +Tim Starling +Timo Sirainen +Timotej Lazar +Timothy Polich +Tinus van den Berg +Tobias Markus +Tobias Rundström +Tobias Stoeckmann +Toby Peterson +Todd A Ouska +Todd Kulesza +Todd Short +Todd Vierling +Tom Benoist +Tom Donovan +Tom Grace +Tom Lee +Tom Mattison +Tom Moers +Tom Mueller +Tom Regner +Tom Sparrow +Tom Wright +Tom Zerucha +Tomas Hoger +Tomas Jakobsson +Tomas Mlcoch +Tomas Pospisek +Tomas Szepe +Tomas Tomecek +Tomasz Kojm +Tomasz Lacki +Tommie Gannert +Tommy Tam +Ton Voon +Toni Moreno +Tony Kelman +Toon Verwaest +Tor Arntsen +Torben Dannhauer +Torsten Foertsch +Toshio Kuratomi +Toshiyuki Maezawa +Traian Nicolescu +Travis Burtrum +Travis Obenhaus +Troels Walsted Hansen +Troy Engel +Tupone Alfredo +Tyler Hall +Török Edwin +Ulf Härnhammar +Ulf Samuelsson +Ulrich Doehner +Ulrich Telle +Ulrich Zadow +Valentin David +Vasy Okhin +Venkat Akella +Venkataramana Mokkapati +Victor Snezhko +Vijay Panghal +Vikram Saxena +Viktor Szakáts +Ville Skyttä +Vilmos Nebehaj +Vincas Razma +Vincent Bronner +Vincent Le Normand +Vincent Penquerc'h +Vincent Sanders +Vincent Torri +Vlad Grachov +Vlad Ureche +Vladimir Grishchenko +Vladimir Lazarenko +Vojtech Janota +Vojtech Minarik +Vojtěch Král +Vsevolod Novikov +W. Mark Kubacki +Waldek Kozba +Walter J. 
Mack +Ward Willats +Warp Kawada +Warren Menzer +Wayne Haigh +Werner Koch +Wesley Laxton +Wesley Miaw +Wez Furlong +Wham Bang +Wilfredo Sanchez +Will Dietz +Willem Sparreboom +William Ahern +Wojciech Zwiefka +Wouter Van Rooy +Wu Yongzheng +Xavier Bouchoux +Xiangbin Li +Yaakov Selkowitz +Yamada Yasuharu +Yang Tse +Yarram Sunil +Yasuharu Yamada +Yehezkel Horowitz +Yehoshua Hershberg +Yi Huang +Yingwei Liu +Yonggang Luo +Yousuke Kimoto +Yukihiro Kawada +Yun SangHo +Yuriy Sosov +Yves Arrouye +Yves Lejeune +Zdenek Pavlas +Zekun Ni +Zmey Petroff +Zvi Har'El +afrind on github +asavah on github +baumanj on github +bsammon on github +canavan at github +dkjjr89 on github +eXeC64 on github +jonrumsey at github +jonrumsey on github +jveazey on github +ka7 on github +kreshano on github +lijian996 on github +lukaszgn on github +madblobfish on github +marc-groundctl on github +mccormickt12 on github +mkzero on github +neex on github +neheb on github +nk +nopjmp on github +silveja1 on github +stootill at github +swalkaus at yahoo.com +tarek112 on github +tommink[at]post.pl +vanillajonathan on github +wmsch on github +wyattoday at github +zelinchen on github +İsmail Dönmez +Štefan Kremeň +Никита Дорохин diff --git a/deps-win32/curl-7.54.1/docs/TODO b/deps-win32/curl-7.54.1/docs/TODO new file mode 100644 index 0000000..e8b8fe7 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/TODO @@ -0,0 +1,1240 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + + Things that could be nice to do in the future + + Things to do in project curl. Please tell us what you think, contribute and + send us patches that improve things! + + Be aware that these are things that we could do, or have once been considered + things we could do. If you want to work on any of these areas, please + consider bringing it up for discussions first on the mailing list so that we + all agree it is still a good idea for the project! + + All bugs documented in the KNOWN_BUGS document are subject for fixing! + + 1. libcurl + 1.2 More data sharing + 1.3 struct lifreq + 1.4 signal-based resolver timeouts + 1.5 get rid of PATH_MAX + 1.6 Modified buffer size approach + 1.7 Detect when called from within callbacks + 1.8 CURLOPT_RESOLVE for any port number + 1.9 Cache negative name resolves + 1.11 minimize dependencies with dynamically loaded modules + 1.12 have form functions use CURL handle argument + 1.14 Typesafe curl_easy_setopt() + 1.15 Monitor connections in the connection pool + 1.16 Try to URL encode given URL + 1.17 Add support for IRIs + 1.18 try next proxy if one doesn't work + 1.19 Timeout idle connections from the pool + 1.20 SRV and URI DNS records + 1.21 API for URL parsing/splitting + 1.23 Offer API to flush the connection pool + 1.24 TCP Fast Open for windows + 1.25 Remove the generated include file + + 2. libcurl - multi interface + 2.1 More non-blocking + 2.2 Better support for same name resolves + 2.3 Non-blocking curl_multi_remove_handle() + 2.4 Split connect and authentication process + 2.5 Edge-triggered sockets should work + + 3. Documentation + 3.2 Provide cmake config-file + + 4. FTP + 4.1 HOST + 4.2 Alter passive/active on failure and retry + 4.3 Earlier bad letter detection + 4.4 REST for large files + 4.5 ASCII support + 4.6 GSSAPI via Windows SSPI + 4.7 STAT for LIST without data connection + + 5. 
HTTP + 5.1 Better persistency for HTTP 1.0 + 5.2 support FF3 sqlite cookie files + 5.3 Rearrange request header order + 5.4 HTTP Digest using SHA-256 + 5.5 auth= in URLs + 5.6 Refuse "downgrade" redirects + 5.7 Brotli compression + 5.8 QUIC + 5.9 Improve formpost API + 5.10 Leave secure cookies alone + 5.11 Chunked transfer multipart formpost + 5.12 OPTIONS * + + 6. TELNET + 6.1 ditch stdin + 6.2 ditch telnet-specific select + 6.3 feature negotiation debug data + + 7. SMTP + 7.1 Pipelining + 7.2 Enhanced capability support + 7.3 Add CURLOPT_MAIL_CLIENT option + + 8. POP3 + 8.1 Pipelining + 8.2 Enhanced capability support + + 9. IMAP + 9.1 Enhanced capability support + + 10. LDAP + 10.1 SASL based authentication mechanisms + + 11. SMB + 11.1 File listing support + 11.2 Honor file timestamps + 11.3 Use NTLMv2 + 11.4 Create remote directories + + 12. New protocols + 12.1 RSYNC + + 13. SSL + 13.1 Disable specific versions + 13.2 Provide mutex locking API + 13.3 Evaluate SSL patches + 13.4 Cache/share OpenSSL contexts + 13.5 Export session ids + 13.6 Provide callback for cert verification + 13.7 improve configure --with-ssl + 13.8 Support DANE + 13.10 Support SSLKEYLOGFILE + 13.11 Support intermediate & root pinning for PINNEDPUBLICKEY + 13.12 Support HSTS + 13.13 Support HPKP + + 14. GnuTLS + 14.1 SSL engine stuff + 14.2 check connection + + 15. WinSSL/SChannel + 15.1 Add support for client certificate authentication + 15.2 Add support for custom server certificate validation + 15.3 Add support for the --ciphers option + + 16. SASL + 16.1 Other authentication mechanisms + 16.2 Add QOP support to GSSAPI authentication + 16.3 Support binary messages (i.e.: non-base64) + + 17. SSH protocols + 17.1 Multiplexing + 17.2 SFTP performance + 17.3 Support better than MD5 hostkey hash + + 18. Command line tool + 18.1 sync + 18.2 glob posts + 18.3 prevent file overwriting + 18.4 simultaneous parallel transfers + 18.5 provide formpost headers + 18.6 warning when setting an option + 18.7 warning when sending binary output to terminal + 18.8 offer color-coded HTTP header output + 18.9 Choose the name of file in braces for complex URLs + 18.10 improve how curl works in a windows console window + 18.11 -w output to stderr + 18.12 keep running, read instructions from pipe/socket + 18.13 support metalink in http headers + 18.14 --fail without --location should treat 3xx as a failure + 18.15 --retry should resume + 18.16 send only part of --data + 18.17 consider file name from the redirected URL with -O ? + + 19. Build + 19.1 roffit + 19.2 Enable PIE and RELRO by default + + 20. Test suite + 20.1 SSL tunnel + 20.2 nicer lacking perl message + 20.3 more protocols supported + 20.4 more platforms supported + 20.5 Add support for concurrent connections + 20.6 Use the RFC6265 test suite + + 21. Next SONAME bump + 21.1 http-style HEAD output for FTP + 21.2 combine error codes + 21.3 extend CURLOPT_SOCKOPTFUNCTION prototype + + 22. Next major release + 22.1 cleanup return codes + 22.2 remove obsolete defines + 22.3 size_t + 22.4 remove several functions + 22.5 remove CURLOPT_FAILONERROR + 22.6 remove CURLOPT_DNS_USE_GLOBAL_CACHE + 22.7 remove progress meter from libcurl + 22.8 remove 'curl_httppost' from public + +============================================================================== + +1. libcurl + +1.2 More data sharing + + curl_share_* functions already exist and work, and they can be extended to + share more. For example, enable sharing of the ares channel and the + connection cache. 
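+
+  For reference, a minimal sketch of what the share interface can already do
+  today, which is what this item proposes to extend (the URLs are
+  placeholders):
+
+   #include <curl/curl.h>
+
+   int main(void)
+   {
+     CURLSH *share = curl_share_init();
+     CURL *a = curl_easy_init();
+     CURL *b = curl_easy_init();
+
+     if(!share || !a || !b)
+       return 1;
+
+     /* share the DNS cache and the cookie jar between the two easy handles */
+     curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
+     curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
+
+     curl_easy_setopt(a, CURLOPT_SHARE, share);
+     curl_easy_setopt(a, CURLOPT_URL, "https://example.com/a");
+     curl_easy_setopt(b, CURLOPT_SHARE, share);
+     curl_easy_setopt(b, CURLOPT_URL, "https://example.com/b");
+
+     curl_easy_perform(a);  /* resolves the host name and caches it */
+     curl_easy_perform(b);  /* reuses the shared DNS entry */
+
+     curl_easy_cleanup(a);
+     curl_easy_cleanup(b);
+     curl_share_cleanup(share);
+     return 0;
+   }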
+ +1.3 struct lifreq + + Use 'struct lifreq' and SIOCGLIFADDR instead of 'struct ifreq' and + SIOCGIFADDR on newer Solaris versions as they claim the latter is obsolete. + To support IPv6 interface addresses for network interfaces properly. + +1.4 signal-based resolver timeouts + + libcurl built without an asynchronous resolver library uses alarm() to time + out DNS lookups. When a timeout occurs, this causes libcurl to jump from the + signal handler back into the library with a sigsetjmp, which effectively + causes libcurl to continue running within the signal handler. This is + non-portable and could cause problems on some platforms. A discussion on the + problem is available at https://curl.haxx.se/mail/lib-2008-09/0197.html + + Also, alarm() provides timeout resolution only to the nearest second. alarm + ought to be replaced by setitimer on systems that support it. + +1.5 get rid of PATH_MAX + + Having code use and rely on PATH_MAX is not nice: + http://insanecoding.blogspot.com/2007/11/pathmax-simply-isnt.html + + Currently the SSH based code uses it a bit, but to remove PATH_MAX from there + we need libssh2 to properly tell us when we pass in a too small buffer and + its current API (as of libssh2 1.2.7) doesn't. + +1.6 Modified buffer size approach + + Current libcurl allocates a fixed 16K size buffer for download and an + additional 16K for upload. They are always unconditionally part of the easy + handle. If CRLF translations are requested, an additional 32K "scratch + buffer" is allocated. A total of 64K transfer buffers in the worst case. + + First, while the handles are not actually in use these buffers could be freed + so that lingering handles just kept in queues or whatever waste less memory. + + Secondly, SFTP is a protocol that needs to handle many ~30K blocks at once + since each need to be individually acked and therefore libssh2 must be + allowed to send (or receive) many separate ones in parallel to achieve high + transfer speeds. A current libcurl build with a 16K buffer makes that + impossible, but one with a 512K buffer will reach MUCH faster transfers. But + allocating 512K unconditionally for all buffers just in case they would like + to do fast SFTP transfers at some point is not a good solution either. + + Dynamically allocate buffer size depending on protocol in use in combination + with freeing it after each individual transfer? Other suggestions? + +1.7 Detect when called from within callbacks + + We should set a state variable before calling callbacks, so that we + subsequently can add code within libcurl that returns error if called within + callbacks for when that's not supported. + +1.8 CURLOPT_RESOLVE for any port number + + This option allows applications to set a replacement IP address for a given + host + port pair. Consider making support for providing a replacement address + for the host name on all port numbers. + + See https://github.com/curl/curl/issues/1264 + +1.9 Cache negative name resolves + + A name resolve that has failed is likely to fail when made again within a + short period of time. Currently we only cache positive responses. + +1.11 minimize dependencies with dynamically loaded modules + + We can create a system with loadable modules/plug-ins, where these modules + would be the ones that link to 3rd party libs. That would allow us to avoid + having to load ALL dependencies since only the necessary ones for this + app/invoke/used protocols would be necessary to load. 
See + https://github.com/curl/curl/issues/349 + +1.12 have form functions use CURL handle argument + + curl_formadd() and curl_formget() both currently have no CURL handle + argument, but both can use a callback that is set in the easy handle, and + thus curl_formget() with callback cannot function without first having + curl_easy_perform() (or similar) called - which is hard to grasp and a design + mistake. + + The curl_formadd() design can probably also be reconsidered to make it easier + to use and less error-prone. Probably easiest by splitting it into several + function calls. + +1.14 Typesafe curl_easy_setopt() + + One of the most common problems in libcurl using applications is the lack of + type checks for curl_easy_setopt() which happens because it accepts varargs + and thus can take any type. + + One possible solution to this is to introduce a few different versions of the + setopt version for the different kinds of data you can set. + + curl_easy_set_num() - sets a long value + + curl_easy_set_large() - sets a curl_off_t value + + curl_easy_set_ptr() - sets a pointer + + curl_easy_set_cb() - sets a callback PLUS its callback data + +1.15 Monitor connections in the connection pool + + libcurl's connection cache or pool holds a number of open connections for the + purpose of possible subsequent connection reuse. It may contain a few up to a + significant amount of connections. Currently, libcurl leaves all connections + as they are and first when a connection is iterated over for matching or + reuse purpose it is verified that it is still alive. + + Those connections may get closed by the server side for idleness or they may + get a HTTP/2 ping from the peer to verify that they're still alive. By adding + monitoring of the connections while in the pool, libcurl can detect dead + connections (and close them) better and earlier, and it can handle HTTP/2 + pings to keep such ones alive even when not actively doing transfers on them. + +1.16 Try to URL encode given URL + + Given a URL that for example contains spaces, libcurl could have an option + that would try somewhat harder than it does now and convert spaces to %20 and + perhaps URL encoded byte values over 128 etc (basically do what the redirect + following code already does). + + https://github.com/curl/curl/issues/514 + +1.17 Add support for IRIs + + IRIs (RFC 3987) allow localized, non-ascii, names in the URL. To properly + support this, curl/libcurl would need to translate/encode the given input + from the input string encoding into percent encoded output "over the wire". + + To make that work smoothly for curl users even on Windows, curl would + probably need to be able to convert from several input encodings. + +1.18 try next proxy if one doesn't work + + Allow an application to specify a list of proxies to try, and failing to + connect to the first go on and try the next instead until the list is + exhausted. Browsers support this feature at least when they specify proxies + using PACs. + + https://github.com/curl/curl/issues/896 + +1.19 Timeout idle connections from the pool + + libcurl currently keeps connections in its connection pool for an indefinite + period of time, until it either gets reused, gets noticed that it has been + closed by the server or gets pruned to make room for a new connection. + + To reduce overhead (especially for when we add monitoring of the connections + in the pool), we should introduce a timeout so that connections that have + been idle for N seconds get closed. 
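+
+  Sketching 1.14 above: an application can already layer typed wrappers over
+  curl_easy_setopt() itself; the names below mirror that proposal and are not
+  an existing libcurl API.
+
+   #include <curl/curl.h>
+
+   /* hypothetical typed setters in the spirit of 1.14 */
+   static CURLcode curl_easy_set_num(CURL *h, CURLoption opt, long val)
+   {
+     return curl_easy_setopt(h, opt, val);
+   }
+
+   static CURLcode curl_easy_set_ptr(CURL *h, CURLoption opt, void *val)
+   {
+     return curl_easy_setopt(h, opt, val);
+   }
+
+   int main(void)
+   {
+     CURL *h = curl_easy_init();
+
+     if(!h)
+       return 1;
+     /* a wrong argument type now triggers a compiler diagnostic instead of
+        silently passing through the varargs of curl_easy_setopt() */
+     curl_easy_set_ptr(h, CURLOPT_URL, (void *)"https://example.com/");
+     curl_easy_set_num(h, CURLOPT_FOLLOWLOCATION, 1L);
+     curl_easy_cleanup(h);
+     return 0;
+   }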
+ +1.20 SRV and URI DNS records + + Offer support for resolving SRV and URI DNS records for libcurl to know which + server to connect to for various protocols (including HTTP!). + +1.21 API for URL parsing/splitting + + libcurl has always parsed URLs internally and never exposed any API or + features to allow applications to do it. Still most or many applications + using libcurl need that ability. In polls to users, we've learned that many + libcurl users would like to see and use such an API. + +1.23 Offer API to flush the connection pool + + Sometimes applications want to flush all the existing connections kept alive. + An API could allow a forced flush or just a forced loop that would properly + close all connections that have been closed by the server already. + +1.24 TCP Fast Open for windows + + libcurl supports the CURLOPT_TCP_FASTOPEN option since 7.49.0 for Linux and + Mac OS. Windows supports TCP Fast Open starting with Windows 10, version 1607 + and we should add support for it. + +1.25 Remove the generated include file + + When curl and libcurl are built, one of the public include files are + generated and is populated with a set of defines that are derevid from sizes + and constants for the particular target architecture that build is made. For + platforms that can select between 32 bit and 64 bit at build time, this + approach makes the libcurl build only create a set of public headers suitable + for one of the architectures and not both. If you build libcurl for such a + platform and you want to allow applications to get built using either 32/64 + version, you must generate the libcurl headers once for each setup and you + must then add a replacement curl header that would itself select the correct + 32 or 64 bit specific header as necessary. + + Your curl/curl.h alternative could then look like (replace with suitable CPP + variable to check): + + #ifdef ARCH_32bit + #include + #else /* ARCH_64bit */ + #include + #endif + + A fix would either (A) fix the 32/64 setup automatically or even better (B) + work away the architecture specific defines from the headers so that they can + be used for all architectures independently of what libcurl was built for. + + +2. libcurl - multi interface + +2.1 More non-blocking + + Make sure we don't ever loop because of non-blocking sockets returning + EWOULDBLOCK or similar. Blocking cases include: + + - Name resolves on non-windows unless c-ares or the threaded resolver is used + - HTTP proxy CONNECT operations + - SOCKS proxy handshakes + - file:// transfers + - TELNET transfers + - The "DONE" operation (post transfer protocol-specific actions) for the + protocols SFTP, SMTP, FTP. Fixing Curl_done() for this is a worthy task. + +2.2 Better support for same name resolves + + If a name resolve has been initiated for name NN and a second easy handle + wants to resolve that name as well, make it wait for the first resolve to end + up in the cache instead of doing a second separate resolve. This is + especially needed when adding many simultaneous handles using the same host + name when the DNS resolver can get flooded. + +2.3 Non-blocking curl_multi_remove_handle() + + The multi interface has a few API calls that assume a blocking behavior, like + add_handle() and remove_handle() which limits what we can do internally. The + multi API need to be moved even more into a single function that "drives" + everything in a non-blocking manner and signals when something is done. 
A + remove or add would then only ask for the action to get started and then + multi_perform() etc still be called until the add/remove is completed. + +2.4 Split connect and authentication process + + The multi interface treats the authentication process as part of the connect + phase. As such any failures during authentication won't trigger the relevant + QUIT or LOGOFF for protocols such as IMAP, POP3 and SMTP. + +2.5 Edge-triggered sockets should work + + The multi_socket API should work with edge-triggered socket events. One of + the internal actions that need to be improved for this to work perfectly is + the 'maxloops' handling in transfer.c:readwrite_data(). + +3. Documentation + +3.2 Provide cmake config-file + + A config-file package is a set of files provided by us to allow applications + to write cmake scripts to find and use libcurl easier. See + https://github.com/curl/curl/issues/885 + +4. FTP + +4.1 HOST + + HOST is a command for a client to tell which host name to use, to offer FTP + servers named-based virtual hosting: + + https://tools.ietf.org/html/rfc7151 + +4.2 Alter passive/active on failure and retry + + When trying to connect passively to a server which only supports active + connections, libcurl returns CURLE_FTP_WEIRD_PASV_REPLY and closes the + connection. There could be a way to fallback to an active connection (and + vice versa). https://curl.haxx.se/bug/feature.cgi?id=1754793 + +4.3 Earlier bad letter detection + + Make the detection of (bad) %0d and %0a codes in FTP URL parts earlier in the + process to avoid doing a resolve and connect in vain. + +4.4 REST for large files + + REST fix for servers not behaving well on >2GB requests. This should fail if + the server doesn't set the pointer to the requested index. The tricky + (impossible?) part is to figure out if the server did the right thing or not. + +4.5 ASCII support + + FTP ASCII transfers do not follow RFC959. They don't convert the data + accordingly. + +4.6 GSSAPI via Windows SSPI + +In addition to currently supporting the SASL GSSAPI mechanism (Kerberos V5) +via third-party GSS-API libraries, such as Heimdal or MIT Kerberos, also add +support for GSSAPI authentication via Windows SSPI. + +4.7 STAT for LIST without data connection + +Some FTP servers allow STAT for listing directories instead of using LIST, and +the response is then sent over the control connection instead of as the +otherwise usedw data connection: http://www.nsftools.com/tips/RawFTP.htm#STAT + +This is not detailed in any FTP specification. + +5. HTTP + +5.1 Better persistency for HTTP 1.0 + + "Better" support for persistent connections over HTTP 1.0 + https://curl.haxx.se/bug/feature.cgi?id=1089001 + +5.2 support FF3 sqlite cookie files + + Firefox 3 is changing from its former format to a a sqlite database instead. + We should consider how (lib)curl can/should support this. + https://curl.haxx.se/bug/feature.cgi?id=1871388 + +5.3 Rearrange request header order + + Server implementors often make an effort to detect browser and to reject + clients it can detect to not match. One of the last details we cannot yet + control in libcurl's HTTP requests, which also can be exploited to detect + that libcurl is in fact used even when it tries to impersonate a browser, is + the order of the request headers. I propose that we introduce a new option in + which you give headers a value, and then when the HTTP request is built it + sorts the headers based on that number. 
We could then have internally created + headers use a default value so only headers that need to be moved have to be + specified. + +5.4 HTTP Digest using SHA-256 + + RFC 7616 introduces an update to the HTTP Digest authentication + specification, which amongst other thing defines how new digest algorithms + can be used instead of MD5 which is considered old and not recommended. + + See https://tools.ietf.org/html/rfc7616 and + https://github.com/curl/curl/issues/1018 + +5.5 auth= in URLs + + Add the ability to specify the preferred authentication mechanism to use by + using ;auth= in the login part of the URL. + + For example: + + http://test:pass;auth=NTLM@example.com would be equivalent to specifying --user + test:pass;auth=NTLM or --user test:pass --ntlm from the command line. + + Additionally this should be implemented for proxy base URLs as well. + +5.6 Refuse "downgrade" redirects + + See https://github.com/curl/curl/issues/226 + + Consider a way to tell curl to refuse to "downgrade" protocol with a redirect + and/or possibly a bit that refuses redirect to change protocol completely. + +5.7 Brotli compression + + Brotli compression performs better than gzip and is being implemented by + browsers and servers widely. The algorithm: https://github.com/google/brotli + The Firefox bug: https://bugzilla.mozilla.org/show_bug.cgi?id=366559 + +5.8 QUIC + + The standardization process of QUIC has been taken to the IETF and can be + followed on the [IETF QUIC Mailing + list](https://www.ietf.org/mailman/listinfo/quic). I'd like us to get on the + bandwagon. Ideally, this would be done with a separate library/project to + handle the binary/framing layer in a similar fashion to how HTTP/2 is + implemented. This, to allow other projects to benefit from the work and to + thus broaden the interest and chance of others to participate. + +5.9 Improve formpost API + + Revamp the formpost API and making something that is easier to use and + understand: + + https://github.com/curl/curl/wiki/formpost-API-redesigned + +5.10 Leave secure cookies alone + + Non-secure origins (HTTP sites) should not be allowed to set or modify + cookies with the 'secure' property: + + https://tools.ietf.org/html/draft-ietf-httpbis-cookie-alone-01 + +5.11 Chunked transfer multipart formpost + + For a case where the file is being made during the upload is progressing + (like passed on stdin to the curl tool), we cannot know the size before-hand + and we rather not read the entire thing into memory before it can start the + upload. + + https://github.com/curl/curl/issues/1139 + +5.12 OPTIONS * + + HTTP defines an OPTIONS method that can be sent with an asterisk option like + "OPTIONS *" to ask about options from the server and not a specific URL + resource. https://tools.ietf.org/html/rfc7230#section-5.3.4 + + libcurl as it currently works will always sent HTTP methods with a path that + starts with a slash so there's no way for an application to send a proper + "OPTIONS *" using libcurl. This should be fixed. + + I can't think of any other non-slash paths we should support so it will + probably make sense to add a new boolean option for issuign an "OPTIONS *" + request. CURLOPT_OPTIONSASTERISK perhaps (and a corresponding command line + option)? + + See https://github.com/curl/curl/issues/1280 + + +6. TELNET + +6.1 ditch stdin + +Reading input (to send to the remote server) on stdin is a crappy solution for +library purposes. We need to invent a good way for the application to be able +to provide the data to send. 
+ +6.2 ditch telnet-specific select + + Move the telnet support's network select() loop go away and merge the code + into the main transfer loop. Until this is done, the multi interface won't + work for telnet. + +6.3 feature negotiation debug data + + Add telnet feature negotiation data to the debug callback as header data. + + +7. SMTP + +7.1 Pipelining + + Add support for pipelining emails. + +7.2 Enhanced capability support + + Add the ability, for an application that uses libcurl, to obtain the list of + capabilities returned from the EHLO command. + +7.3 Add CURLOPT_MAIL_CLIENT option + + Rather than use the URL to specify the mail client string to present in the + HELO and EHLO commands, libcurl should support a new CURLOPT specifically for + specifying this data as the URL is non-standard and to be honest a bit of a + hack ;-) + + Please see the following thread for more information: + https://curl.haxx.se/mail/lib-2012-05/0178.html + + +8. POP3 + +8.1 Pipelining + + Add support for pipelining commands. + +8.2 Enhanced capability support + + Add the ability, for an application that uses libcurl, to obtain the list of + capabilities returned from the CAPA command. + +9. IMAP + +9.1 Enhanced capability support + + Add the ability, for an application that uses libcurl, to obtain the list of + capabilities returned from the CAPABILITY command. + +10. LDAP + +10.1 SASL based authentication mechanisms + + Currently the LDAP module only supports ldap_simple_bind_s() in order to bind + to an LDAP server. However, this function sends username and password details + using the simple authentication mechanism (as clear text). However, it should + be possible to use ldap_bind_s() instead specifying the security context + information ourselves. + +11. SMB + +11.1 File listing support + +Add support for listing the contents of a SMB share. The output should probably +be the same as/similar to FTP. + +11.2 Honor file timestamps + +The timestamp of the transferred file should reflect that of the original file. + +11.3 Use NTLMv2 + +Currently the SMB authentication uses NTLMv1. + +11.4 Create remote directories + +Support for creating remote directories when uploading a file to a directory +that doesn't exist on the server, just like --ftp-create-dirs. + +12. New protocols + +12.1 RSYNC + + There's no RFC for the protocol or an URI/URL format. An implementation + should most probably use an existing rsync library, such as librsync. + +13. SSL + +13.1 Disable specific versions + + Provide an option that allows for disabling specific SSL versions, such as + SSLv2 https://curl.haxx.se/bug/feature.cgi?id=1767276 + +13.2 Provide mutex locking API + + Provide a libcurl API for setting mutex callbacks in the underlying SSL + library, so that the same application code can use mutex-locking + independently of OpenSSL or GnutTLS being used. + +13.3 Evaluate SSL patches + + Evaluate/apply Gertjan van Wingerde's SSL patches: + https://curl.haxx.se/mail/lib-2004-03/0087.html + +13.4 Cache/share OpenSSL contexts + + "Look at SSL cafile - quick traces look to me like these are done on every + request as well, when they should only be necessary once per SSL context (or + once per handle)". The major improvement we can rather easily do is to make + sure we don't create and kill a new SSL "context" for every request, but + instead make one for every connection and re-use that SSL context in the same + style connections are re-used. 
It will make us use slightly more memory but + it will libcurl do less creations and deletions of SSL contexts. + + Technically, the "caching" is probably best implemented by getting added to + the share interface so that easy handles who want to and can reuse the + context specify that by sharing with the right properties set. + + https://github.com/curl/curl/issues/1110 + +13.5 Export session ids + + Add an interface to libcurl that enables "session IDs" to get + exported/imported. Cris Bailiff said: "OpenSSL has functions which can + serialise the current SSL state to a buffer of your choice, and recover/reset + the state from such a buffer at a later date - this is used by mod_ssl for + apache to implement and SSL session ID cache". + +13.6 Provide callback for cert verification + + OpenSSL supports a callback for customised verification of the peer + certificate, but this doesn't seem to be exposed in the libcurl APIs. Could + it be? There's so much that could be done if it were! + +13.7 improve configure --with-ssl + + make the configure --with-ssl option first check for OpenSSL, then GnuTLS, + then NSS... + +13.8 Support DANE + + DNS-Based Authentication of Named Entities (DANE) is a way to provide SSL + keys and certs over DNS using DNSSEC as an alternative to the CA model. + https://www.rfc-editor.org/rfc/rfc6698.txt + + An initial patch was posted by Suresh Krishnaswamy on March 7th 2013 + (https://curl.haxx.se/mail/lib-2013-03/0075.html) but it was a too simple + approach. See Daniel's comments: + https://curl.haxx.se/mail/lib-2013-03/0103.html . libunbound may be the + correct library to base this development on. + + Björn Stenberg wrote a separate initial take on DANE that was never + completed. + +13.10 Support SSLKEYLOGFILE + + When used, Firefox and Chrome dumps their master TLS keys to the file name + this environment variable specifies. This allows tools like for example + Wireshark to capture and decipher TLS traffic to/from those clients. libcurl + could be made to support this more widely (presumably this already works when + built with NSS). Peter Wu made a OpenSSL preload to make possible that can be + used as inspiration and guidance + https://git.lekensteyn.nl/peter/wireshark-notes/tree/src/sslkeylog.c + +13.11 Support intermediate & root pinning for PINNEDPUBLICKEY + + CURLOPT_PINNEDPUBLICKEY does not consider the hashes of intermediate & root + certificates when comparing the pinned keys. Therefore it is not compatible + with "HTTP Public Key Pinning" as there also intermediate and root certificates + can be pinned. This is very useful as it prevents webadmins from "locking + themself out of their servers". + + Adding this feature would make curls pinning 100% compatible to HPKP and allow + more flexible pinning. + +13.12 Support HSTS + + "HTTP Strict Transport Security" is TOFU (trust on first use), time-based + features indicated by a HTTP header send by the webserver. It is widely used + in browsers and it's purpose is to prevent insecure HTTP connections after + a previous HTTPS connection. It protects against SSLStripping attacks. + + Doc: https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security + RFC 6797: https://tools.ietf.org/html/rfc6797 + +13.13 Support HPKP + + "HTTP Public Key Pinning" is TOFU (trust on first use), time-based + features indicated by a HTTP header send by the webserver. 
It's purpose is + to prevent Man-in-the-middle attacks by trusted CAs by allowing webadmins + to specify which CAs/certificates/public keys to trust when connection to + their websites. + + It can be build based on PINNEDPUBLICKEY. + + Wikipedia: https://en.wikipedia.org/wiki/HTTP_Public_Key_Pinning + OWASP: https://www.owasp.org/index.php/Certificate_and_Public_Key_Pinning + Doc: https://developer.mozilla.org/de/docs/Web/Security/Public_Key_Pinning + RFC: https://tools.ietf.org/html/draft-ietf-websec-key-pinning-21 + +14. GnuTLS + +14.1 SSL engine stuff + + Is this even possible? + +14.2 check connection + + Add a way to check if the connection seems to be alive, to correspond to the + SSL_peak() way we use with OpenSSL. + +15. WinSSL/SChannel + +15.1 Add support for client certificate authentication + + WinSSL/SChannel currently makes use of the OS-level system and user + certificate and private key stores. This does not allow the application + or the user to supply a custom client certificate using curl or libcurl. + + Therefore support for the existing -E/--cert and --key options should be + implemented by supplying a custom certificate to the SChannel APIs, see: + - Getting a Certificate for Schannel + https://msdn.microsoft.com/en-us/library/windows/desktop/aa375447.aspx + +15.2 Add support for custom server certificate validation + + WinSSL/SChannel currently makes use of the OS-level system and user + certificate trust store. This does not allow the application or user to + customize the server certificate validation process using curl or libcurl. + + Therefore support for the existing --cacert or --capath options should be + implemented by supplying a custom certificate to the SChannel APIs, see: + - Getting a Certificate for Schannel + https://msdn.microsoft.com/en-us/library/windows/desktop/aa375447.aspx + +15.3 Add support for the --ciphers option + + The cipher suites used by WinSSL/SChannel are configured on an OS-level + instead of an application-level. This does not allow the application or + the user to customize the configured cipher suites using curl or libcurl. + + Therefore support for the existing --ciphers option should be implemented + by mapping the OpenSSL/GnuTLS cipher suites to the SChannel APIs, see + - Specifying Schannel Ciphers and Cipher Strengths + https://msdn.microsoft.com/en-us/library/windows/desktop/aa380161.aspx + +16. SASL + +16.1 Other authentication mechanisms + + Add support for other authentication mechanisms such as OLP, + GSS-SPNEGO and others. + +16.2 Add QOP support to GSSAPI authentication + + Currently the GSSAPI authentication only supports the default QOP of auth + (Authentication), whilst Kerberos V5 supports both auth-int (Authentication + with integrity protection) and auth-conf (Authentication with integrity and + privacy protection). + +16.3 Support binary messages (i.e.: non-base64) + + Mandatory to support LDAP SASL authentication. + + +17. SSH protocols + +17.1 Multiplexing + + SSH is a perfectly fine multiplexed protocols which would allow libcurl to do + multiple parallel transfers from the same host using the same connection, + much in the same spirit as HTTP/2 does. libcurl however does not take + advantage of that ability but will instead always create a new connection for + new transfers even if an existing connection already exists to the host. + + To fix this, libcurl would have to detect an existing connection and "attach" + the new transfer to the existing one. 
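+
+  As a rough sketch (host and file names are made up), an application that
+  adds two SFTP transfers to the same host via the multi interface today
+  gets two SSH connections; with the multiplexing described above, the
+  second transfer could attach to the first connection instead:
+
+    #include <curl/curl.h>
+
+    static void two_sftp_transfers(void)
+    {
+      CURLM *multi = curl_multi_init();
+      CURL *a = curl_easy_init();
+      CURL *b = curl_easy_init();
+      int running = 0;
+
+      curl_easy_setopt(a, CURLOPT_URL, "sftp://example.com/file1");
+      curl_easy_setopt(b, CURLOPT_URL, "sftp://example.com/file2");
+      curl_multi_add_handle(multi, a);
+      curl_multi_add_handle(multi, b);
+
+      do {
+        curl_multi_perform(multi, &running);
+        curl_multi_wait(multi, NULL, 0, 1000, NULL);
+      } while(running);
+
+      curl_multi_remove_handle(multi, a);
+      curl_multi_remove_handle(multi, b);
+      curl_easy_cleanup(a);
+      curl_easy_cleanup(b);
+      curl_multi_cleanup(multi);
+    }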
+ +17.2 SFTP performance + + libcurl's SFTP transfer performance is sub par and can be improved, mostly by + the approach mentioned in "1.6 Modified buffer size approach". + +17.3 Support better than MD5 hostkey hash + + libcurl offers the CURLOPT_SSH_HOST_PUBLIC_KEY_MD5 option for verifying the + server's key. MD5 is generally being deprecated so we should implement + support for stronger hashing algorithms. libssh2 itself is what provides this + underlying functionality and it supports at least SHA-1 as an alternative. + SHA-1 is also being deprecated these days so we should consider workign with + libssh2 to instead offer support for SHA-256 or similar. + + +18. Command line tool + +18.1 sync + + "curl --sync http://example.com/feed[1-100].rss" or + "curl --sync http://example.net/{index,calendar,history}.html" + + Downloads a range or set of URLs using the remote name, but only if the + remote file is newer than the local file. A Last-Modified HTTP date header + should also be used to set the mod date on the downloaded file. + +18.2 glob posts + + Globbing support for -d and -F, as in 'curl -d "name=foo[0-9]" URL'. + This is easily scripted though. + +18.3 prevent file overwriting + + Add an option that prevents curl from overwriting existing local files. When + used, and there already is an existing file with the target file name + (either -O or -o), a number should be appended (and increased if already + existing). So that index.html becomes first index.html.1 and then + index.html.2 etc. + +18.4 simultaneous parallel transfers + + The client could be told to use maximum N simultaneous parallel transfers and + then just make sure that happens. It should of course not make more than one + connection to the same remote host. This would require the client to use the + multi interface. https://curl.haxx.se/bug/feature.cgi?id=1558595 + + Using the multi interface would also allow properly using parallel transfers + with HTTP/2 and supporting HTTP/2 server push from the command line. + +18.5 provide formpost headers + + Extending the capabilities of the multipart formposting. How about leaving + the ';type=foo' syntax as it is and adding an extra tag (headers) which + works like this: curl -F "coolfiles=@fil1.txt;headers=@fil1.hdr" where + fil1.hdr contains extra headers like + + Content-Type: text/plain; charset=KOI8-R" + Content-Transfer-Encoding: base64 + X-User-Comment: Please don't use browser specific HTML code + + which should overwrite the program reasonable defaults (plain/text, + 8bit...) + +18.6 warning when setting an option + + Display a warning when libcurl returns an error when setting an option. + This can be useful to tell when support for a particular feature hasn't been + compiled into the library. + +18.7 warning when sending binary output to terminal + + Provide a way that prompts the user for confirmation before binary data is + sent to the terminal, much in the style 'less' does it. + +18.8 offer color-coded HTTP header output + + By offering different color output on the header name and the header + contents, they could be made more readable and thus help users working on + HTTP services. + +18.9 Choose the name of file in braces for complex URLs + + When using braces to download a list of URLs and you use complicated names + in the list of alternatives, it could be handy to allow curl to use other + names when saving. + + Consider a way to offer that. 
Possibly like + {partURL1:name1,partURL2:name2,partURL3:name3} where the name following the + colon is the output name. + + See https://github.com/curl/curl/issues/221 + +18.10 improve how curl works in a windows console window + + If you pull the scrollbar when transferring with curl in a Windows console + window, the transfer is interrupted and can get disconnected. This can + probably be improved. See https://github.com/curl/curl/issues/322 + +18.11 -w output to stderr + + -w is quite useful, but not to those of us who use curl without -o or -O + (such as for scripting through a higher level language). It would be nice to + have an option that is exactly like -w but sends it to stderr + instead. Proposed name: --write-stderr. See + https://github.com/curl/curl/issues/613 + +18.12 keep running, read instructions from pipe/socket + + Provide an option that makes curl not exit after the last URL (or even work + without a given URL), and then make it read instructions passed on a pipe or + over a socket to make further instructions so that a second subsequent curl + invoke can talk to the still running instance and ask for transfers to get + done, and thus maintain its connection pool, DNS cache and more. + +18.13 support metalink in http headers + + Curl has support for downloading a metalink xml file, processing it, and then + downloading the target of the metalink. This is done via the --metalink option. + It would be nice if metalink also supported downloading via metalink + information that is stored in HTTP headers (RFC 6249). Theoretically this could + also be supported with the --metalink option. + + See https://tools.ietf.org/html/rfc6249 + + See also https://lists.gnu.org/archive/html/bug-wget/2015-06/msg00034.html for + an implematation of this in wget. + +18.14 --fail without --location should treat 3xx as a failure + + To allow a command line like this to detect a redirect and consider it a + failure: + + curl -v --fail -O https://example.com/curl-7.48.0.tar.gz + + ... --fail must treat 3xx responses as failures too. The least problematic + way to implement this is probably to add that new logic in the command line + tool only and not in the underlying CURLOPT_FAILONERROR logic. + +18.15 --retry should resume + + When --retry is used and curl actually retries transfer, it should use the + already transferred data and do a resumed transfer for the rest (when + possible) so that it doesn't have to transfer the same data again that was + already transferred before the retry. + + See https://github.com/curl/curl/issues/1084 + +18.16 send only part of --data + + When the user only wants to send a small piece of the data provided with + --data or --data-binary, like when that data is a huge file, consider a way + to specify that curl should only send a piece of that. One suggested syntax + would be: "--data-binary @largefile.zip!1073741823-2147483647". + + See https://github.com/curl/curl/issues/1200 + +18.17 consider file name from the redirected URL with -O ? + + When a user gives a URL and uses -O, and curl follows a redirect to a new + URL, the file name is not extracted and used from the newly redirected-to URL + even if the new URL may have a much more sensible file name. + + This is clearly documented and helps for security since there's no surprise + to users which file name that might get overwritten. 
But maybe a new option + could allow for this or maybe -J should imply such a treatment as well as -J + already allows for the server to decide what file name to use so it already + provides the "may overwrite any file" risk. + + This is extra tricky if the original URL has no file name part at all since + then the current code path will error out with an error message, and we can't + *know* already at that point if curl will be redirected to a URL that has a + file name... + + See https://github.com/curl/curl/issues/1241 + +19. Build + +19.1 roffit + + Consider extending 'roffit' to produce decent ASCII output, and use that + instead of (g)nroff when building src/tool_hugehelp.c + +19.2 Enable PIE and RELRO by default + + Especially when having programs that execute curl via the command line, PIE + renders the exploitation of memory corruption vulnerabilities a lot more + difficult. This can be attributed to the additional information leaks being + required to conduct a successful attack. RELRO, on the other hand, masks + different binary sections like the GOT as read-only and thus kills a handful + of techniques that come in handy when attackers are able to arbitrarily + overwrite memory. A few tests showed that enabling these features had close + to no impact, neither on the performance nor on the general functionality of + curl. + + +20. Test suite + +20.1 SSL tunnel + + Make our own version of stunnel for simple port forwarding to enable HTTPS + and FTP-SSL tests without the stunnel dependency, and it could allow us to + provide test tools built with either OpenSSL or GnuTLS + +20.2 nicer lacking perl message + + If perl wasn't found by the configure script, don't attempt to run the tests + but explain something nice why it doesn't. + +20.3 more protocols supported + + Extend the test suite to include more protocols. The telnet could just do FTP + or http operations (for which we have test servers). + +20.4 more platforms supported + + Make the test suite work on more platforms. OpenBSD and Mac OS. Remove + fork()s and it should become even more portable. + +20.5 Add support for concurrent connections + + Tests 836, 882 and 938 were designed to verify that separate connections aren't + used when using different login credentials in protocols that shouldn't re-use + a connection under such circumstances. + + Unfortunately, ftpserver.pl doesn't appear to support multiple concurrent + connections. The read while() loop seems to loop until it receives a disconnect + from the client, where it then enters the waiting for connections loop. When + the client opens a second connection to the server, the first connection hasn't + been dropped (unless it has been forced - which we shouldn't do in these tests) + and thus the wait for connections loop is never entered to receive the second + connection. + +20.6 Use the RFC6265 test suite + + A test suite made for HTTP cookies (RFC 6265) by Adam Barth is available at + https://github.com/abarth/http-state/tree/master/tests + + It'd be really awesome if someone would write a script/setup that would run + curl with that test suite and detect deviances. Ideally, that would even be + incorporated into our regular test suite. + + +21. Next SONAME bump + +21.1 http-style HEAD output for FTP + + #undef CURL_FTP_HTTPSTYLE_HEAD in lib/ftp.c to remove the HTTP-style headers + from being output in NOBODY requests over FTP + +21.2 combine error codes + + Combine some of the error codes to remove duplicates. 
The original + numbering should not be changed, and the old identifiers would be + macroed to the new ones in an CURL_NO_OLDIES section to help with + backward compatibility. + + Candidates for removal and their replacements: + + CURLE_FILE_COULDNT_READ_FILE => CURLE_REMOTE_FILE_NOT_FOUND + + CURLE_FTP_COULDNT_RETR_FILE => CURLE_REMOTE_FILE_NOT_FOUND + + CURLE_FTP_COULDNT_USE_REST => CURLE_RANGE_ERROR + + CURLE_FUNCTION_NOT_FOUND => CURLE_FAILED_INIT + + CURLE_LDAP_INVALID_URL => CURLE_URL_MALFORMAT + + CURLE_TFTP_NOSUCHUSER => CURLE_TFTP_ILLEGAL + + CURLE_TFTP_NOTFOUND => CURLE_REMOTE_FILE_NOT_FOUND + + CURLE_TFTP_PERM => CURLE_REMOTE_ACCESS_DENIED + +21.3 extend CURLOPT_SOCKOPTFUNCTION prototype + + The current prototype only provides 'purpose' that tells what the + connection/socket is for, but not any protocol or similar. It makes it hard + for applications to differentiate on TCP vs UDP and even HTTP vs FTP and + similar. + +22. Next major release + +22.1 cleanup return codes + + curl_easy_cleanup() returns void, but curl_multi_cleanup() returns a + CURLMcode. These should be changed to be the same. + +22.2 remove obsolete defines + + remove obsolete defines from curl/curl.h + +22.3 size_t + + make several functions use size_t instead of int in their APIs + +22.4 remove several functions + + remove the following functions from the public API: + + curl_getenv + + curl_mprintf (and variations) + + curl_strequal + + curl_strnequal + + They will instead become curlx_ - alternatives. That makes the curl app + still capable of using them, by building with them from source. + + These functions have no purpose anymore: + + curl_multi_socket + + curl_multi_socket_all + +22.5 remove CURLOPT_FAILONERROR + + Remove support for CURLOPT_FAILONERROR, it has gotten too kludgy and weird + internally. Let the app judge success or not for itself. + +22.6 remove CURLOPT_DNS_USE_GLOBAL_CACHE + + Remove support for a global DNS cache. Anything global is silly, and we + already offer the share interface for the same functionality but done + "right". + +22.7 remove progress meter from libcurl + + The internally provided progress meter output doesn't belong in the library. + Basically no application wants it (apart from curl) but instead applications + can and should do their own progress meters using the progress callback. + + The progress callback should then be bumped as well to get proper 64bit + variable types passed to it instead of doubles so that big files work + correctly. + +22.8 remove 'curl_httppost' from public + + curl_formadd() was made to fill in a public struct, but the fact that the + struct is public is never really used by application for their own advantage + but instead often restricts how the form functions can or can't be modified. + + Changing them to return a private handle will benefit the implementation and + allow us much greater freedoms while still maintaining a solid API and ABI. diff --git a/deps-win32/curl-7.54.1/docs/TheArtOfHttpScripting b/deps-win32/curl-7.54.1/docs/TheArtOfHttpScripting new file mode 100644 index 0000000..b2bd9db --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/TheArtOfHttpScripting @@ -0,0 +1,758 @@ + _ _ ____ _ + ___| | | | _ \| | + / __| | | | |_) | | + | (__| |_| | _ <| |___ + \___|\___/|_| \_\_____| + + +The Art Of Scripting HTTP Requests Using Curl + + 1. HTTP Scripting + 1.1 Background + 1.2 The HTTP Protocol + 1.3 See the Protocol + 1.4 See the Timing + 1.5 See the Response + 2. 
URL + 2.1 Spec + 2.2 Host + 2.3 Port number + 2.4 User name and password + 2.5 Path part + 3. Fetch a page + 3.1 GET + 3.2 HEAD + 3.3 Multiple URLs in a single command line + 3.4 Multiple HTTP methods in a single command line + 4. HTML forms + 4.1 Forms explained + 4.2 GET + 4.3 POST + 4.4 File Upload POST + 4.5 Hidden Fields + 4.6 Figure Out What A POST Looks Like + 5. HTTP upload + 5.1 PUT + 6. HTTP Authentication + 6.1 Basic Authentication + 6.2 Other Authentication + 6.3 Proxy Authentication + 6.4 Hiding credentials + 7. More HTTP Headers + 7.1 Referer + 7.2 User Agent + 8. Redirects + 8.1 Location header + 8.2 Other redirects + 9. Cookies + 9.1 Cookie Basics + 9.2 Cookie options + 10. HTTPS + 10.1 HTTPS is HTTP secure + 10.2 Certificates + 11. Custom Request Elements + 11.1 Modify method and headers + 11.2 More on changed methods + 12. Web Login + 12.1 Some login tricks + 13. Debug + 13.1 Some debug tricks + 14. References + 14.1 Standards + 14.2 Sites + +============================================================================== + +1. HTTP Scripting + + 1.1 Background + + This document assumes that you're familiar with HTML and general networking. + + The increasing amount of applications moving to the web has made "HTTP + Scripting" more frequently requested and wanted. To be able to automatically + extract information from the web, to fake users, to post or upload data to + web servers are all important tasks today. + + Curl is a command line tool for doing all sorts of URL manipulations and + transfers, but this particular document will focus on how to use it when + doing HTTP requests for fun and profit. I'll assume that you know how to + invoke 'curl --help' or 'curl --manual' to get basic information about it. + + Curl is not written to do everything for you. It makes the requests, it gets + the data, it sends data and it retrieves the information. You probably need + to glue everything together using some kind of script language or repeated + manual invokes. + + 1.2 The HTTP Protocol + + HTTP is the protocol used to fetch data from web servers. It is a very simple + protocol that is built upon TCP/IP. The protocol also allows information to + get sent to the server from the client using a few different methods, as will + be shown here. + + HTTP is plain ASCII text lines being sent by the client to a server to + request a particular action, and then the server replies a few text lines + before the actual requested content is sent to the client. + + The client, curl, sends a HTTP request. The request contains a method (like + GET, POST, HEAD etc), a number of request headers and sometimes a request + body. The HTTP server responds with a status line (indicating if things went + well), response headers and most often also a response body. The "body" part + is the plain data you requested, like the actual HTML or the image etc. + + 1.3 See the Protocol + + Using curl's option --verbose (-v as a short option) will display what kind + of commands curl sends to the server, as well as a few other informational + texts. + + --verbose is the single most useful option when it comes to debug or even + understand the curl<->server interaction. + + Sometimes even --verbose is not enough. Then --trace and --trace-ascii offer + even more details as they show EVERYTHING curl sends and receives. 
Use it + like this: + + curl --trace-ascii debugdump.txt http://www.example.com/ + + 1.4 See the Timing + + Many times you may wonder what exactly is taking all the time, or you just + want to know the amount of milliseconds between two points in a + transfer. For those, and other similar situations, the --trace-time option + is what you need. It'll prepend the time to each trace output line: + + curl --trace-ascii d.txt --trace-time http://example.com/ + + 1.5 See the Response + + By default curl sends the response to stdout. You need to redirect it + somewhere to avoid that, most often that is done with -o or -O. + +2. URL + + 2.1 Spec + + The Uniform Resource Locator format is how you specify the address of a + particular resource on the Internet. You know these, you've seen URLs like + https://curl.haxx.se or https://yourbank.com a million times. RFC 3986 is the + canonical spec. And yeah, the formal name is not URL, it is URI. + + 2.2 Host + + The host name is usually resolved using DNS or your /etc/hosts file to an IP + address and that's what curl will communicate with. Alternatively you specify + the IP address directly in the URL instead of a name. + + For development and other trying out situations, you can point to a different + IP address for a host name than what would otherwise be used, by using curl's + --resolve option: + + curl --resolve www.example.org:80:127.0.0.1 http://www.example.org/ + + 2.3 Port number + + Each protocol curl supports operates on a default port number, be it over TCP + or in some cases UDP. Normally you don't have to take that into + consideration, but at times you run test servers on other ports or + similar. Then you can specify the port number in the URL with a colon and a + number immediately following the host name. Like when doing HTTP to port + 1234: + + curl http://www.example.org:1234/ + + The port number you specify in the URL is the number that the server uses to + offer its services. Sometimes you may use a local proxy, and then you may + need to specify that proxy's port number separately for what curl needs to + connect to locally. Like when using a HTTP proxy on port 4321: + + curl --proxy http://proxy.example.org:4321 http://remote.example.org/ + + 2.4 User name and password + + Some services are setup to require HTTP authentication and then you need to + provide name and password which is then transferred to the remote site in + various ways depending on the exact authentication protocol used. + + You can opt to either insert the user and password in the URL or you can + provide them separately: + + curl http://user:password@example.org/ + + or + + curl -u user:password http://example.org/ + + You need to pay attention that this kind of HTTP authentication is not what + is usually done and requested by user-oriented web sites these days. They + tend to use forms and cookies instead. + + 2.5 Path part + + The path part is just sent off to the server to request that it sends back + the associated response. The path is what is to the right side of the slash + that follows the host name and possibly port number. + +3. Fetch a page + + 3.1 GET + + The simplest and most common request/operation made using HTTP is to GET a + URL. The URL could itself refer to a web page, an image or a file. The client + issues a GET request to the server and receives the document it asked for. + If you issue the command line + + curl https://curl.haxx.se + + you get a web page returned in your terminal window. The entire HTML document + that that URL holds. 
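+
+ If you script from a program rather than from a shell, the same GET using
+ libcurl's easy interface is only a few lines of C. This is a minimal
+ sketch with most error handling left out:
+
+     #include <curl/curl.h>
+
+     int main(void)
+     {
+       CURLcode res = CURLE_FAILED_INIT;
+       CURL *curl;
+
+       curl_global_init(CURL_GLOBAL_DEFAULT);
+       curl = curl_easy_init();
+       if(curl) {
+         /* same GET as above; the body goes to stdout by default */
+         curl_easy_setopt(curl, CURLOPT_URL, "https://curl.haxx.se");
+         res = curl_easy_perform(curl);
+         curl_easy_cleanup(curl);
+       }
+       curl_global_cleanup();
+       return (int)res;
+     }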
+ + All HTTP replies contain a set of response headers that are normally hidden, + use curl's --include (-i) option to display them as well as the rest of the + document. + + 3.2 HEAD + + You can ask the remote server for ONLY the headers by using the --head (-I) + option which will make curl issue a HEAD request. In some special cases + servers deny the HEAD method while others still work, which is a particular + kind of annoyance. + + The HEAD method is defined and made so that the server returns the headers + exactly the way it would do for a GET, but without a body. It means that you + may see a Content-Length: in the response headers, but there must not be an + actual body in the HEAD response. + + 3.3 Multiple URLs in a single command line + + A single curl command line may involve one or many URLs. The most common case + is probably to just use one, but you can specify any amount of URLs. Yes + any. No limits. You'll then get requests repeated over and over for all the + given URLs. + + Example, send two GETs: + + curl http://url1.example.com http://url2.example.com + + If you use --data to POST to the URL, using multiple URLs means that you send + that same POST to all the given URLs. + + Example, send two POSTs: + + curl --data name=curl http://url1.example.com http://url2.example.com + + + 3.4 Multiple HTTP methods in a single command line + + Sometimes you need to operate on several URLs in a single command line and do + different HTTP methods on each. For this, you'll enjoy the --next option. It + is basically a separator that separates a bunch of options from the next. All + the URLs before --next will get the same method and will get all the POST + data merged into one. + + When curl reaches the --next on the command line, it'll sort of reset the + method and the POST data and allow a new set. + + Perhaps this is best shown with a few examples. To send first a HEAD and then + a GET: + + curl -I http://example.com --next http://example.com + + To first send a POST and then a GET: + + curl -d score=10 http://example.com/post.cgi --next http://example.com/results.html + + +4. HTML forms + + 4.1 Forms explained + + Forms are the general way a web site can present a HTML page with fields for + the user to enter data in, and then press some kind of 'OK' or 'Submit' + button to get that data sent to the server. The server then typically uses + the posted data to decide how to act. Like using the entered words to search + in a database, or to add the info in a bug tracking system, display the entered + address on a map or using the info as a login-prompt verifying that the user + is allowed to see what it is about to see. + + Of course there has to be some kind of program on the server end to receive + the data you send. You cannot just invent something out of the air. + + 4.2 GET + + A GET-form uses the method GET, as specified in HTML like: + +
+        <form method="GET" action="junk.cgi">
+          <input type=text name="birthyear">
+          <input type=submit name=press value="OK">
+        </form>
+ + In your favorite browser, this form will appear with a text box to fill in + and a press-button labeled "OK". If you fill in '1905' and press the OK + button, your browser will then create a new URL to get for you. The URL will + get "junk.cgi?birthyear=1905&press=OK" appended to the path part of the + previous URL. + + If the original form was seen on the page "www.hotmail.com/when/birth.html", + the second page you'll get will become + "www.hotmail.com/when/junk.cgi?birthyear=1905&press=OK". + + Most search engines work this way. + + To make curl do the GET form post for you, just enter the expected created + URL: + + curl "http://www.hotmail.com/when/junk.cgi?birthyear=1905&press=OK" + + 4.3 POST + + The GET method makes all input field names get displayed in the URL field of + your browser. That's generally a good thing when you want to be able to + bookmark that page with your given data, but it is an obvious disadvantage + if you entered secret information in one of the fields or if there are a + large amount of fields creating a very long and unreadable URL. + + The HTTP protocol then offers the POST method. This way the client sends the + data separated from the URL and thus you won't see any of it in the URL + address field. + + The form would look very similar to the previous one: + +
+        <form method="POST" action="junk.cgi">
+          <input type=text name="birthyear">
+          <input type=submit name=press value=" OK ">
+        </form>
+ + And to use curl to post this form with the same data filled in as before, we + could do it like: + + curl --data "birthyear=1905&press=%20OK%20" \ + http://www.example.com/when.cgi + + This kind of POST will use the Content-Type + application/x-www-form-urlencoded and is the most widely used POST kind. + + The data you send to the server MUST already be properly encoded, curl will + not do that for you. For example, if you want the data to contain a space, + you need to replace that space with %20 etc. Failing to comply with this + will most likely cause your data to be received wrongly and messed up. + + Recent curl versions can in fact url-encode POST data for you, like this: + + curl --data-urlencode "name=I am Daniel" http://www.example.com + + If you repeat --data several times on the command line, curl will + concatenate all the given data pieces - and put a '&' symbol between each + data segment. + + 4.4 File Upload POST + + Back in late 1995 they defined an additional way to post data over HTTP. It + is documented in the RFC 1867, why this method sometimes is referred to as + RFC1867-posting. + + This method is mainly designed to better support file uploads. A form that + allows a user to upload a file could be written like this in HTML: + +
+        <form method="POST" enctype='multipart/form-data' action="upload.cgi">
+          <input type=file name=upload>
+          <input type=submit name=press value="OK">
+        </form>
+ + This clearly shows that the Content-Type about to be sent is + multipart/form-data. + + To post to a form like this with curl, you enter a command line like: + + curl --form upload=@localfilename --form press=OK [URL] + + 4.5 Hidden Fields + + A very common way for HTML based applications to pass state information + between pages is to add hidden fields to the forms. Hidden fields are + already filled in, they aren't displayed to the user and they get passed + along just as all the other fields. + + A similar example form with one visible field, one hidden field and one + submit button could look like: + +
+        <form method="POST" action="foobar.cgi">
+          <input type=text name="birthyear">
+          <input type=hidden name="person" value="daniel">
+          <input type=submit name="press" value="OK">
+        </form>
+ + To POST this with curl, you won't have to think about if the fields are + hidden or not. To curl they're all the same: + + curl --data "birthyear=1905&press=OK&person=daniel" [URL] + + 4.6 Figure Out What A POST Looks Like + + When you're about fill in a form and send to a server by using curl instead + of a browser, you're of course very interested in sending a POST exactly the + way your browser does. + + An easy way to get to see this, is to save the HTML page with the form on + your local disk, modify the 'method' to a GET, and press the submit button + (you could also change the action URL if you want to). + + You will then clearly see the data get appended to the URL, separated with a + '?'-letter as GET forms are supposed to. + +5. HTTP upload + + 5.1 PUT + + Perhaps the best way to upload data to a HTTP server is to use PUT. Then + again, this of course requires that someone put a program or script on the + server end that knows how to receive a HTTP PUT stream. + + Put a file to a HTTP server with curl: + + curl --upload-file uploadfile http://www.example.com/receive.cgi + +6. HTTP Authentication + + 6.1 Basic Authentication + + HTTP Authentication is the ability to tell the server your username and + password so that it can verify that you're allowed to do the request you're + doing. The Basic authentication used in HTTP (which is the type curl uses by + default) is *plain* *text* based, which means it sends username and password + only slightly obfuscated, but still fully readable by anyone that sniffs on + the network between you and the remote server. + + To tell curl to use a user and password for authentication: + + curl --user name:password http://www.example.com + + 6.2 Other Authentication + + The site might require a different authentication method (check the headers + returned by the server), and then --ntlm, --digest, --negotiate or even + --anyauth might be options that suit you. + + 6.3 Proxy Authentication + + Sometimes your HTTP access is only available through the use of a HTTP + proxy. This seems to be especially common at various companies. A HTTP proxy + may require its own user and password to allow the client to get through to + the Internet. To specify those with curl, run something like: + + curl --proxy-user proxyuser:proxypassword curl.haxx.se + + If your proxy requires the authentication to be done using the NTLM method, + use --proxy-ntlm, if it requires Digest use --proxy-digest. + + If you use any one of these user+password options but leave out the password + part, curl will prompt for the password interactively. + + 6.4 Hiding credentials + + Do note that when a program is run, its parameters might be possible to see + when listing the running processes of the system. Thus, other users may be + able to watch your passwords if you pass them as plain command line + options. There are ways to circumvent this. + + It is worth noting that while this is how HTTP Authentication works, very + many web sites will not use this concept when they provide logins etc. See + the Web Login chapter further below for more details on that. + +7. More HTTP Headers + + 7.1 Referer + + A HTTP request may include a 'referer' field (yes it is misspelled), which + can be used to tell from which URL the client got to this particular + resource. Some programs/scripts check the referer field of requests to verify + that this wasn't arriving from an external site or an unknown page. 
While + this is a stupid way to check something so easily forged, many scripts still + do it. Using curl, you can put anything you want in the referer-field and + thus more easily be able to fool the server into serving your request. + + Use curl to set the referer field with: + + curl --referer http://www.example.come http://www.example.com + + 7.2 User Agent + + Very similar to the referer field, all HTTP requests may set the User-Agent + field. It names what user agent (client) that is being used. Many + applications use this information to decide how to display pages. Silly web + programmers try to make different pages for users of different browsers to + make them look the best possible for their particular browsers. They usually + also do different kinds of javascript, vbscript etc. + + At times, you will see that getting a page with curl will not return the same + page that you see when getting the page with your browser. Then you know it + is time to set the User Agent field to fool the server into thinking you're + one of those browsers. + + To make curl look like Internet Explorer 5 on a Windows 2000 box: + + curl --user-agent "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)" [URL] + + Or why not look like you're using Netscape 4.73 on an old Linux box: + + curl --user-agent "Mozilla/4.73 [en] (X11; U; Linux 2.2.15 i686)" [URL] + +8. Redirects + + 8.1 Location header + + When a resource is requested from a server, the reply from the server may + include a hint about where the browser should go next to find this page, or a + new page keeping newly generated output. The header that tells the browser + to redirect is Location:. + + Curl does not follow Location: headers by default, but will simply display + such pages in the same manner it displays all HTTP replies. It does however + feature an option that will make it attempt to follow the Location: pointers. + + To tell curl to follow a Location: + + curl --location http://www.example.com + + If you use curl to POST to a site that immediately redirects you to another + page, you can safely use --location (-L) and --data/--form together. Curl will + only use POST in the first request, and then revert to GET in the following + operations. + + 8.2 Other redirects + + Browser typically support at least two other ways of redirects that curl + doesn't: first the html may contain a meta refresh tag that asks the browser + to load a specific URL after a set number of seconds, or it may use + javascript to do it. + +9. Cookies + + 9.1 Cookie Basics + + The way the web browsers do "client side state control" is by using + cookies. Cookies are just names with associated contents. The cookies are + sent to the client by the server. The server tells the client for what path + and host name it wants the cookie sent back, and it also sends an expiration + date and a few more properties. + + When a client communicates with a server with a name and path as previously + specified in a received cookie, the client sends back the cookies and their + contents to the server, unless of course they are expired. + + Many applications and servers use this method to connect a series of requests + into a single logical session. To be able to use curl in such occasions, we + must be able to record and send back cookies the way the web application + expects them. The same way browsers deal with them. 
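+
+ As a quick sketch of that (with a made-up site), the first request below
+ stores whatever cookies the server sets and the second request sends them
+ back again. The individual options are described in the next section:
+
+     curl --cookie-jar session.txt http://www.example.com/login.html
+
+     curl --cookie session.txt --cookie-jar session.txt \
+          http://www.example.com/members.html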
+ + 9.2 Cookie options + + The simplest way to send a few cookies to the server when getting a page with + curl is to add them on the command line like: + + curl --cookie "name=Daniel" http://www.example.com + + Cookies are sent as common HTTP headers. This is practical as it allows curl + to record cookies simply by recording headers. Record cookies with curl by + using the --dump-header (-D) option like: + + curl --dump-header headers_and_cookies http://www.example.com + + (Take note that the --cookie-jar option described below is a better way to + store cookies.) + + Curl has a full blown cookie parsing engine built-in that comes in use if you + want to reconnect to a server and use cookies that were stored from a + previous connection (or hand-crafted manually to fool the server into + believing you had a previous connection). To use previously stored cookies, + you run curl like: + + curl --cookie stored_cookies_in_file http://www.example.com + + Curl's "cookie engine" gets enabled when you use the --cookie option. If you + only want curl to understand received cookies, use --cookie with a file that + doesn't exist. Example, if you want to let curl understand cookies from a + page and follow a location (and thus possibly send back cookies it received), + you can invoke it like: + + curl --cookie nada --location http://www.example.com + + Curl has the ability to read and write cookie files that use the same file + format that Netscape and Mozilla once used. It is a convenient way to share + cookies between scripts or invokes. The --cookie (-b) switch automatically + detects if a given file is such a cookie file and parses it, and by using the + --cookie-jar (-c) option you'll make curl write a new cookie file at the end + of an operation: + + curl --cookie cookies.txt --cookie-jar newcookies.txt \ + http://www.example.com + +10. HTTPS + + 10.1 HTTPS is HTTP secure + + There are a few ways to do secure HTTP transfers. By far the most common + protocol for doing this is what is generally known as HTTPS, HTTP over + SSL. SSL encrypts all the data that is sent and received over the network and + thus makes it harder for attackers to spy on sensitive information. + + SSL (or TLS as the latest version of the standard is called) offers a + truckload of advanced features to allow all those encryptions and key + infrastructure mechanisms encrypted HTTP requires. + + Curl supports encrypted fetches when built to use a TLS library and it can be + built to use one out of a fairly large set of libraries - "curl -V" will show + which one your curl was built to use (if any!). To get a page from a HTTPS + server, simply run curl like: + + curl https://secure.example.com + + 10.2 Certificates + + In the HTTPS world, you use certificates to validate that you are the one + you claim to be, as an addition to normal passwords. Curl supports client- + side certificates. All certificates are locked with a pass phrase, which you + need to enter before the certificate can be used by curl. The pass phrase + can be specified on the command line or if not, entered interactively when + curl queries for it. Use a certificate with curl on a HTTPS server like: + + curl --cert mycert.pem https://secure.example.com + + curl also tries to verify that the server is who it claims to be, by + verifying the server's certificate against a locally stored CA cert + bundle. Failing the verification will cause curl to deny the connection. 
You + must then use --insecure (-k) in case you want to tell curl to ignore that + the server can't be verified. + + More about server certificate verification and ca cert bundles can be read + in the SSLCERTS document, available online here: + + https://curl.haxx.se/docs/sslcerts.html + + At times you may end up with your own CA cert store and then you can tell + curl to use that to verify the server's certificate: + + curl --cacert ca-bundle.pem https://example.com/ + + +11. Custom Request Elements + +11.1 Modify method and headers + + Doing fancy stuff, you may need to add or change elements of a single curl + request. + + For example, you can change the POST request to a PROPFIND and send the data + as "Content-Type: text/xml" (instead of the default Content-Type) like this: + + curl --data "" --header "Content-Type: text/xml" \ + --request PROPFIND url.com + + You can delete a default header by providing one without content. Like you + can ruin the request by chopping off the Host: header: + + curl --header "Host:" http://www.example.com + + You can add headers the same way. Your server may want a "Destination:" + header, and you can add it: + + curl --header "Destination: http://nowhere" http://example.com + + 11.2 More on changed methods + + It should be noted that curl selects which methods to use on its own + depending on what action to ask for. -d will do POST, -I will do HEAD and so + on. If you use the --request / -X option you can change the method keyword + curl selects, but you will not modify curl's behavior. This means that if you + for example use -d "data" to do a POST, you can modify the method to a + PROPFIND with -X and curl will still think it sends a POST. You can change + the normal GET to a POST method by simply adding -X POST in a command line + like: + + curl -X POST http://example.org/ + + ... but curl will still think and act as if it sent a GET so it won't send any + request body etc. + + +12. Web Login + + 12.1 Some login tricks + + While not strictly just HTTP related, it still causes a lot of people problems + so here's the executive run-down of how the vast majority of all login forms + work and how to login to them using curl. + + It can also be noted that to do this properly in an automated fashion, you + will most certainly need to script things and do multiple curl invokes etc. + + First, servers mostly use cookies to track the logged-in status of the + client, so you will need to capture the cookies you receive in the + responses. Then, many sites also set a special cookie on the login page (to + make sure you got there through their login page) so you should make a habit + of first getting the login-form page to capture the cookies set there. + + Some web-based login systems feature various amounts of javascript, and + sometimes they use such code to set or modify cookie contents. Possibly they + do that to prevent programmed logins, like this manual describes how to... + Anyway, if reading the code isn't enough to let you repeat the behavior + manually, capturing the HTTP requests done by your browsers and analyzing the + sent cookies is usually a working method to work out how to shortcut the + javascript need. + + In the actual
tag for the login, lots of sites fill-in random/session + or otherwise secretly generated hidden tags and you may need to first capture + the HTML code for the login form and extract all the hidden fields to be able + to do a proper login POST. Remember that the contents need to be URL encoded + when sent in a normal POST. + +13. Debug + + 13.1 Some debug tricks + + Many times when you run curl on a site, you'll notice that the site doesn't + seem to respond the same way to your curl requests as it does to your + browser's. + + Then you need to start making your curl requests more similar to your + browser's requests: + + * Use the --trace-ascii option to store fully detailed logs of the requests + for easier analyzing and better understanding + + * Make sure you check for and use cookies when needed (both reading with + --cookie and writing with --cookie-jar) + + * Set user-agent to one like a recent popular browser does + + * Set referer like it is set by the browser + + * If you use POST, make sure you send all the fields and in the same order as + the browser does it. + + A very good helper to make sure you do this right, is the LiveHTTPHeader tool + that lets you view all headers you send and receive with Mozilla/Firefox + (even when using HTTPS). Chrome features similar functionality out of the box + among the developer's tools. + + A more raw approach is to capture the HTTP traffic on the network with tools + such as ethereal or tcpdump and check what headers that were sent and + received by the browser. (HTTPS makes this technique inefficient.) + +14. References + + 14.1 Standards + + RFC 7230 is a must to read if you want in-depth understanding of the HTTP + protocol + + RFC 3986 explains the URL syntax + + RFC 1867 defines the HTTP post upload format + + RFC 6525 defines how HTTP cookies work + + 14.2 Sites + + https://curl.haxx.se is the home of the curl project diff --git a/deps-win32/curl-7.54.1/docs/VERSIONS b/deps-win32/curl-7.54.1/docs/VERSIONS new file mode 100644 index 0000000..72a4547 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/VERSIONS @@ -0,0 +1,56 @@ +Version Numbers and Releases +============================ + + Curl is not only curl. Curl is also libcurl. They're actually individually + versioned, but they mostly follow each other rather closely. + + The version numbering is always built up using the same system: + + X.Y.Z + + - X is main version number + - Y is release number + - Z is patch number + +## Bumping numbers + + One of these numbers will get bumped in each new release. The numbers to the + right of a bumped number will be reset to zero. If Z is zero, it may not be + included in the version number. + + The main version number will get bumped when *really* big, world colliding + changes are made. The release number is bumped when changes are performed or + things/features are added. The patch number is bumped when the changes are + mere bugfixes. + + It means that after release 1.2.3, we can release 2.0 if something really big + has been made, 1.3 if not that big changes were made or 1.2.4 if mostly bugs + were fixed. + + Bumping, as in increasing the number with 1, is unconditionally only + affecting one of the numbers (except the ones to the right of it, that may be + set to zero). 1 becomes 2, 3 becomes 4, 9 becomes 10, 88 becomes 89 and 99 + becomes 100. So, after 1.2.9 comes 1.2.10. After 3.99.3, 3.100 might come. 
+ + All original curl source release archives are named according to the libcurl + version (not according to the curl client version that, as said before, might + differ). + + As a service to any application that might want to support new libcurl + features while still being able to build with older versions, all releases + have the libcurl version stored in the curl/curlver.h file using a static + numbering scheme that can be used for comparison. The version number is + defined as: + + #define LIBCURL_VERSION_NUM 0xXXYYZZ + + Where XX, YY and ZZ are the main version, release and patch numbers in + hexadecimal. All three number fields are always represented using two digits + (eight bits each). 1.2 would appear as "0x010200" while version 9.11.7 + appears as "0x090b07". + + This 6-digit hexadecimal number is always a greater number in a more recent + release. It makes comparisons with greater than and less than work. + + This number is also available as three separate defines: + `LIBCURL_VERSION_MAJOR`, `LIBCURL_VERSION_MINOR` and `LIBCURL_VERSION_PATCH`. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/CMakeLists.txt b/deps-win32/curl-7.54.1/docs/cmdline-opts/CMakeLists.txt new file mode 100644 index 0000000..5aa20df --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/CMakeLists.txt @@ -0,0 +1,12 @@ +set(MANPAGE "${CMAKE_BINARY_DIR}/docs/curl.1") + +# Load DPAGES and OTHERPAGES from shared file +transform_makefile_inc("Makefile.inc" "${CMAKE_CURRENT_BINARY_DIR}/Makefile.inc.cmake") +include("${CMAKE_CURRENT_BINARY_DIR}/Makefile.inc.cmake") + +add_custom_command(OUTPUT "${MANPAGE}" + COMMAND "${PERL_EXECUTABLE}" "${CMAKE_CURRENT_SOURCE_DIR}/gen.pl" mainpage "${CMAKE_CURRENT_SOURCE_DIR}" > "${MANPAGE}" + DEPENDS ${DPAGES} ${OTHERPAGES} + VERBATIM +) +add_custom_target(generate-curl.1 DEPENDS "${MANPAGE}") diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/MANPAGE.md b/deps-win32/curl-7.54.1/docs/cmdline-opts/MANPAGE.md new file mode 100644 index 0000000..3a8270b --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/MANPAGE.md @@ -0,0 +1,52 @@ +# curl man page generator + +This is the curl man page generator. It generates a single nroff man page +output from the set of sources files in this directory. + +There is one source file for each supported command line option. The format is +described below. + +## Option files + +Each command line option is described in a file named `.d`, where +option name is written without any prefixing dashes. Like the file name for +the -v, --verbose option is named `verbose.d`. + +Each file has a set of meta-data and a body of text. + +### Meta-data + + Short: (single letter, without dash) + Long: (long form name, without dashes) + Arg: (the argument the option takes) + Magic: (description of "magic" options) + Tags: (space separated list) + Protocols: (space separated list for which protocols this option works) + Added: (version number in which this was added) + Mutexed: (space separated list of options this overrides, no dashes) + Requires: (space separated list of features this requires, no dashes) + See-also: (space separated list of related options, no dashes) + Help: (short text for the --help output for this option) + --- (end of meta-data) + +### Body + +The body of the description. Only refer to options with their long form option +version, like --verbose. The output generator will replace such with the +correct markup that shows both short and long version. 
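As a purely illustrative sketch (no such option exists; every value below is
made up), a complete option file combining the meta-data and the body could
look like this:

    Short: z
    Long: example-option
    Arg: <value>
    Help: Do the example thing
    Protocols: HTTP
    Added: 7.54.0
    See-also: verbose
    ---
    Tells curl to do the example thing with <value>. Refer to other options
    only by their long names, like --verbose, so the generator can insert the
    proper markup.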
+ +## Header + +`page-header` is the nroff formatted file that will be output before the +generated options output for the master man page. + +## Generate + +`./gen.pl mainpage` + +This command outputs a single huge nroff file, meant to become `curl.1`. The +full curl man page. + +`./gen.pl listhelp` + +Generates a full `curl --help` output for all known command line options. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/Makefile.am b/deps-win32/curl-7.54.1/docs/cmdline-opts/Makefile.am new file mode 100644 index 0000000..e6ecf7a --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/Makefile.am @@ -0,0 +1,34 @@ +#*************************************************************************** +# _ _ ____ _ +# Project ___| | | | _ \| | +# / __| | | | |_) | | +# | (__| |_| | _ <| |___ +# \___|\___/|_| \_\_____| +# +# Copyright (C) 1998 - 2017, Daniel Stenberg, , et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at https://curl.haxx.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. +# +########################################################################### + +AUTOMAKE_OPTIONS = foreign no-dependencies + +MANPAGE = $(top_builddir)/docs/curl.1 + +include Makefile.inc + +EXTRA_DIST = $(DPAGES) MANPAGE.md gen.pl $(OTHERPAGES) CMakeLists.txt + +all: $(MANPAGE) + +$(MANPAGE): $(DPAGES) $(OTHERPAGES) Makefile.inc + @PERL@ $(srcdir)/gen.pl mainpage $(srcdir) > $(MANPAGE) diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/Makefile.inc b/deps-win32/curl-7.54.1/docs/cmdline-opts/Makefile.inc new file mode 100644 index 0000000..4577fac --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/Makefile.inc @@ -0,0 +1,47 @@ +# Shared between Makefile.am and CMakeLists.txt + +DPAGES = abstract-unix-socket.d anyauth.d append.d basic.d cacert.d capath.d cert.d \ + cert-status.d cert-type.d ciphers.d compressed.d config.d \ + connect-timeout.d connect-to.d continue-at.d cookie.d cookie-jar.d \ + create-dirs.d crlf.d crlfile.d data-ascii.d data-binary.d data.d \ + data-raw.d data-urlencode.d delegation.d digest.d disable.d \ + disable-eprt.d disable-epsv.d dns-interface.d dns-ipv4-addr.d \ + dns-ipv6-addr.d dns-servers.d dump-header.d egd-file.d engine.d \ + expect100-timeout.d fail.d fail-early.d false-start.d \ + form.d form-string.d ftp-account.d ftp-alternative-to-user.d \ + ftp-create-dirs.d ftp-method.d ftp-pasv.d ftp-port.d ftp-pret.d \ + ftp-skip-pasv-ip.d ftp-ssl-ccc.d ftp-ssl-ccc-mode.d ftp-ssl-control.d \ + get.d globoff.d head.d header.d help.d hostpubmd5.d http1.0.d \ + http1.1.d http2.d http2-prior-knowledge.d ignore-content-length.d \ + include.d insecure.d interface.d ipv4.d ipv6.d junk-session-cookies.d \ + keepalive-time.d key.d key-type.d krb.d libcurl.d limit-rate.d \ + list-only.d local-port.d location.d location-trusted.d \ + login-options.d mail-auth.d mail-from.d mail-rcpt.d manual.d \ + max-filesize.d max-redirs.d max-time.d metalink.d negotiate.d netrc.d \ + netrc-file.d netrc-optional.d next.d no-alpn.d no-buffer.d \ + no-keepalive.d no-npn.d noproxy.d no-sessionid.d ntlm.d ntlm-wb.d \ + oauth2-bearer.d output.d pass.d path-as-is.d pinnedpubkey.d 
post301.d \ + post302.d post303.d preproxy.d progress-bar.d proto.d proto-default.d \ + proto-redir.d proxy1.0.d proxy-anyauth.d proxy-basic.d proxy-cacert.d \ + proxy-capath.d proxy-cert.d proxy-cert-type.d proxy-ciphers.d \ + proxy-crlfile.d proxy.d proxy-digest.d proxy-header.d \ + proxy-insecure.d proxy-key.d proxy-key-type.d proxy-negotiate.d \ + proxy-ntlm.d proxy-pass.d proxy-service-name.d \ + proxy-ssl-allow-beast.d proxy-tlsauthtype.d proxy-tlspassword.d \ + proxy-tlsuser.d proxy-tlsv1.d proxytunnel.d proxy-user.d pubkey.d \ + quote.d random-file.d range.d raw.d referer.d remote-header-name.d \ + remote-name-all.d remote-name.d remote-time.d request.d resolve.d \ + retry-connrefused.d retry.d retry-delay.d retry-max-time.d sasl-ir.d \ + service-name.d show-error.d silent.d socks4a.d socks4.d socks5.d \ + socks5-gssapi-nec.d socks5-gssapi-service.d socks5-hostname.d \ + speed-limit.d speed-time.d ssl-allow-beast.d ssl.d ssl-no-revoke.d \ + ssl-reqd.d sslv2.d sslv3.d stderr.d suppress-connect-headers.d \ + tcp-fastopen.d tcp-nodelay.d \ + telnet-option.d tftp-blksize.d tftp-no-options.d time-cond.d \ + tls-max.d \ + tlsauthtype.d tlspassword.d tlsuser.d tlsv1.0.d tlsv1.1.d tlsv1.2.d \ + tlsv1.3.d tlsv1.d trace-ascii.d trace.d trace-time.d tr-encoding.d \ + unix-socket.d upload-file.d url.d use-ascii.d user-agent.d user.d \ + verbose.d version.d write-out.d xattr.d + +OTHERPAGES = page-footer page-header diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/abstract-unix-socket.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/abstract-unix-socket.d new file mode 100644 index 0000000..1fda4e5 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/abstract-unix-socket.d @@ -0,0 +1,9 @@ +Long: abstract-unix-socket +Arg: +Help: Connect via abstract Unix domain socket +Added: 7.53.0 +Protocols: HTTP +--- +Connect through an abstract Unix domain socket, instead of using the network. +Note: netstat shows the path of an abstract socket prefixed with '@', however +the argument should not have this leading character. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/anyauth.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/anyauth.d new file mode 100644 index 0000000..c32d1ed --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/anyauth.d @@ -0,0 +1,17 @@ +Long: anyauth +Help: Pick any authentication method +Protocols: HTTP +See-also: proxy-anyauth basic digest +--- +Tells curl to figure out authentication method by itself, and use the most +secure one the remote site claims to support. This is done by first doing a +request and checking the response-headers, thus possibly inducing an extra +network round-trip. This is used instead of setting a specific authentication +method, which you can do with --basic, --digest, --ntlm, and --negotiate. + +Using --anyauth is not recommended if you do uploads from stdin, since it may +require data to be sent twice and then the client must be able to rewind. If +the need should arise when uploading from stdin, the upload operation will +fail. + +Used together with --user. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/append.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/append.d new file mode 100644 index 0000000..f001b12 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/append.d @@ -0,0 +1,8 @@ +Short: a +Long: append +Help: Append to target file when uploading +Protocols: FTP SFTP +--- +When used in an upload, this makes curl append to the target file instead of +overwriting it. 
If the remote file doesn't exist, it will be created. Note +that this flag is ignored by some SFTP servers (including OpenSSH). diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/basic.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/basic.d new file mode 100644 index 0000000..09d42af --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/basic.d @@ -0,0 +1,11 @@ +Long: basic +Help: Use HTTP Basic Authentication +See-also: proxy-basic +Protocols: HTTP +--- +Tells curl to use HTTP Basic authentication with the remote host. This is the +default and this option is usually pointless, unless you use it to override a +previously set option that sets a different authentication method (such as +--ntlm, --digest, or --negotiate). + +Used together with --user. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/cacert.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/cacert.d new file mode 100644 index 0000000..04e1139 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/cacert.d @@ -0,0 +1,28 @@ +Long: cacert +Arg: +Help: CA certificate to verify peer against +Protocols: TLS +--- +Tells curl to use the specified certificate file to verify the peer. The file +may contain multiple CA certificates. The certificate(s) must be in PEM +format. Normally curl is built to use a default file for this, so this option +is typically used to alter that default file. + +curl recognizes the environment variable named 'CURL_CA_BUNDLE' if it is +set, and uses the given path as a path to a CA cert bundle. This option +overrides that variable. + +The windows version of curl will automatically look for a CA certs file named +\'curl-ca-bundle.crt\', either in the same directory as curl.exe, or in the +Current Working Directory, or in any folder along your PATH. + +If curl is built against the NSS SSL library, the NSS PEM PKCS#11 module +(libnsspem.so) needs to be available for this option to work properly. + +(iOS and macOS only) If curl is built against Secure Transport, then this +option is supported for backward compatibility with other SSL engines, but it +should not be set. If the option is not set, then curl will use the +certificates in the system and user Keychain to verify the peer, which is the +preferred method of verifying the peer's certificate chain. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/capath.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/capath.d new file mode 100644 index 0000000..0763f7a --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/capath.d @@ -0,0 +1,15 @@ +Long: capath +Arg: +Help: CA directory to verify peer against +Protocols: TLS +--- +Tells curl to use the specified certificate directory to verify the +peer. Multiple paths can be provided by separating them with ":" (e.g. +\&"path1:path2:path3"). The certificates must be in PEM format, and if curl is +built against OpenSSL, the directory must have been processed using the +c_rehash utility supplied with OpenSSL. Using --capath can allow +OpenSSL-powered curl to make SSL-connections much more efficiently than using +--cacert if the --cacert file contains many CA certificates. + +If this option is set, the default capath value will be ignored, and if it is +used several times, the last one will be used. 
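As an illustration only (the bundle file name and the directory path are
assumptions about a typical OpenSSL setup), the two options are used like
this:

 curl --cacert ca-bundle.pem https://example.com/
 curl --capath /etc/ssl/certs https://example.com/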
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/cert-status.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/cert-status.d new file mode 100644 index 0000000..f1aaa21 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/cert-status.d @@ -0,0 +1,13 @@ +Long: cert-status +Protocols: TLS +Added: 7.41.0 +Help: Verify the status of the server certificate +--- +Tells curl to verify the status of the server certificate by using the +Certificate Status Request (aka. OCSP stapling) TLS extension. + +If this option is enabled and the server sends an invalid (e.g. expired) +response, if the response suggests that the server certificate has been revoked, +or no response at all is received, the verification fails. + +This is currently only implemented in the OpenSSL, GnuTLS and NSS backends. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/cert-type.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/cert-type.d new file mode 100644 index 0000000..a04bdce --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/cert-type.d @@ -0,0 +1,10 @@ +Long: cert-type +Protocols: TLS +Arg: +Help: Certificate file type (DER/PEM/ENG) +See-also: cert key key-type +--- +Tells curl what certificate type the provided certificate is in. PEM, DER and +ENG are recognized types. If not specified, PEM is assumed. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/cert.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/cert.d new file mode 100644 index 0000000..0cd5d53 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/cert.d @@ -0,0 +1,32 @@ +Short: E +Long: cert +Arg: +Help: Client certificate file and password +Protocols: TLS +See-also: cert-type key key-type +--- +Tells curl to use the specified client certificate file when getting a file +with HTTPS, FTPS or another SSL-based protocol. The certificate must be in +PKCS#12 format if using Secure Transport, or PEM format if using any other +engine. If the optional password isn't specified, it will be queried for on +the terminal. Note that this option assumes a \&"certificate" file that is the +private key and the client certificate concatenated! See --cert and --key to +specify them independently. + +If curl is built against the NSS SSL library then this option can tell +curl the nickname of the certificate to use within the NSS database defined +by the environment variable SSL_DIR (or by default /etc/pki/nssdb). If the +NSS PEM PKCS#11 module (libnsspem.so) is available then PEM files may be +loaded. If you want to use a file from the current directory, please precede +it with "./" prefix, in order to avoid confusion with a nickname. If the +nickname contains ":", it needs to be preceded by "\\" so that it is not +recognized as password delimiter. If the nickname contains "\\", it needs to +be escaped as "\\\\" so that it is not recognized as an escape character. + +(iOS and macOS only) If curl is built against Secure Transport, then the +certificate string can either be the name of a certificate/private key in the +system or user keychain, or the path to a PKCS#12-encoded certificate and +private key. If you want to use a file from the current directory, please +precede it with "./" prefix, in order to avoid confusion with a nickname. + +If this option is used several times, the last one will be used. 
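For example (the file names and password are made up for illustration), a PEM
file holding the concatenated certificate and private key can be passed with
an optional password, or the key can be kept in a separate file with --key:

 curl --cert mycert.pem:s3cret https://example.com/
 curl --cert mycert.pem --key mykey.pem https://example.com/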
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ciphers.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ciphers.d new file mode 100644 index 0000000..d3bac60 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ciphers.d @@ -0,0 +1,11 @@ +Long: ciphers +Arg: +help: SSL ciphers to use +Protocols: TLS +--- +Specifies which ciphers to use in the connection. The list of ciphers must +specify valid ciphers. Read up on SSL cipher list details on this URL: + + https://curl.haxx.se/docs/ssl-ciphers.html + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/compressed.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/compressed.d new file mode 100644 index 0000000..dc130c1 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/compressed.d @@ -0,0 +1,7 @@ +Long: compressed +Help: Request compressed response +Protocols: HTTP +--- +Request a compressed response using one of the algorithms curl supports, and +save the uncompressed document. If this option is used and the server sends +an unsupported encoding, curl will report an error. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/config.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/config.d new file mode 100644 index 0000000..105d628 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/config.d @@ -0,0 +1,61 @@ +Long: config +Arg: +Help: Read config from a file +Short: K +--- + +Specify a text file to read curl arguments from. The command line arguments +found in the text file will be used as if they were provided on the command +line. + +Options and their parameters must be specified on the same line in the file, +separated by whitespace, colon, or the equals sign. Long option names can +optionally be given in the config file without the initial double dashes and +if so, the colon or equals characters can be used as separators. If the option +is specified with one or two dashes, there can be no colon or equals character +between the option and its parameter. + +If the parameter is to contain whitespace, the parameter must be enclosed +within quotes. Within double quotes, the following escape sequences are +available: \\\\, \\", \\t, \\n, \\r and \\v. A backslash preceding any other +letter is ignored. If the first column of a config line is a '#' character, +the rest of the line will be treated as a comment. Only write one option per +physical line in the config file. + +Specify the filename to --config as '-' to make curl read the file from stdin. + +Note that to be able to specify a URL in the config file, you need to specify +it using the --url option, and not by simply writing the URL on its own +line. So, it could look similar to this: + +url = "https://curl.haxx.se/docs/" + +When curl is invoked, it (unless --disable is used) checks for a default +config file and uses it if found. The default config file is checked for in +the following places in this order: + +1) curl tries to find the "home dir": It first checks for the CURL_HOME and +then the HOME environment variables. Failing that, it uses getpwuid() on +Unix-like systems (which returns the home dir given the current user in your +system). On Windows, it then checks for the APPDATA variable, or as a last +resort the '%USERPROFILE%\\Application Data'. + +2) On windows, if there is no _curlrc file in the home dir, it checks for one +in the same dir the curl executable is placed. On Unix-like systems, it will +simply try to load .curlrc from the determined home dir. 
+ +.nf +# --- Example file --- +# this is a comment +url = "example.com" +output = "curlhere.html" +user-agent = "superagent/1.0" + +# and fetch another URL too +url = "example.com/docs/manpage.html" +-O +referer = "http://nowhereatall.example.com/" +# --- End of example file --- +.fi + +This option can be used multiple times to load multiple config files. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/connect-timeout.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/connect-timeout.d new file mode 100644 index 0000000..3a32d86 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/connect-timeout.d @@ -0,0 +1,11 @@ +Long: connect-timeout +Arg: +Help: Maximum time allowed for connection +See-also: max-time +--- +Maximum time in seconds that you allow curl's connection to take. This only +limits the connection phase, so if curl connects within the given period it +will continue - if not it will exit. Since version 7.32.0, this option +accepts decimal values. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/connect-to.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/connect-to.d new file mode 100644 index 0000000..3fa0568 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/connect-to.d @@ -0,0 +1,18 @@ +Long: connect-to +Arg: +Help: Connect to host +Added: 7.49.0 +See-also: resolve header +--- + +For a request to the given HOST:PORT pair, connect to +CONNECT-TO-HOST:CONNECT-TO-PORT instead. This option is suitable to direct +requests at a specific server, e.g. at a specific cluster node in a cluster of +servers. This option is only used to establish the network connection. It +does NOT affect the hostname/port that is used for TLS/SSL (e.g. SNI, +certificate verification) or for the application protocols. "host" and "port" +may be the empty string, meaning "any host/port". "connect-to-host" and +"connect-to-port" may also be the empty string, meaning "use the request's +original host/port". + +This option can be used many times to add many connect rules. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/continue-at.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/continue-at.d new file mode 100644 index 0000000..733f494 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/continue-at.d @@ -0,0 +1,15 @@ +Short: C +Long: continue-at +Arg: +Help: Resumed transfer offset +See-also: range +--- +Continue/Resume a previous file transfer at the given offset. The given offset +is the exact number of bytes that will be skipped, counting from the beginning +of the source file before it is transferred to the destination. If used with +uploads, the FTP server command SIZE will not be used by curl. + +Use "-C -" to tell curl to automatically find out where/how to resume the +transfer. It then uses the given output/input files to figure that out. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/cookie-jar.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/cookie-jar.d new file mode 100644 index 0000000..da79777 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/cookie-jar.d @@ -0,0 +1,24 @@ +Short: c +Long: cookie-jar +Arg: +Protocols: HTTP +Help: Write cookies to after operation +--- +Specify to which file you want curl to write all cookies after a completed +operation. Curl writes all cookies from its in-memory cookie storage to the +given file at the end of operations. If no cookies are known, no data will be +written. 
The file will be written using the Netscape cookie file format. If +you set the file name to a single dash, "-", the cookies will be written to +stdout. + +This command line option will activate the cookie engine that makes curl +record and use cookies. Another way to activate it is to use the --cookie +option. + +If the cookie jar can't be created or written to, the whole curl operation +won't fail or even report an error clearly. Using --verbose will get a warning +displayed, but that is the only visible feedback you get about this possibly +lethal situation. + +If this option is used several times, the last specified file name will be +used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/cookie.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/cookie.d new file mode 100644 index 0000000..383adda --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/cookie.d @@ -0,0 +1,36 @@ +Short: b +Long: cookie +Arg: +Protocols: HTTP +Help: Send cookies from string/file +--- +Pass the data to the HTTP server in the Cookie header. It is supposedly +the data previously received from the server in a "Set-Cookie:" line. The +data should be in the format "NAME1=VALUE1; NAME2=VALUE2". + +If no '=' symbol is used in the argument, it is instead treated as a filename +to read previously stored cookie from. This option also activates the cookie +engine which will make curl record incoming cookies, which may be handy if +you're using this in combination with the --location option or do multiple URL +transfers on the same invoke. + +The file format of the file to read cookies from should be plain HTTP headers +(Set-Cookie style) or the Netscape/Mozilla cookie file format. + +The file specified with --cookie is only used as input. No cookies will be +written to the file. To store cookies, use the --cookie-jar option. + +Exercise caution if you are using this option and multiple transfers may +occur. If you use the NAME1=VALUE1; format, or in a file use the Set-Cookie +format and don't specify a domain, then the cookie is sent for any domain +(even after redirects are followed) and cannot be modified by a server-set +cookie. If the cookie engine is enabled and a server sets a cookie of the same +name then both will be sent on a future transfer to that server, likely not +what you intended. To address these issues set a domain in Set-Cookie (doing +that will include sub domains) or use the Netscape format. + +If this option is used several times, the last one will be used. + +Users very often want to both read cookies from a file and write updated +cookies back to a file, so using both --cookie and --cookie-jar in the same +command line is common. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/create-dirs.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/create-dirs.d new file mode 100644 index 0000000..49e22e7 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/create-dirs.d @@ -0,0 +1,9 @@ +Long: create-dirs +Help: Create necessary local directory hierarchy +--- +When used in conjunction with the --output option, curl will create the +necessary local directory hierarchy as needed. This option creates the dirs +mentioned with the --output option, nothing else. If the --output file name +uses no dir or if the dirs it mentions already exist, no dir will be created. + +To create remote directories when using FTP or SFTP, try --ftp-create-dirs. 
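For instance (the URL and output path are only examples), this creates the
missing local directories named in the --output path before writing the file:

 curl --create-dirs --output archive/2017/index.html https://example.com/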
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/crlf.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/crlf.d new file mode 100644 index 0000000..f6694b6 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/crlf.d @@ -0,0 +1,7 @@ +Long: crlf +Help: Convert LF to CRLF in upload +Protocols: FTP SMTP +--- +Convert LF to CRLF in upload. Useful for MVS (OS/390). + +(SMTP added in 7.40.0) diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/crlfile.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/crlfile.d new file mode 100644 index 0000000..0fcc63c --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/crlfile.d @@ -0,0 +1,10 @@ +Long: crlfile +Arg: +Protocols: TLS +Help: Get a CRL list in PEM format from the given file +Added: 7.19.7 +--- +Provide a file using PEM format with a Certificate Revocation List that may +specify peer certificates that are to be considered revoked. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/data-ascii.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-ascii.d new file mode 100644 index 0000000..bda4abc --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-ascii.d @@ -0,0 +1,6 @@ +Long: data-ascii +Arg: +Help: HTTP POST ASCII data +Protocols: HTTP +--- +This is just an alias for --data. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/data-binary.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-binary.d new file mode 100644 index 0000000..c6721c6 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-binary.d @@ -0,0 +1,13 @@ +Long: data-binary +Arg: +Help: HTTP POST binary data +Protocols: HTTP +--- +This posts data exactly as specified with no extra processing whatsoever. + +If you start the data with the letter @, the rest should be a filename. Data +is posted in a similar manner as --data does, except that newlines and +carriage returns are preserved and conversions are never done. + +If this option is used several times, the ones following the first will append +data as described in --data. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/data-raw.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-raw.d new file mode 100644 index 0000000..7669b4a --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-raw.d @@ -0,0 +1,9 @@ +Long: data-raw +Arg: +Protocols: HTTP +Help: HTTP POST data, '@' allowed +Added: 7.43.0 +See-also: data +--- +This posts data similarly to --data but without the special +interpretation of the @ character. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/data-urlencode.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-urlencode.d new file mode 100644 index 0000000..9873f33 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/data-urlencode.d @@ -0,0 +1,33 @@ +Long: data-urlencode +Arg: +Help: HTTP POST data url encoded +Protocols: HTTP +See-also: data data-raw +Added: 7.18.0 +--- +This posts data, similar to the other --data options with the exception +that this performs URL-encoding. + +To be CGI-compliant, the part should begin with a \fIname\fP followed +by a separator and a content specification. The part can be passed to +curl using one of the following syntaxes: +.RS +.IP "content" +This will make curl URL-encode the content and pass that on. Just be careful +so that the content doesn't contain any = or @ symbols, as that will then make +the syntax match one of the other cases below! +.IP "=content" +This will make curl URL-encode the content and pass that on. 
The preceding = +symbol is not included in the data. +.IP "name=content" +This will make curl URL-encode the content part and pass that on. Note that +the name part is expected to be URL-encoded already. +.IP "@filename" +This will make curl load data from the given file (including any newlines), +URL-encode that data and pass it on in the POST. +.IP "name@filename" +This will make curl load data from the given file (including any newlines), +URL-encode that data and pass it on in the POST. The name part gets an equal +sign appended, resulting in \fIname=urlencoded-file-content\fP. Note that the +name is expected to be URL-encoded already. +.RE diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/data.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/data.d new file mode 100644 index 0000000..1572858 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/data.d @@ -0,0 +1,30 @@ +Long: data +Short: d +Arg: +Help: HTTP POST data +Protocols: HTTP +See-also: data-binary data-urlencode data-raw +Mutexed: form head upload +--- +Sends the specified data in a POST request to the HTTP server, in the same way +that a browser does when a user has filled in an HTML form and presses the +submit button. This will cause curl to pass the data to the server using the +content-type application/x-www-form-urlencoded. Compare to --form. + +--data-raw is almost the same but does not have a special interpretation of +the @ character. To post data purely binary, you should instead use the +--data-binary option. To URL-encode the value of a form field you may use +--data-urlencode. + +If any of these options is used more than once on the same command line, the +data pieces specified will be merged together with a separating +&-symbol. Thus, using '-d name=daniel -d skill=lousy' would generate a post +chunk that looks like \&'name=daniel&skill=lousy'. + +If you start the data with the letter @, the rest should be a file name to +read the data from, or - if you want curl to read the data from +stdin. Multiple files can also be specified. Posting data from a file named +'foobar' would thus be done with --data @foobar. When --data is told to read +from a file like that, carriage returns and newlines will be stripped out. If +you don't want the @ character to have a special interpretation use --data-raw +instead. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/delegation.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/delegation.d new file mode 100644 index 0000000..138d823 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/delegation.d @@ -0,0 +1,16 @@ +Long: delegation +Arg: +Help: GSS-API delegation permission +Protocols: GSS/kerberos +--- +Set LEVEL to tell the server what it is allowed to delegate when it +comes to user credentials. +.RS +.IP "none" +Don't allow any delegation. +.IP "policy" +Delegates if and only if the OK-AS-DELEGATE flag is set in the Kerberos +service ticket, which is a matter of realm policy. +.IP "always" +Unconditionally allow the server to delegate. +.RE diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/digest.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/digest.d new file mode 100644 index 0000000..5cdd925 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/digest.d @@ -0,0 +1,11 @@ +Long: digest +Help: Use HTTP Digest Authentication +Protocols: HTTP +Mutexed: basic ntlm negotiate +See-also: user proxy-digest anyauth +--- +Enables HTTP Digest authentication. 
This is an authentication scheme that +prevents the password from being sent over the wire in clear text. Use this in +combination with the normal --user option to set user name and password. + +If this option is used several times, only the first one is used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/disable-eprt.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/disable-eprt.d new file mode 100644 index 0000000..a1e53c0 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/disable-eprt.d @@ -0,0 +1,19 @@ +Long: disable-eprt +Help: Inhibit using EPRT or LPRT +Protocols: FTP +--- +Tell curl to disable the use of the EPRT and LPRT commands when doing active +FTP transfers. Curl will normally always first attempt to use EPRT, then LPRT +before using PORT, but with this option, it will use PORT right away. EPRT and +LPRT are extensions to the original FTP protocol, and may not work on all +servers, but they enable more functionality in a better way than the +traditional PORT command. + +--eprt can be used to explicitly enable EPRT again and --no-eprt is an alias +for --disable-eprt. + +If the server is accessed using IPv6, this option will have no effect as EPRT +is necessary then. + +Disabling EPRT only changes the active behavior. If you want to switch to +passive mode you need to not use --ftp-port or force it with --ftp-pasv. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/disable-epsv.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/disable-epsv.d new file mode 100644 index 0000000..6d2cb70 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/disable-epsv.d @@ -0,0 +1,16 @@ +Long: disable-epsv +Help: Inhibit using EPSV +Protocols: FTP +--- +(FTP) Tell curl to disable the use of the EPSV command when doing passive FTP +transfers. Curl will normally always first attempt to use EPSV before PASV, +but with this option, it will not try using EPSV. + +--epsv can be used to explicitly enable EPSV again and --no-epsv is an alias +for --disable-epsv. + +If the server is an IPv6 host, this option will have no effect as EPSV is +necessary then. + +Disabling EPSV only changes the passive behavior. If you want to switch to +active mode you need to use --ftp-port. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/disable.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/disable.d new file mode 100644 index 0000000..20b27b4 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/disable.d @@ -0,0 +1,7 @@ +Long: disable +Short: q +Help: Disable .curlrc +--- +If used as the first parameter on the command line, the \fIcurlrc\fP config +file will not be read and used. See the --config for details on the default +config file search path. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-interface.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-interface.d new file mode 100644 index 0000000..45e5af2 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-interface.d @@ -0,0 +1,11 @@ +Long: dns-interface +Arg: +Help: Interface to use for DNS requests +Protocols: DNS +See-also: dns-ipv4-addr dns-ipv6-addr +Added: 7.33.0 +Requires: c-ares +--- +Tell curl to send outgoing DNS requests through . This option is a +counterpart to --interface (which does not affect DNS). The supplied string +must be an interface name (not an address). 
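A minimal sketch, assuming a c-ares enabled build and an interface that is
actually named eth0 on the machine:

 curl --dns-interface eth0 https://example.com/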
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-ipv4-addr.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-ipv4-addr.d new file mode 100644 index 0000000..597b858 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-ipv4-addr.d @@ -0,0 +1,11 @@ +Long: dns-ipv4-addr +Arg: <address>
+Help: IPv4 address to use for DNS requests +Protocols: DNS +See-also: dns-interface dns-ipv6-addr +Added: 7.33.0 +Requires: c-ares +--- +Tell curl to bind to <address> when making IPv4 DNS requests, so that +the DNS requests originate from this address. The argument should be a +single IPv4 address. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-ipv6-addr.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-ipv6-addr.d new file mode 100644 index 0000000..581f019 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-ipv6-addr.d @@ -0,0 +1,11 @@ +Long: dns-ipv6-addr +Arg: <address>
+Help: IPv6 address to use for DNS requests +Protocols: DNS +See-also: dns-interface dns-ipv4-addr +Added: 7.33.0 +Requires: c-ares +--- +Tell curl to bind to when making IPv6 DNS requests, so that +the DNS requests originate from this address. The argument should be a +single IPv6 address. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-servers.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-servers.d new file mode 100644 index 0000000..a98fd07 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/dns-servers.d @@ -0,0 +1,10 @@ +Long: dns-servers +Arg: +Help: DNS server addrs to use +Requires: c-ares +Added: 7.33.0 +--- +Set the list of DNS servers to be used instead of the system default. +The list of IP addresses should be separated with commas. Port numbers +may also optionally be given as \fI:\fP after each IP +address. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/dump-header.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/dump-header.d new file mode 100644 index 0000000..05c10af --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/dump-header.d @@ -0,0 +1,18 @@ +Long: dump-header +Short: D +Arg: +Help: Write the received headers to +Protocols: HTTP FTP +See-also: output +--- +Write the received protocol headers to the specified file. + +This option is handy to use when you want to store the headers that an HTTP +site sends to you. Cookies from the headers could then be read in a second +curl invocation by using the --cookie option! The --cookie-jar option is a +better way to store cookies. + +When used in FTP, the FTP server response lines are considered being "headers" +and thus are saved there. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/egd-file.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/egd-file.d new file mode 100644 index 0000000..c22790f --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/egd-file.d @@ -0,0 +1,8 @@ +Long: egd-file +Arg: +Help: EGD socket path for random data +Protocols: TLS +See-also: random-file +--- +Specify the path name to the Entropy Gathering Daemon socket. The socket is +used to seed the random engine for SSL connections. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/engine.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/engine.d new file mode 100644 index 0000000..cde1a47 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/engine.d @@ -0,0 +1,8 @@ +Long: engine +Arg: +Help: Crypto engine to use +Protocols: TLS +--- +Select the OpenSSL crypto engine to use for cipher operations. Use --engine +list to print a list of build-time supported engines. Note that not all (or +none) of the engines may be available at run-time. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/expect100-timeout.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/expect100-timeout.d new file mode 100644 index 0000000..c88f0b8 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/expect100-timeout.d @@ -0,0 +1,11 @@ +Long: expect100-timeout +Arg: +Help: How long to wait for 100-continue +Protocols: HTTP +Added: 7.47.0 +See-also: connect-timeout +--- +Maximum time in seconds that you allow curl to wait for a 100-continue +response when curl emits an Expects: 100-continue header in its request. By +default curl will wait one second. This option accepts decimal values! When +curl stops waiting, it will continue as if the response has been received. 
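An illustrative sketch (the file name and URL are assumptions) that shortens
the 100-continue wait to a quarter of a second for an upload:

 curl --expect100-timeout 0.25 --upload-file data.bin https://example.com/receiver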
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/fail-early.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/fail-early.d new file mode 100644 index 0000000..375d4c9 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/fail-early.d @@ -0,0 +1,21 @@ +Long: fail-early +Help: Fail on first transfer error, do not continue +Added: 7.52.0 +--- +Fail and exit on the first detected transfer error. + +When curl is used to do multiple transfers on the command line, it will +attempt to operate on each given URL, one by one. By default, it will ignore +errors if there are more URLs given and the last URL's success will determine +the error code curl returns. So early failures will be "hidden" by subsequent +successful transfers. + +Using this option, curl will instead return an error on the first transfer +that fails, independent of the amount of URLs that are given on the command +line. This way, no transfer failures go undetected by scripts and similar. + +This option is global and does not need to be specified for each use of --next. + +This option does not imply --fail, which causes transfers to fail due to the +server's HTTP status code. You can combine the two options, however note --fail +is not global and is therefore contained by --next. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/fail.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/fail.d new file mode 100644 index 0000000..c46c571 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/fail.d @@ -0,0 +1,14 @@ +Long: fail +Short: f +Protocols: HTTP +Help: Fail silently (no output at all) on HTTP errors +--- +Fail silently (no output at all) on server errors. This is mostly done to +better enable scripts etc to better deal with failed attempts. In normal cases +when an HTTP server fails to deliver a document, it returns an HTML document +stating so (which often also describes why and more). This flag will prevent +curl from outputting that and return error 22. + +This method is not fail-safe and there are occasions where non-successful +response codes will slip through, especially when authentication is involved +(response codes 401 and 407). diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/false-start.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/false-start.d new file mode 100644 index 0000000..65a8afb --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/false-start.d @@ -0,0 +1,12 @@ +Long: false-start +Help: Enable TLS False Start +Protocols: TLS +Added: 7.42.0 +--- +Tells curl to use false start during the TLS handshake. False start is a mode +where a TLS client will start sending application data before verifying the +server's Finished message, thus saving a round trip when performing a full +handshake. + +This is currently only implemented in the NSS and Secure Transport (on iOS 7.0 +or later, or OS X 10.9 or later) backends. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/form-string.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/form-string.d new file mode 100644 index 0000000..8079055 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/form-string.d @@ -0,0 +1,11 @@ +Long: form-string +Help: Specify HTTP multipart POST data +Protocols: HTTP +Arg: +See-also: form +--- +Similar to --form except that the value string for the named parameter is used +literally. Leading \&'@' and \&'<' characters, and the \&';type=' string in +the value have no special meaning. 
Use this in preference to --form if +there's any possibility that the string value may accidentally trigger the +\&'@' or \&'<' features of --form. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/form.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/form.d new file mode 100644 index 0000000..87a7d07 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/form.d @@ -0,0 +1,54 @@ +Long: form +Short: F +Arg: +Help: Specify HTTP multipart POST data +Protocols: HTTP +Mutexed: data head upload +--- +This lets curl emulate a filled-in form in which a user has pressed the submit +button. This causes curl to POST data using the Content-Type +multipart/form-data according to RFC 2388. This enables uploading of binary +files etc. To force the 'content' part to be a file, prefix the file name with +an @ sign. To just get the content part from a file, prefix the file name with +the symbol <. The difference between @ and < is then that @ makes a file get +attached in the post as a file upload, while the < makes a text field and just +get the contents for that text field from a file. + +Example: to send an image to a server, where \&'profile' is the name of the +form-field to which portrait.jpg will be the input: + + curl -F profile=@portrait.jpg https://example.com/upload.cgi + +To read content from stdin instead of a file, use - as the filename. This goes +for both @ and < constructs. Unfortunately it does not support reading the +file from a named pipe or similar, as it needs the full size before the +transfer starts. + +You can also tell curl what Content-Type to use by using 'type=', in a manner +similar to: + + curl -F "web=@index.html;type=text/html" example.com + +or + + curl -F "name=daniel;type=text/foo" example.com + +You can also explicitly change the name field of a file upload part by setting +filename=, like this: + + curl -F "file=@localfile;filename=nameinpost" example.com + +If filename/path contains ',' or ';', it must be quoted by double-quotes like: + + curl -F "file=@\\"localfile\\";filename=\\"nameinpost\\"" example.com + +or + + curl -F 'file=@"localfile";filename="nameinpost"' example.com + +Note that if a filename/path is quoted by double-quotes, any double-quote +or backslash within the filename must be escaped by backslash. + +See further examples and details in the MANUAL. + +This option can be used multiple times. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-account.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-account.d new file mode 100644 index 0000000..013c4f3 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-account.d @@ -0,0 +1,10 @@ +Long: ftp-account +Arg: +Help: Account data string +Protocols: FTP +Added: 7.13.0 +--- +When an FTP server asks for "account data" after user name and password has +been provided, this data is sent off using the ACCT command. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-alternative-to-user.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-alternative-to-user.d new file mode 100644 index 0000000..8982ba8 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-alternative-to-user.d @@ -0,0 +1,10 @@ +Long: ftp-alternative-to-user +Arg: +Help: String to replace USER [name] +Protocols: FTP +Added: 7.15.5 +--- +If authenticating with the USER and PASS commands fails, send this command. 
+When connecting to Tumbleweed's Secure Transport server over FTPS using a +client certificate, using "SITE AUTH" will tell the server to retrieve the +username from the certificate. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-create-dirs.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-create-dirs.d new file mode 100644 index 0000000..ede5710 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-create-dirs.d @@ -0,0 +1,8 @@ +Long: ftp-create-dirs +Protocols: FTP SFTP +Help: Create the remote dirs if not present +See-also: create-dirs +--- +When an FTP or SFTP URL/operation uses a path that doesn't currently exist on +the server, the standard behavior of curl is to fail. Using this option, curl +will instead attempt to create missing directories. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-method.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-method.d new file mode 100644 index 0000000..95aa522 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-method.d @@ -0,0 +1,21 @@ +Long: ftp-method +Arg: +Help: Control CWD usage +Protocols: FTP +Added: 7.15.1 +--- +Control what method curl should use to reach a file on an FTP(S) +server. The method argument should be one of the following alternatives: +.RS +.IP multicwd +curl does a single CWD operation for each path part in the given URL. For deep +hierarchies this means very many commands. This is how RFC 1738 says it should +be done. This is the default but the slowest behavior. +.IP nocwd +curl does no CWD at all. curl will do SIZE, RETR, STOR etc and give a full +path to the server for all these commands. This is the fastest behavior. +.IP singlecwd +curl does one CWD with the full target directory and then operates on the file +\&"normally" (like in the multicwd case). This is somewhat more standards +compliant than 'nocwd' but without the full penalty of 'multicwd'. +.RE diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-pasv.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-pasv.d new file mode 100644 index 0000000..44103e2 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-pasv.d @@ -0,0 +1,16 @@ +Long: ftp-pasv +Help: Use PASV/EPSV instead of PORT +Protocols: FTP +Added: 7.11.0 +See-also: disable-epsv +--- +Use passive mode for the data connection. Passive is the internal default +behavior, but using this option can be used to override a previous --ftp-port +option. + +If this option is used several times, only the first one is used. Undoing an +enforced passive really isn't doable but you must then instead enforce the +correct --ftp-port again. + +Passive mode means that curl will try the EPSV command first and then PASV, +unless --disable-epsv is used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-port.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-port.d new file mode 100644 index 0000000..a852e90 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-port.d @@ -0,0 +1,32 @@ +Long: ftp-port +Arg:
+Help: Use PORT instead of PASV +Short: P +Protocols: FTP +See-also: ftp-pasv disable-eprt +--- +Reverses the default initiator/listener roles when connecting with FTP. This +option makes curl use active mode. curl then tells the server to connect back +to the client's specified address and port, while passive mode asks the server +to setup an IP address and port for it to connect to.
should be one +of: +.RS +.IP interface +i.e "eth0" to specify which interface's IP address you want to use (Unix only) +.IP "IP address" +i.e "192.168.10.1" to specify the exact IP address +.IP "host name" +i.e "my.host.domain" to specify the machine +.IP "-" +make curl pick the same IP address that is already used for the control +connection +.RE + +If this option is used several times, the last one will be used. Disable the +use of PORT with --ftp-pasv. Disable the attempt to use the EPRT command +instead of PORT by using --disable-eprt. EPRT is really PORT++. + +Since 7.19.5, you can append \&":[start]-[end]\&" to the right of the address, +to tell curl what TCP port range to use. That means you specify a port range, +from a lower to a higher number. A single number works as well, but do note +that it increases the risk of failure since the port may not be available. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-pret.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-pret.d new file mode 100644 index 0000000..dac4c35 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-pret.d @@ -0,0 +1,8 @@ +Long: ftp-pret +Help: Send PRET before PASV +Protocols: FTP +Added: 7.20.0 +--- +Tell curl to send a PRET command before PASV (and EPSV). Certain FTP servers, +mainly drftpd, require this non-standard command for directory listings as +well as up and downloads in PASV mode. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-skip-pasv-ip.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-skip-pasv-ip.d new file mode 100644 index 0000000..da6ab11 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-skip-pasv-ip.d @@ -0,0 +1,12 @@ +Long: ftp-skip-pasv-ip +Help: Skip the IP address for PASV +Protocols: FTP +Added: 7.14.2 +See-also: ftp-pasv +--- +Tell curl to not use the IP address the server suggests in its response +to curl's PASV command when curl connects the data connection. Instead curl +will re-use the same IP address it already uses for the control +connection. + +This option has no effect if PORT, EPRT or EPSV is used instead of PASV. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-ccc-mode.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-ccc-mode.d new file mode 100644 index 0000000..be10294 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-ccc-mode.d @@ -0,0 +1,11 @@ +Long: ftp-ssl-ccc-mode +Arg: +Help: Set CCC mode +Protocols: FTP +Added: 7.16.2 +See-also: ftp-ssl-ccc +--- +Sets the CCC mode. The passive mode will not initiate the shutdown, but +instead wait for the server to do it, and will not reply to the shutdown from +the server. The active mode initiates the shutdown and waits for a reply from +the server. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-ccc.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-ccc.d new file mode 100644 index 0000000..c6edc5b --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-ccc.d @@ -0,0 +1,10 @@ +Long: ftp-ssl-ccc +Help: Send CCC after authenticating +Protocols: FTP +See-also: ssl ftp-ssl-ccc-mode +Added: 7.16.1 +--- +Use CCC (Clear Command Channel) Shuts down the SSL/TLS layer after +authenticating. The rest of the control channel communication will be +unencrypted. This allows NAT routers to follow the FTP transaction. The +default mode is passive. 
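An illustrative invocation (host, path and credentials are placeholders) that
requires TLS for the session and then clears the command channel after
logging in:

 curl --user name:password --ssl-reqd --ftp-ssl-ccc ftp://ftp.example.com/file.txt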
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-control.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-control.d new file mode 100644 index 0000000..87a8225 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ftp-ssl-control.d @@ -0,0 +1,8 @@ +Long: ftp-ssl-control +Help: Require SSL/TLS for FTP login, clear for transfer +Protocols: FTP +Added: 7.16.0 +--- +Require SSL/TLS for the FTP login, clear for transfer. Allows secure +authentication, but non-encrypted data transfers for efficiency. Fails the +transfer if the server doesn't support SSL/TLS. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/gen.pl b/deps-win32/curl-7.54.1/docs/cmdline-opts/gen.pl new file mode 100644 index 0000000..73ea6d4 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/gen.pl @@ -0,0 +1,385 @@ +#!/usr/bin/perl + +=begin comment + +This script generates the manpage. + +Example: gen.pl mainpage > curl.1 + +Dev notes: + +We open *input* files in :crlf translation (a no-op on many platforms) in +case we have CRLF line endings in Windows but a perl that defaults to LF. +Unfortunately it seems some perls like msysgit can't handle a global input-only +:crlf so it has to be specified on each file open for text input. + +=end comment +=cut + +my $some_dir=$ARGV[1] || "."; + +opendir(my $dh, $some_dir) || die "Can't opendir $some_dir: $!"; +my @s = grep { /\.d$/ && -f "$some_dir/$_" } readdir($dh); +closedir $dh; + +my %optshort; +my %optlong; +my %helplong; +my %arglong; +my %redirlong; +my %protolong; + +# get the long name version, return the man page string +sub manpageify { + my ($k)=@_; + my $l; + if($optlong{$k} ne "") { + # both short + long + $l = "\\fI-".$optlong{$k}.", --$k\\fP"; + } + else { + # only long + $l = "\\fI--$k\\fP"; + } + return $l; +} + +sub printdesc { + my @desc = @_; + for my $d (@desc) { + # skip lines starting with space (examples) + if($d =~ /^[^ ]/) { + for my $k (keys %optlong) { + my $l = manpageify($k); + $d =~ s/--$k([^a-z0-9_-])/$l$1/; + } + } + print $d; + } +} + +sub seealso { + my($standalone, $data)=@_; + if($standalone) { + return sprintf + ".SH \"SEE ALSO\"\n$data\n"; + } + else { + return "See also $data. "; + } +} + +sub overrides { + my ($standalone, $data)=@_; + if($standalone) { + return ".SH \"OVERRIDES\"\n$data\n"; + } + else { + return $data; + } +} + +sub protocols { + my ($standalone, $data)=@_; + if($standalone) { + return ".SH \"PROTOCOLS\"\n$data\n"; + } + else { + return "($data) "; + } +} + +sub added { + my ($standalone, $data)=@_; + if($standalone) { + return ".SH \"ADDED\"\nAdded in curl version $data\n"; + } + else { + return "Added in $data. 
"; + } +} + +sub single { + my ($f, $standalone)=@_; + open(F, "<:crlf", "$some_dir/$f") || + return 1; + my $short; + my $long; + my $tags; + my $added; + my $protocols; + my $arg; + my $mutexed; + my $requires; + my $seealso; + my $magic; # cmdline special option + while() { + if(/^Short: *(.)/i) { + $short=$1; + } + elsif(/^Long: *(.*)/i) { + $long=$1; + } + elsif(/^Added: *(.*)/i) { + $added=$1; + } + elsif(/^Tags: *(.*)/i) { + $tags=$1; + } + elsif(/^Arg: *(.*)/i) { + $arg=$1; + } + elsif(/^Magic: *(.*)/i) { + $magic=$1; + } + elsif(/^Mutexed: *(.*)/i) { + $mutexed=$1; + } + elsif(/^Protocols: *(.*)/i) { + $protocols=$1; + } + elsif(/^See-also: *(.*)/i) { + $seealso=$1; + } + elsif(/^Requires: *(.*)/i) { + $requires=$1; + } + elsif(/^Help: *(.*)/i) { + ; + } + elsif(/^---/) { + if(!$long) { + print STDERR "WARN: no 'Long:' in $f\n"; + } + last; + } + else { + chomp; + print STDERR "WARN: unrecognized line in $f, ignoring:\n:'$_';" + } + } + my @dest; + while() { + push @desc, $_; + } + close(F); + my $opt; + if(defined($short) && $long) { + $opt = "-$short, --$long"; + } + elsif($short && !$long) { + $opt = "-$short"; + } + elsif($long && !$short) { + $opt = "--$long"; + } + + if($arg) { + $opt .= " $arg"; + } + + if($standalone) { + print ".TH curl 1 \"30 Nov 2016\" \"curl 7.52.0\" \"curl manual\"\n"; + print ".SH OPTION\n"; + print "curl $opt\n"; + } + else { + print ".IP \"$opt\"\n"; + } + if($protocols) { + print protocols($standalone, $protocols); + } + + if($standalone) { + print ".SH DESCRIPTION\n"; + } + + printdesc(@desc); + undef @desc; + + my @foot; + if($seealso) { + my @m=split(/ /, $seealso); + my $mstr; + for my $k (@m) { + my $l = manpageify($k); + $mstr .= sprintf "%s$l", $mstr?" and ":""; + } + push @foot, seealso($standalone, $mstr); + } + if($requires) { + my $l = manpageify($long); + push @foot, "$l requires that the underlying libcurl". + " was built to support $requires. "; + } + if($mutexed) { + my @m=split(/ /, $mutexed); + my $mstr; + for my $k (@m) { + my $l = manpageify($k); + $mstr .= sprintf "%s$l", $mstr?" and ":""; + } + push @foot, overrides($standalone, "This option overrides $mstr. 
"); + } + if($added) { + push @foot, added($standalone, $added); + } + if($foot[0]) { + print "\n"; + my $f = join("", @foot); + $f =~ s/ +\z//; # remove trailing space + print "$f\n"; + } + return 0; +} + +sub getshortlong { + my ($f)=@_; + open(F, "<:crlf", "$some_dir/$f"); + my $short; + my $long; + my $help; + my $arg; + my $protocols; + while() { + if(/^Short: (.)/i) { + $short=$1; + } + elsif(/^Long: (.*)/i) { + $long=$1; + } + elsif(/^Help: (.*)/i) { + $help=$1; + } + elsif(/^Arg: (.*)/i) { + $arg=$1; + } + elsif(/^Protocols: (.*)/i) { + $protocols=$1; + } + elsif(/^---/) { + last; + } + } + close(F); + if($short) { + $optshort{$short}=$long; + } + if($long) { + $optlong{$long}=$short; + $helplong{$long}=$help; + $arglong{$long}=$arg; + $protolong{$long}=$protocols; + } +} + +sub indexoptions { + foreach my $f (@s) { + getshortlong($f); + } +} + +sub header { + my ($f)=@_; + open(F, "<:crlf", "$some_dir/$f"); + my @d; + while() { + push @d, $_; + } + close(F); + printdesc(@d); +} + +sub listhelp { + foreach my $f (sort keys %helplong) { + my $long = $f; + my $short = $optlong{$long}; + my $opt; + + if(defined($short) && $long) { + $opt = "-$short, --$long"; + } + elsif($long && !$short) { + $opt = " --$long"; + } + + my $arg = $arglong{$long}; + if($arg) { + $opt .= " $arg"; + } + my $desc = $helplong{$f}; + $desc =~ s/\"/\\\"/g; # escape double quotes + + my $line = sprintf " {\"%s\",\n \"%s\"},\n", $opt, $desc; + + if(length($opt) + length($desc) > 78) { + print STDERR "WARN: the --$long line is too long\n"; + } + print $line; + } +} + +sub mainpage { + # show the page header + header("page-header"); + + # output docs for all options + foreach my $f (sort @s) { + single($f, 0); + } + + header("page-footer"); +} + +sub showonly { + my ($f) = @_; + if(single($f, 1)) { + print STDERR "$f: failed\n"; + } +} + +sub showprotocols { + my %prots; + foreach my $f (keys %optlong) { + my @p = split(/ /, $protolong{$f}); + for my $p (@p) { + $prots{$p}++; + } + } + for(sort keys %prots) { + printf "$_ (%d options)\n", $prots{$_}; + } +} + +sub getargs { + my $f; + do { + $f = shift @ARGV; + if($f eq "mainpage") { + mainpage(); + return; + } + elsif($f eq "listhelp") { + listhelp(); + return; + } + elsif($f eq "single") { + showonly(shift @ARGV); + return; + } + elsif($f eq "protos") { + showprotocols(); + return; + } + } while($f); + + print "Usage: gen.pl [srcdir]\n"; +} + +#------------------------------------------------------------------------ + +# learn all existing options +indexoptions(); + +getargs(); + diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/get.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/get.d new file mode 100644 index 0000000..be7cb25 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/get.d @@ -0,0 +1,15 @@ +Long: get +Short: G +Help: Put the post data in the URL and use GET +--- +When used, this option will make all data specified with --data, --data-binary +or --data-urlencode to be used in an HTTP GET request instead of the POST +request that otherwise would be used. The data will be appended to the URL +with a '?' separator. + +If used in combination with --head, the POST data will instead be appended to +the URL with a HEAD request. + +If this option is used several times, only the first one is used. This is +because undoing a GET doesn't make sense, but you should then instead enforce +the alternative method you prefer. 
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/globoff.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/globoff.d new file mode 100644 index 0000000..fff6516 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/globoff.d @@ -0,0 +1,8 @@ +Long: globoff +Short: g +Help: Disable URL sequences and ranges using {} and [] +--- +This option switches off the "URL globbing parser". When you set this option, +you can specify URLs that contain the letters {}[] without having them being +interpreted by curl itself. Note that these letters are not normal legal URL +contents but they should be encoded according to the URI standard. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/head.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/head.d new file mode 100644 index 0000000..350a100 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/head.d @@ -0,0 +1,8 @@ +Long: head +Short: I +Help: Show document info only +Protocols: HTTP FTP FILE +--- +Fetch the headers only! HTTP-servers feature the command HEAD which this uses +to get nothing but the header of a document. When used on an FTP or FILE file, +curl displays the file size and last modification time only. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/header.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/header.d new file mode 100644 index 0000000..90af735 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/header.d @@ -0,0 +1,38 @@ +Long: header +Short: H +Arg:
+Help: Pass custom header LINE to server +Protocols: HTTP +--- + +Extra header to include in the request when sending HTTP to a server. You may +specify any number of extra headers. Note that if you should add a custom +header that has the same name as one of the internal ones curl would use, your +externally set header will be used instead of the internal one. This allows +you to make even trickier stuff than curl would normally do. You should not +replace internally set headers without knowing perfectly well what you're +doing. Remove an internal header by giving a replacement without content on +the right side of the colon, as in: -H \&"Host:". If you send the custom +header with no-value then its header must be terminated with a semicolon, such +as \-H \&"X-Custom-Header;" to send "X-Custom-Header:". + +curl will make sure that each header you add/replace is sent with the proper +end-of-line marker, you should thus \fBnot\fP add that as a part of the header +content: do not add newlines or carriage returns, they will only mess things up +for you. + +See also the --user-agent and --referer options. + +Starting in 7.37.0, you need --proxy-header to send custom headers intended +for a proxy. + +Example: + + curl -H "X-First-Name: Joe" http://example.com/ + +\fBWARNING\fP: headers set with this option will be set in all requests - even +after redirects are followed, like when told with --location. This can lead to +the header being sent to other hosts than the original host, so sensitive +headers should be used with caution combined with following redirects. + +This option can be used multiple times to add/replace/remove multiple headers. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/help.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/help.d new file mode 100644 index 0000000..64aa696 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/help.d @@ -0,0 +1,6 @@ +Long: help +Short: h +Help: This help text +--- +Usage help. This lists all current command line options with a short +description. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/hostpubmd5.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/hostpubmd5.d new file mode 100644 index 0000000..a851158 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/hostpubmd5.d @@ -0,0 +1,9 @@ +Long: hostpubmd5 +Arg: +Help: Acceptable MD5 hash of the host public key +Protocols: SFTP SCP +Added: 7.17.1 +--- +Pass a string containing 32 hexadecimal digits. The string should +be the 128 bit MD5 checksum of the remote host's public key, curl will refuse +the connection with the host unless the md5sums match. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/http1.0.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/http1.0.d new file mode 100644 index 0000000..d9bbd76 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/http1.0.d @@ -0,0 +1,10 @@ +Short: 0 +Long: http1.0 +Tags: Versions +Protocols: HTTP +Added: +Mutexed: http1.1 http2 +Help: Use HTTP 1.0 +--- +Tells curl to use HTTP version 1.0 instead of using its internally preferred +HTTP version. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/http1.1.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/http1.1.d new file mode 100644 index 0000000..f1e6b5c --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/http1.1.d @@ -0,0 +1,8 @@ +Long: http1.1 +Tags: Versions +Protocols: HTTP +Added: 7.33.0 +Mutexed: http1.0 http2 +Help: Use HTTP 1.1 +--- +Tells curl to use HTTP version 1.1. 
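A short, hedged sketch of the header options above (the host and header values are placeholders): one -H adds a custom header, a second -H with nothing after the colon removes an internal header, and --http1.1 pins the protocol version.

  curl --http1.1 -H "X-Trace-Id: abc123" -H "Accept:" http://example.com/
  # headers only, using the HEAD method described under --head
  curl -I http://example.com/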
diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/http2-prior-knowledge.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/http2-prior-knowledge.d new file mode 100644 index 0000000..f793f77 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/http2-prior-knowledge.d @@ -0,0 +1,12 @@ +Long: http2-prior-knowledge +Tags: Versions +Protocols: HTTP +Added: 7.49.0 +Mutexed: http1.1 http1.0 http2 +Requires: HTTP/2 +Help: Use HTTP 2 without HTTP/1.1 Upgrade +--- +Tells curl to issue its non-TLS HTTP requests using HTTP/2 without HTTP/1.1 +Upgrade. It requires prior knowledge that the server supports HTTP/2 straight +away. HTTPS requests will still do HTTP/2 the standard way with negotiated +protocol version in the TLS handshake. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/http2.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/http2.d new file mode 100644 index 0000000..04cff00 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/http2.d @@ -0,0 +1,10 @@ +Long: http2 +Tags: Versions +Protocols: HTTP +Added: 7.33.0 +Mutexed: http1.1 http1.0 http2-prior-knowledge +Requires: HTTP/2 +See-also: no-alpn +Help: Use HTTP 2 +--- +Tells curl to use HTTP version 2. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ignore-content-length.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ignore-content-length.d new file mode 100644 index 0000000..53524f5 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ignore-content-length.d @@ -0,0 +1,10 @@ +Long: ignore-content-length +Help: Ignore the size of the remote resource +Protocols: FTP HTTP +--- +For HTTP, Ignore the Content-Length header. This is particularly useful for +servers running Apache 1.x, which will report incorrect Content-Length for +files larger than 2 gigabytes. + +For FTP (since 7.46.0), skip the RETR command to figure out the size before +downloading a file. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/include.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/include.d new file mode 100644 index 0000000..e55d516 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/include.d @@ -0,0 +1,7 @@ +Long: include +Short: i +Help: Include protocol headers in the output +See-also: verbose +--- +Include the HTTP-header in the output. The HTTP-header includes things like +server-name, date of the document, HTTP-version and more... diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/insecure.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/insecure.d new file mode 100644 index 0000000..49b0a43 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/insecure.d @@ -0,0 +1,16 @@ +Long: insecure +Short: k +Help: Allow insecure server connections when using SSL +Protocols: TLS +See-also: proxy-insecure cacert +--- + +By default, every SSL connection curl makes is verified to be secure. This +option allows curl to proceed and operate even for server connections +otherwise considered insecure. + +The server connection is verified by making sure the server's certificate +contains the right name and verifies successfully using the cert store. 
+ +See this online resource for further details: + https://curl.haxx.se/docs/sslcerts.html diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/interface.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/interface.d new file mode 100644 index 0000000..da84cd2 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/interface.d @@ -0,0 +1,12 @@ +Long: interface +Arg: +Help: Use network INTERFACE (or address) +See-also: dns-interface +--- + +Perform an operation using a specified interface. You can enter interface +name, IP address or host name. An example could look like: + + curl --interface eth0:1 https://www.example.com/ + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ipv4.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ipv4.d new file mode 100644 index 0000000..9c40c8c --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ipv4.d @@ -0,0 +1,12 @@ +Short: 4 +Long: ipv4 +Tags: Versions +Protocols: +Added: +Mutexed: ipv6 +Requires: +See-also: http1.1 http2 +Help: Resolve names to IPv4 addresses +--- +This option tells curl to resolve names to IPv4 addresses only, and not for +example try IPv6. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/ipv6.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/ipv6.d new file mode 100644 index 0000000..c2392e7 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/ipv6.d @@ -0,0 +1,12 @@ +Short: 6 +Long: ipv6 +Tags: Versions +Protocols: +Added: +Mutexed: ipv6 +Requires: +See-also: http1.1 http2 +Help: Resolve names to IPv6 addresses +--- +This option tells curl to resolve names to IPv6 addresses only, and not for +example try IPv4. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/junk-session-cookies.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/junk-session-cookies.d new file mode 100644 index 0000000..40ccd9c --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/junk-session-cookies.d @@ -0,0 +1,10 @@ +Long: junk-session-cookies +Short: j +Help: Ignore session cookies read from file +Protocols: HTTP +See-also: cookie cookie-jar +--- +When curl is told to read cookies from a given file, this option will make it +discard all "session cookies". This will basically have the same effect as if +a new session is started. Typical browsers always discard session cookies when +they're closed down. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/keepalive-time.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/keepalive-time.d new file mode 100644 index 0000000..c816e13 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/keepalive-time.d @@ -0,0 +1,13 @@ +Long: keepalive-time +Arg: +Help: Interval time for keepalive probes +Added: 7.18.0 +--- +This option sets the time a connection needs to remain idle before sending +keepalive probes and the time between individual keepalive probes. It is +currently effective on operating systems offering the TCP_KEEPIDLE and +TCP_KEEPINTVL socket options (meaning Linux, recent AIX, HP-UX and more). This +option has no effect if --no-keepalive is used. + +If this option is used several times, the last one will be used. If +unspecified, the option defaults to 60 seconds. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/key-type.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/key-type.d new file mode 100644 index 0000000..bf39bcd --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/key-type.d @@ -0,0 +1,9 @@ +Long: key-type +Arg: +Help: Private key file type (DER/PEM/ENG) +Protocols: TLS +--- +Private key file type. 
Specify which type your --key provided private key +is. DER, PEM, and ENG are supported. If not specified, PEM is assumed. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/key.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/key.d new file mode 100644 index 0000000..fbf583a --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/key.d @@ -0,0 +1,10 @@ +Long: key +Arg: +Protocols: TLS SSH +Help: Private key file name +--- +Private key file name. Allows you to provide your private key in this separate +file. For SSH, if not specified, curl tries the following candidates in order: +'~/.ssh/id_rsa', '~/.ssh/id_dsa', './id_rsa', './id_dsa'. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/krb.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/krb.d new file mode 100644 index 0000000..19547af --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/krb.d @@ -0,0 +1,11 @@ +Long: krb +Arg: +Help: Enable Kerberos with security +Protocols: FTP +Requires: Kerberos +--- +Enable Kerberos authentication and use. The level must be entered and should +be one of 'clear', 'safe', 'confidential', or 'private'. Should you use a +level that is not one of these, 'private' will instead be used. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/libcurl.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/libcurl.d new file mode 100644 index 0000000..ef132fe --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/libcurl.d @@ -0,0 +1,11 @@ +Long: libcurl +Arg: +Help: Dump libcurl equivalent code of this command line +Added: 7.16.1 +--- +Append this option to any ordinary curl command line, and you will get a +libcurl-using C source code written to the file that does the equivalent +of what your command-line operation does! + +If this option is used several times, the last given file name will be +used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/limit-rate.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/limit-rate.d new file mode 100644 index 0000000..8784a84 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/limit-rate.d @@ -0,0 +1,18 @@ +Long: limit-rate +Arg: +Help: Limit transfer speed to RATE +--- +Specify the maximum transfer rate you want curl to use - for both downloads +and uploads. This feature is useful if you have a limited pipe and you'd like +your transfer not to use your entire bandwidth. To make it slower than it +otherwise would be. + +The given speed is measured in bytes/second, unless a suffix is appended. +Appending 'k' or 'K' will count the number as kilobytes, 'm' or M' makes it +megabytes, while 'g' or 'G' makes it gigabytes. Examples: 200K, 3m and 1G. + +If you also use the --speed-limit option, that option will take precedence and +might cripple the rate-limiting slightly, to help keeping the speed-limit +logic working. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/list-only.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/list-only.d new file mode 100644 index 0000000..4c56304 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/list-only.d @@ -0,0 +1,24 @@ +Long: list-only +Short: l +Protocols: FTP POP3 +Help: List only mode +Added: 7.21.5 +--- +(FTP) +When listing an FTP directory, this switch forces a name-only view. 
This is +especially useful if the user wants to machine-parse the contents of an FTP +directory since the normal directory view doesn't use a standard look or +format. When used like this, the option causes a NLST command to be sent to +the server instead of LIST. + +Note: Some FTP servers list only files in their response to NLST; they do not +include sub-directories and symbolic links. + +(POP3) +When retrieving a specific email from POP3, this switch forces a LIST command +to be performed instead of RETR. This is particularly useful if the user wants +to see if a specific message id exists on the server and what size it is. + +Note: When combined with --request, this option can be used to send an UIDL +command instead, so the user may use the email's unique identifier rather than +it's message id to make the request. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/local-port.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/local-port.d new file mode 100644 index 0000000..d96b46e --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/local-port.d @@ -0,0 +1,9 @@ +Long: local-port +Arg: +Help: Force use of RANGE for local port numbers +Added: 7.15.2 +--- +Set a preferred single number or range (FROM-TO) of local port numbers to use +for the connection(s). Note that port numbers by nature are a scarce resource +that will be busy at times so setting this range to something too narrow might +cause unnecessary connection setup failures. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/location-trusted.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/location-trusted.d new file mode 100644 index 0000000..995a871 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/location-trusted.d @@ -0,0 +1,9 @@ +Long: location-trusted +Help: Like --location, and send auth to other hosts +Protocols: HTTP +See-also: user +--- +Like --location, but will allow sending the name + password to all hosts that +the site may redirect to. This may or may not introduce a security breach if +the site redirects you to a site to which you'll send your authentication info +(which is plaintext in the case of HTTP Basic authentication). diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/location.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/location.d new file mode 100644 index 0000000..7c70e69 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/location.d @@ -0,0 +1,23 @@ +Long: location +Short: L +Help: Follow redirects +Protocols: HTTP +--- +If the server reports that the requested page has moved to a different +location (indicated with a Location: header and a 3XX response code), this +option will make curl redo the request on the new place. If used together with +--include or --head, headers from all requested pages will be shown. When +authentication is used, curl only sends its credentials to the initial +host. If a redirect takes curl to a different host, it won't be able to +intercept the user+password. See also --location-trusted on how to change +this. You can limit the amount of redirects to follow by using the +--max-redirs option. + +When curl follows a redirect and the request is not a plain GET (for example +POST or PUT), it will do the following request with a GET if the HTTP response +was 301, 302, or 303. If the response code was any other 3xx code, curl will +re-send the following request using the same unmodified method. 
+ +You can tell curl to not change the non-GET request method to GET after a 30x +response by using the dedicated options for that: --post301, --post302 and +--post303. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/login-options.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/login-options.d new file mode 100644 index 0000000..8bad051 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/login-options.d @@ -0,0 +1,14 @@ +Long: login-options +Arg: <options> +Protocols: IMAP POP3 SMTP +Help: Server login options +Added: 7.34.0 +--- +Specify the login options to use during server authentication. + +You can use the login options to specify protocol specific options that may +be used during authentication. At present only IMAP, POP3 and SMTP support +login options. For more information about the login options please see +RFC 2384, RFC 5092 and IETF draft draft-earhart-url-smtp-00.txt + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-auth.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-auth.d new file mode 100644 index 0000000..70cf0ed --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-auth.d @@ -0,0 +1,10 @@ +Long: mail-auth +Arg: <address>
+Protocols: SMTP +Help: Originator address of the original email +Added: 7.25.0 +See-also: mail-rcpt mail-from +--- +Specify a single address. This will be used to specify the authentication +address (identity) of a submitted message that is being relayed to another +server. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-from.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-from.d new file mode 100644 index 0000000..1d93234 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-from.d @@ -0,0 +1,8 @@ +Long: mail-from +Arg: <address>
+Help: Mail from this address +Protocols: SMTP +Added: 7.20.0 +See-also: mail-rcpt mail-auth +--- +Specify a single address that the given mail should get sent from. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-rcpt.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-rcpt.d new file mode 100644 index 0000000..d747cea --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/mail-rcpt.d @@ -0,0 +1,19 @@ +Long: mail-rcpt +Arg: <address>
+Help: Mail from this address +Protocols: SMTP +Added: 7.20.0 +--- +Specify a single address, user name or mailing list name. Repeat this +option several times to send to multiple recipients. + +When performing a mail transfer, the recipient should specify a valid email +address to send the mail to. + +When performing an address verification (VRFY command), the recipient should be +specified as the user name or user name and domain (as per Section 3.5 of +RFC5321). (Added in 7.34.0) + +When performing a mailing list expand (EXPN command), the recipient should be +specified using the mailing list name, such as "Friends" or "London-Office". +(Added in 7.34.0) diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/manual.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/manual.d new file mode 100644 index 0000000..a9dbb0c --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/manual.d @@ -0,0 +1,5 @@ +Long: manual +Short: M +Help: Display the full manual +--- +Manual. Display the huge help text. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/max-filesize.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/max-filesize.d new file mode 100644 index 0000000..e92ef58 --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/max-filesize.d @@ -0,0 +1,12 @@ +Long: max-filesize +Arg: +Help: Maximum file size to download +See-also: limit-rate +--- +Specify the maximum size (in bytes) of a file to download. If the file +requested is larger than this value, the transfer will not start and curl will +return with exit code 63. + +\fBNOTE:\fP The file size is not always known prior to download, and for such +files this option has no effect even if the file transfer ends up being larger +than this given limit. This concerns both FTP and HTTP transfers. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/max-redirs.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/max-redirs.d new file mode 100644 index 0000000..04b824b --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/max-redirs.d @@ -0,0 +1,11 @@ +Long: max-redirs +Arg: +Help: Maximum number of redirects allowed +Protocols: HTTP +--- +Set maximum number of redirection-followings allowed. When --location is used, +is used to prevent curl from following redirections \&"in absurdum". By +default, the limit is set to 50 redirections. Set this option to -1 to make it +unlimited. + +If this option is used several times, the last one will be used. diff --git a/deps-win32/curl-7.54.1/docs/cmdline-opts/max-time.d b/deps-win32/curl-7.54.1/docs/cmdline-opts/max-time.d new file mode 100644 index 0000000..c22343d --- /dev/null +++ b/deps-win32/curl-7.54.1/docs/cmdline-opts/max-time.d @@ -0,0 +1,13 @@ +Long: max-time +Short: m +Arg: