-// files (-1 for URL)
+// files
.float url_fh;
+#define URL_FH_CURL -1
+#define URL_FH_STDOUT -2
// URLs
.string url_url;
.url_ready_func url_ready;
.entity url_ready_pass;
+// for multi handles
+.float url_attempt;
+.float url_mode;
+
entity url_fromid[NUM_URL_ID];
float autocvar__urllib_nextslot;
url_fromid[id] = world;
// if we get here, we MUST have both buffers cleared
- if(e.url_rbuf != -1 || e.url_wbuf != -1 || e.url_fh != -1)
+ if(e.url_rbuf != -1 || e.url_wbuf != -1 || e.url_fh != URL_FH_CURL)
error("url_URI_Get_Callback: not a request waiting for data");
if(status == 0)
}
}
-void url_fopen(string url, float mode, url_ready_func rdy, entity pass)
+void url_single_fopen(string url, float mode, url_ready_func rdy, entity pass)
{
entity e;
float i;
// create a writing end that does nothing yet
e = spawn();
- e.classname = "url_fopen_file";
+ e.classname = "url_single_fopen_file";
e.url_url = strzone(url);
- e.url_fh = -1;
+ e.url_fh = URL_FH_CURL;
e.url_wbuf = buf_create();
if(e.url_wbuf < 0)
{
- print("url_fopen: out of memory in buf_create\n");
+ print("url_single_fopen: out of memory in buf_create\n");
rdy(e, pass, URL_READY_ERROR);
strunzone(e.url_url);
remove(e);
}
e.url_wbufpos = 0;
e.url_rbuf = -1;
+ e.url_ready = rdy;
+ e.url_ready_pass = pass;
rdy(e, pass, URL_READY_CANWRITE);
break;
break;
if(i >= autocvar__urllib_nextslot)
{
- print("url_fopen: too many concurrent requests\n");
+ print("url_single_fopen: too many concurrent requests\n");
rdy(world, pass, URL_READY_ERROR);
return;
}
// GET the data
if(!crypto_uri_postbuf(url, i + MIN_URL_ID, string_null, string_null, -1, 0))
{
- print("url_fopen: failure in crypto_uri_postbuf\n");
+ print("url_single_fopen: failure in crypto_uri_postbuf\n");
rdy(world, pass, URL_READY_ERROR);
return;
}
// all). Wait for data to come from the
// server, then call the callback
e = spawn();
- e.classname = "url_fopen_file";
+ e.classname = "url_single_fopen_file";
e.url_url = strzone(url);
- e.url_fh = -1;
+ e.url_fh = URL_FH_CURL;
e.url_rbuf = -1;
e.url_wbuf = -1;
e.url_ready = rdy;
break;
}
}
+ else if(url == "-")
+ {
+ switch(mode)
+ {
+ case FILE_WRITE:
+ case FILE_APPEND:
+ e = spawn();
+ e.classname = "url_single_fopen_stdout";
+ e.url_fh = URL_FH_STDOUT;
+ e.url_ready = rdy;
+ e.url_ready_pass = pass;
+ rdy(e, pass, URL_READY_CANWRITE);
+ break;
+ case FILE_READ:
+ print("url_single_fopen: cannot open '-' for reading\n");
+ rdy(world, pass, URL_READY_ERROR);
+ break;
+ }
+ }
else
{
float fh;
else
{
e = spawn();
- e.classname = "url_fopen_file";
+ e.classname = "url_single_fopen_file";
e.url_fh = fh;
+ e.url_ready = rdy;
+ e.url_ready_pass = pass;
if(mode == FILE_READ)
rdy(e, pass, URL_READY_CANREAD);
else
}
// close a file
-void url_fclose(entity e, url_ready_func rdy, entity pass)
+void url_fclose(entity e)
{
float i;
- if(e.url_fh < 0)
+ if(e.url_fh == URL_FH_CURL)
{
if(e.url_rbuf == -1 || e.url_wbuf != -1) // not(post GET/POST request)
if(e.url_rbuf != -1 || e.url_wbuf == -1) // not(pre POST request)
if(i >= autocvar__urllib_nextslot)
{
print("url_fclose: too many concurrent requests\n");
- rdy(e, pass, URL_READY_ERROR);
+ e.url_ready(e,e.url_ready_pass, URL_READY_ERROR);
buf_del(e.url_wbuf);
strunzone(e.url_url);
remove(e);
if(!crypto_uri_postbuf(e.url_url, i + MIN_URL_ID, "text/plain", "", e.url_wbuf, 0))
{
print("url_fclose: failure in crypto_uri_postbuf\n");
- rdy(e, pass, URL_READY_ERROR);
+ e.url_ready(e, e.url_ready_pass, URL_READY_ERROR);
buf_del(e.url_wbuf);
strunzone(e.url_url);
remove(e);
// call the callback
buf_del(e.url_wbuf);
e.url_wbuf = -1;
- e.url_ready = rdy;
- e.url_ready_pass = pass;
e.url_id = i;
url_fromid[i] = e;
else
{
// we have READ all data, just close
- rdy(e, pass, URL_READY_CLOSED);
+ e.url_ready(e, e.url_ready_pass, URL_READY_CLOSED);
buf_del(e.url_rbuf);
strunzone(e.url_url);
remove(e);
}
}
+ else if(e.url_fh == URL_FH_STDOUT)
+ {
+ e.url_ready(e, e.url_ready_pass, URL_READY_CLOSED); // closing creates no reading handle
+ remove(e);
+ }
else
{
// file
fclose(e.url_fh);
- rdy(e, pass, URL_READY_CLOSED); // closing creates no reading handle
+ e.url_ready(e, e.url_ready_pass, URL_READY_CLOSED); // closing creates no reading handle
remove(e);
}
}
// with \n (blame FRIK_FILE)
string url_fgets(entity e)
{
- if(e.url_fh < 0)
+ if(e.url_fh == URL_FH_CURL)
{
if(e.url_rbuf == -1)
error("url_fgets: not readable in current state");
e.url_rbufpos += 1;
return s;
}
+ else if(e.url_fh == URL_FH_STDOUT)
+ {
+ // stdout
+ return string_null;
+ }
else
{
// file
// without \n (blame FRIK_FILE)
void url_fputs(entity e, string s)
{
- if(e.url_fh < 0)
+ if(e.url_fh == URL_FH_CURL)
{
if(e.url_wbuf == -1)
error("url_fputs: not writable in current state");
bufstr_set(e.url_wbuf, e.url_wbufpos, s);
e.url_wbufpos += 1;
}
+ else if(e.url_fh == URL_FH_STDOUT)
+ {
+ // stdout
+ print(s);
+ }
else
{
// file
fputs(e.url_fh, s);
}
}
+
+// multi URL object, tries URLs separated by space in sequence
+// Internal callback for attempts made on behalf of url_multi_fopen.
+// fh:     the handle (or world) passed by url_single_fopen
+// me:     the "url_multi" tracker entity holding the URL list and state
+// status: URL_READY_* code, or a negative HTTP error code
+// On error, advances to the next URL in me.url_url (space separated list);
+// on success, or when no URLs remain, forwards the status to the user
+// callback and (on termination) frees the tracker entity.
+void url_multi_ready(entity fh, entity me, float status)
+{
+	float n;
+	if(status == URL_READY_ERROR || status < 0)
+	{
+		if(status == -422) // Unprocessable Entity
+		{
+			// the server understood us but rejected the payload itself;
+			// retrying the same data against other URLs would be pointless
+			print("url_multi_ready: got HTTP error 422, data is in unusable format - not continuing\n");
+			me.url_ready(fh, me.url_ready_pass, status);
+			strunzone(me.url_url);
+			remove(me);
+			return;
+		}
+		me.url_attempt += 1;
+		n = tokenize_console(me.url_url);
+		if(n <= me.url_attempt)
+		{
+			// URL list exhausted: report the last failure to the user
+			me.url_ready(fh, me.url_ready_pass, status);
+			strunzone(me.url_url);
+			remove(me);
+			return;
+		}
+		// try the next URL in the list, re-entering this callback
+		url_single_fopen(argv(me.url_attempt), me.url_mode, url_multi_ready, me);
+		return;
+	}
+	// success (CANWRITE/CANREAD/CLOSED): pass straight through to the user
+	me.url_ready(fh, me.url_ready_pass, status);
+}
+// Open a "multi" URL: url is a space separated list of URLs that are
+// tried in order until one succeeds. rdy is called with the resulting
+// handle and status, exactly as with url_single_fopen.
+void url_multi_fopen(string url, float mode, url_ready_func rdy, entity pass)
+{
+	float ntokens;
+	ntokens = tokenize_console(url);
+	if(ntokens <= 0)
+	{
+		print("url_multi_fopen: need at least one URL\n");
+		rdy(world, pass, URL_READY_ERROR);
+		return;
+	}
+
+	// tracker entity remembering the URL list and retry position;
+	// freed by url_multi_ready once the request terminates
+	entity handle;
+	handle = spawn();
+	handle.classname = "url_multi";
+	handle.url_url = strzone(url);
+	handle.url_mode = mode;
+	handle.url_attempt = 0;
+	handle.url_ready = rdy;
+	handle.url_ready_pass = pass;
+	url_single_fopen(argv(0), mode, url_multi_ready, handle);
+}