Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.am      14
-rw-r--r--  src/rexmpp.c       1580
-rw-r--r--  src/rexmpp.h        286
-rw-r--r--  src/rexmpp_tcp.c    358
-rw-r--r--  src/rexmpp_tcp.h    177
5 files changed, 2415 insertions, 0 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..6883960
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,14 @@
+AM_CFLAGS = -Werror -Wall -Wextra -Wno-pointer-sign -Wno-unused-parameter
+
+# -Wno-pointer-sign is used to suppress libxml2-related warnings.
+# Since we only care about UTF-8, and in almost all cases just its
+# ASCII subset (comparing or setting fixed namespaces, element names,
+# etc.), it shouldn't matter. Later it would be nice to abstract XML
+# manipulations anyway, to allow libexpat as an alternative.
+
+lib_LTLIBRARIES = librexmpp.la
+
+librexmpp_la_SOURCES = rexmpp_tcp.h rexmpp_tcp.c rexmpp.h rexmpp.c
+include_HEADERS = rexmpp_tcp.h rexmpp.h
+librexmpp_la_CFLAGS = $(AM_CFLAGS) $(LIBXML_CFLAGS) $(GNUTLS_CFLAGS) $(GSASL_CFLAGS) $(CARES_CFLAGS)
+librexmpp_la_LIBADD = $(LIBXML_LIBS) $(GNUTLS_LIBS) $(GSASL_LIBS) $(CARES_LIBS)
diff --git a/src/rexmpp.c b/src/rexmpp.c
new file mode 100644
index 0000000..04ec798
--- /dev/null
+++ b/src/rexmpp.c
@@ -0,0 +1,1580 @@
+/**
+ @file rexmpp.c
+ @brief rexmpp, a reusable XMPP IM client library.
+ @author defanor <defanor@uberspace.net>
+ @date 2020
+ @copyright MIT license.
+*/
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <syslog.h>
+#include <arpa/nameser.h>
+
+#include <ares.h>
+#include <libxml/tree.h>
+#include <libxml/xmlsave.h>
+#include <gnutls/gnutls.h>
+#include <gnutls/x509.h>
+#include <gsasl.h>
+
+#include "rexmpp.h"
+#include "rexmpp_tcp.h"
+
+
+void rexmpp_sax_start_elem_ns (rexmpp_t *s,
+ const char *localname,
+ const char *prefix,
+ const char *URI,
+ int nb_namespaces,
+ const char **namespaces,
+ int nb_attributes,
+ int nb_defaulted,
+ const char **attributes);
+
+void rexmpp_sax_end_elem_ns(rexmpp_t *s,
+ const char *localname,
+ const char *prefix,
+ const char *URI);
+
+void rexmpp_sax_characters (rexmpp_t *s, const char * ch, int len);
+
+void rexmpp_log (rexmpp_t *s, int priority, const char *format, ...)
+{
+ va_list args;
+ if (s->log_function != NULL) {
+ va_start(args, format);
+ s->log_function (s, priority, format, args);
+ va_end(args);
+ }
+}
+
+rexmpp_err_t rexmpp_init (rexmpp_t *s,
+ const char *jid,
+ log_function_t log_function,
+ sasl_property_cb_t sasl_property_cb,
+ xml_in_cb_t xml_in_cb,
+ xml_out_cb_t xml_out_cb)
+{
+ int err;
+ xmlSAXHandler sax = {
+ .initialized = XML_SAX2_MAGIC,
+ .characters = (charactersSAXFunc)rexmpp_sax_characters,
+ .startElementNs = (startElementNsSAX2Func)rexmpp_sax_start_elem_ns,
+ .endElementNs = (endElementNsSAX2Func)rexmpp_sax_end_elem_ns,
+ };
+
+ s->tcp_state = REXMPP_TCP_NONE;
+ s->resolver_state = REXMPP_RESOLVER_NONE;
+ s->stream_state = REXMPP_STREAM_NONE;
+ s->tls_state = REXMPP_TLS_INACTIVE;
+ s->sasl_state = REXMPP_SASL_INACTIVE;
+ s->sm_state = REXMPP_SM_INACTIVE;
+ s->carbons_state = REXMPP_CARBONS_INACTIVE;
+ s->send_buffer = NULL;
+ s->send_queue = NULL;
+ s->server_srv = NULL;
+ s->server_srv_cur = NULL;
+ s->server_srv_tls = NULL;
+ s->server_srv_tls_cur = NULL;
+ s->server_socket = -1;
+ s->current_element_root = NULL;
+ s->current_element = NULL;
+ s->stream_features = NULL;
+ s->stanza_queue = NULL;
+ s->stream_id = NULL;
+ s->active_iq = NULL;
+ s->tls_session_data = NULL;
+ s->tls_session_data_size = 0;
+ s->id_counter = 0;
+ s->reconnect_number = 0;
+ s->next_reconnect_time.tv_sec = 0;
+ s->next_reconnect_time.tv_usec = 0;
+ s->initial_jid = jid;
+ s->assigned_jid = NULL;
+ s->stanza_queue_size = 1024;
+ s->send_queue_size = 1024;
+ s->iq_queue_size = 1024;
+ s->log_function = log_function;
+ s->sasl_property_cb = sasl_property_cb;
+ s->xml_in_cb = xml_in_cb;
+ s->xml_out_cb = xml_out_cb;
+
+ s->xml_parser = xmlCreatePushParserCtxt(&sax, s, "", 0, NULL);
+
+ if (s->xml_parser == NULL) {
+ rexmpp_log(s, LOG_CRIT, "Failed to create an XML parser context.");
+ return REXMPP_E_XML;
+ }
+
+ err = ares_library_init(ARES_LIB_INIT_ALL);
+ if (err != 0) {
+ rexmpp_log(s, LOG_CRIT, "ares library initialisation error: %s",
+ ares_strerror(err));
+ xmlFreeParserCtxt(s->xml_parser);
+ return REXMPP_E_DNS;
+ }
+
+ err = ares_init(&(s->resolver_channel));
+ if (err) {
+ rexmpp_log(s, LOG_CRIT, "ares channel initialisation error: %s",
+ ares_strerror(err));
+ ares_library_cleanup();
+ xmlFreeParserCtxt(s->xml_parser);
+ return REXMPP_E_DNS;
+ }
+
+ err = gnutls_certificate_allocate_credentials(&(s->gnutls_cred));
+ if (err) {
+ rexmpp_log(s, LOG_CRIT, "gnutls credentials allocation error: %s",
+ gnutls_strerror(err));
+ ares_destroy(s->resolver_channel);
+ ares_library_cleanup();
+ xmlFreeParserCtxt(s->xml_parser);
+ return REXMPP_E_TLS;
+ }
+
+ err = gsasl_init(&(s->sasl_ctx));
+ if (err) {
+ rexmpp_log(s, LOG_CRIT, "gsasl initialisation error: %s",
+ gsasl_strerror(err));
+ gnutls_certificate_free_credentials(s->gnutls_cred);
+ ares_destroy(s->resolver_channel);
+ ares_library_cleanup();
+ xmlFreeParserCtxt(s->xml_parser);
+ return REXMPP_E_SASL;
+ }
+ gsasl_callback_set(s->sasl_ctx, s->sasl_property_cb);
+
+ return REXMPP_SUCCESS;
+}
+
+/* Prepares for a reconnect: cleans up some things (e.g., SASL and TLS
+ structures), but keeps others (e.g., stanza queue and stream ID,
+ since we may resume the stream afterwards). */
+void rexmpp_cleanup (rexmpp_t *s) {
+ if (s->tls_state != REXMPP_TLS_INACTIVE) {
+ gnutls_deinit(s->gnutls_session);
+ s->tls_state = REXMPP_TLS_INACTIVE;
+ }
+ if (s->sasl_state != REXMPP_SASL_INACTIVE) {
+ gsasl_finish(s->sasl_session);
+ s->sasl_session = NULL;
+ s->sasl_state = REXMPP_SASL_INACTIVE;
+ }
+ if (s->tcp_state == REXMPP_TCP_CONNECTING) {
+ int sock = rexmpp_tcp_conn_finish(&s->server_connection);
+ if (sock != -1) {
+ close(sock);
+ }
+ s->tcp_state = REXMPP_TCP_NONE;
+ }
+ if (s->server_socket != -1) {
+ close(s->server_socket);
+ s->server_socket = -1;
+ }
+ if (s->send_buffer != NULL) {
+ free(s->send_buffer);
+ s->send_buffer = NULL;
+ }
+ if (s->stream_features != NULL) {
+ xmlFreeNode(s->stream_features);
+ s->stream_features = NULL;
+ }
+ while (s->send_queue != NULL) {
+ xmlNodePtr next = xmlNextElementSibling(s->send_queue);
+ xmlFreeNode(s->send_queue);
+ s->send_queue = next;
+ }
+ if (s->current_element_root != NULL) {
+ xmlFreeNode(s->current_element_root);
+ s->current_element_root = NULL;
+ s->current_element = NULL;
+ }
+ if (s->server_srv != NULL) {
+ ares_free_data(s->server_srv);
+ s->server_srv = NULL;
+ s->server_srv_cur = NULL;
+ }
+ if (s->server_srv_tls != NULL) {
+ ares_free_data(s->server_srv_tls);
+ s->server_srv_tls = NULL;
+ s->server_srv_tls_cur = NULL;
+ }
+ s->sm_state = REXMPP_SM_INACTIVE;
+ if (s->carbons_state != REXMPP_CARBONS_DISABLED) {
+ s->carbons_state = REXMPP_CARBONS_INACTIVE;
+ }
+}
+
+/* Frees the things that persist through reconnects. */
+void rexmpp_done (rexmpp_t *s) {
+ rexmpp_cleanup(s);
+ gsasl_done(s->sasl_ctx);
+ gnutls_certificate_free_credentials(s->gnutls_cred);
+ ares_destroy(s->resolver_channel);
+ ares_library_cleanup();
+ xmlFreeParserCtxt(s->xml_parser);
+ if (s->stream_id != NULL) {
+ free(s->stream_id);
+ s->stream_id = NULL;
+ }
+ while (s->stanza_queue != NULL) {
+ xmlNodePtr next = xmlNextElementSibling(s->stanza_queue);
+ xmlFreeNode(s->stanza_queue);
+ s->stanza_queue = next;
+ }
+ while (s->active_iq != NULL) {
+ rexmpp_iq_t *next = s->active_iq->next;
+ xmlFreeNode(s->active_iq->request);
+ free(s->active_iq);
+ s->active_iq = next;
+ }
+ if (s->tls_session_data != NULL) {
+ free(s->tls_session_data);
+ }
+}
+
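+/* Schedules the next reconnect attempt with exponential backoff: the
+   delay starts at 2 seconds and doubles with every attempt, switching
+   to a fixed one-hour delay after a dozen attempts. */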
+void rexmpp_schedule_reconnect(rexmpp_t *s) {
+ gettimeofday(&(s->next_reconnect_time), NULL);
+ if (s->reconnect_number < 12) {
+ s->next_reconnect_time.tv_sec += (2 << s->reconnect_number);
+ } else {
+ s->next_reconnect_time.tv_sec += 3600;
+ }
+ rexmpp_log(s, LOG_DEBUG, "Scheduled reconnect number %d, for %u.%u",
+ s->reconnect_number,
+ s->next_reconnect_time.tv_sec, s->next_reconnect_time.tv_usec);
+ s->reconnect_number++;
+}
+
+
+const char *jid_bare_to_host (const char *jid_bare) {
+ char *jid_host;
+ jid_host = strchr(jid_bare, '@');
+ if (jid_host != NULL) {
+ return jid_host + 1;
+ }
+ return NULL;
+}
+
+xmlNodePtr rexmpp_xml_add_id (rexmpp_t *s, xmlNodePtr node) {
+ char buf[11];
+ snprintf(buf, 11, "%u", s->id_counter);
+ s->id_counter++;
+ xmlNewProp(node, "id", buf);
+ return node;
+}
+
+unsigned int rexmpp_xml_siblings_count (xmlNodePtr node) {
+ unsigned int i;
+ for (i = 0; node != NULL; i++) {
+ node = xmlNextElementSibling(node);
+ }
+ return i;
+}
+
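+/* Checks whether an element matches the given name and namespace; a
+   NULL name or namespace matches anything, and elements without a
+   namespace are treated as belonging to "jabber:client". */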
+int rexmpp_xml_match (xmlNodePtr node,
+ const char *namespace,
+ const char *name)
+{
+ if (node == NULL) {
+ return 0;
+ }
+ if (name != NULL) {
+ if (strcmp(name, node->name) != 0) {
+ return 0;
+ }
+ }
+ if (namespace != NULL) {
+ if (node->ns == NULL) {
+ if (strcmp(namespace, "jabber:client") != 0) {
+ return 0;
+ }
+ } else {
+ if (strcmp(namespace, node->ns->href) != 0) {
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+xmlNodePtr rexmpp_xml_find_child (xmlNodePtr node,
+ const char *namespace,
+ const char *name)
+{
+ if (node == NULL) {
+ return NULL;
+ }
+ xmlNodePtr child;
+ for (child = xmlFirstElementChild(node);
+ child != NULL;
+ child = xmlNextElementSibling(child))
+ {
+ if (rexmpp_xml_match(child, namespace, name)) {
+ return child;
+ }
+ }
+ return NULL;
+}
+
+xmlNodePtr rexmpp_xml_set_delay (rexmpp_t *s, xmlNodePtr node) {
+ if (rexmpp_xml_find_child (node, NULL, "delay")) {
+ return node;
+ }
+ char buf[42];
+ time_t t = time(NULL);
+ struct tm *local_time = localtime(&t);
+ strftime(buf, 42, "%FT%T%z", local_time);
+ xmlNodePtr delay = xmlNewChild(node, NULL, "delay", NULL);
+ xmlNewProp(delay, "stamp", buf);
+ if (s != NULL && s->assigned_jid != NULL) {
+ xmlNewProp(delay, "from", s->assigned_jid);
+ }
+ return node;
+}
+
+char *rexmpp_xml_serialize(xmlNodePtr node) {
+ xmlBufferPtr buf = xmlBufferCreate();
+ xmlSaveCtxtPtr ctx = xmlSaveToBuffer(buf, "utf-8", 0);
+ xmlSaveTree(ctx, node);
+ xmlSaveFlush(ctx);
+ unsigned char *out = xmlBufferDetach(buf);
+ xmlBufferFree(buf);
+ return out;
+}
+
+int rexmpp_xml_is_stanza (xmlNodePtr node) {
+ return rexmpp_xml_match(node, "jabber:client", "message") ||
+ rexmpp_xml_match(node, "jabber:client", "iq") ||
+ rexmpp_xml_match(node, "jabber:client", "presence");
+}
+
+
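+/* Places a chunk of data into the send buffer, passing it through the
+   SASL security layer (gsasl_encode) if one is active. The actual
+   writing is performed by rexmpp_send_continue. */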
+rexmpp_err_t rexmpp_send_start (rexmpp_t *s, const void *data, size_t data_len)
+{
+ int sasl_err;
+ if (s->send_buffer != NULL) {
+ rexmpp_log(s, LOG_CRIT, "send buffer is not empty: %s", s->send_buffer);
+ return REXMPP_E_SEND_BUFFER_NOT_EMPTY;
+ }
+ if (s->sasl_state == REXMPP_SASL_ACTIVE) {
+ sasl_err = gsasl_encode (s->sasl_session, data, data_len,
+ &(s->send_buffer), &(s->send_buffer_len));
+ if (sasl_err != GSASL_OK) {
+ rexmpp_log(s, LOG_ERR, "SASL encoding error: %s",
+ gsasl_strerror(sasl_err));
+ s->sasl_state = REXMPP_SASL_ERROR;
+ return REXMPP_E_SASL;
+ }
+ } else {
+ s->send_buffer = malloc(data_len);
+ if (s->send_buffer == NULL) {
+ return REXMPP_E_MALLOC;
+ }
+ memcpy(s->send_buffer, data, data_len);
+ s->send_buffer_len = data_len;
+ }
+ s->send_buffer_sent = 0;
+ return REXMPP_E_AGAIN;
+}
+
+rexmpp_err_t rexmpp_send_continue (rexmpp_t *s)
+{
+ if (s->send_buffer == NULL) {
+ rexmpp_log(s, LOG_ERR, "nothing to send");
+ return REXMPP_E_SEND_BUFFER_EMPTY;
+ }
+ int ret;
+ while (1) {
+ if (s->tls_state == REXMPP_TLS_ACTIVE) {
+ ret = gnutls_record_send (s->gnutls_session,
+ s->send_buffer + s->send_buffer_sent,
+ s->send_buffer_len - s->send_buffer_sent);
+ } else {
+ ret = send (s->server_socket,
+ s->send_buffer + s->send_buffer_sent,
+ s->send_buffer_len - s->send_buffer_sent,
+ 0);
+ }
+ if (ret > 0) {
+ s->send_buffer_sent += ret;
+ if (s->send_buffer_sent == s->send_buffer_len) {
+ free(s->send_buffer);
+ s->send_buffer = NULL;
+ if (s->send_queue != NULL) {
+ xmlNodePtr node = s->send_queue;
+ unsigned char *buf = rexmpp_xml_serialize(node);
+ ret = rexmpp_send_start(s, buf, strlen(buf));
+ free(buf);
+ if (ret != REXMPP_E_AGAIN) {
+ return ret;
+ }
+ s->send_queue = xmlNextElementSibling(s->send_queue);
+ xmlFreeNode(node);
+ } else {
+ return REXMPP_SUCCESS;
+ }
+ }
+ } else {
+ if (s->tls_state == REXMPP_TLS_ACTIVE) {
+ if (ret != GNUTLS_E_AGAIN) {
+ s->tls_state = REXMPP_TLS_ERROR;
+ /* Assume a TCP error for now as well. */
+ s->tcp_state = REXMPP_TCP_ERROR;
+ rexmpp_log(s, LOG_ERR, "TLS send error: %s", gnutls_strerror(ret));
+ rexmpp_cleanup(s);
+ rexmpp_schedule_reconnect(s);
+ return REXMPP_E_TLS;
+ }
+ } else {
+ if (errno != EAGAIN) {
+ s->tcp_state = REXMPP_TCP_ERROR;
+ rexmpp_log(s, LOG_ERR, "TCP send error: %s", strerror(errno));
+ rexmpp_cleanup(s);
+ rexmpp_schedule_reconnect(s);
+ return REXMPP_E_TCP;
+ }
+ }
+ return REXMPP_E_AGAIN;
+ }
+ }
+}
+
+rexmpp_err_t rexmpp_send_raw (rexmpp_t *s, const void *data, size_t data_len)
+{
+ int ret = rexmpp_send_start(s, data, data_len);
+ if (ret != REXMPP_E_AGAIN) {
+ return ret;
+ }
+ return rexmpp_send_continue(s);
+}
+
+rexmpp_err_t rexmpp_sm_send_req (rexmpp_t *s);
+
+rexmpp_err_t rexmpp_send (rexmpp_t *s, xmlNodePtr node)
+{
+ int need_ack = 0;
+ int ret;
+
+ if (s->xml_out_cb != NULL && s->xml_out_cb(s, node) == 1) {
+ xmlFreeNode(node);
+ rexmpp_log(s, LOG_WARNING, "Message sending was cancelled by xml_out_cb.");
+ return REXMPP_E_CANCELLED;
+ }
+
+ if (rexmpp_xml_siblings_count(s->send_queue) >= s->send_queue_size) {
+ xmlFreeNode(node);
+ rexmpp_log(s, LOG_ERR, "The send queue is full, not sending.");
+ return REXMPP_E_SEND_QUEUE_FULL;
+ }
+
+ if (rexmpp_xml_is_stanza(node)) {
+ if (s->sm_state == REXMPP_SM_ACTIVE) {
+ if (s->stanzas_out_count - s->stanzas_out_acknowledged >=
+ s->stanza_queue_size) {
+ xmlFreeNode(node);
+ rexmpp_log(s, LOG_ERR, "The stanza queue is full, not sending.");
+ return REXMPP_E_STANZA_QUEUE_FULL;
+ }
+ need_ack = 1;
+ xmlNodePtr queued_stanza = rexmpp_xml_set_delay(s, xmlCopyNode(node, 1));
+ if (s->stanza_queue == NULL) {
+ s->stanza_queue = queued_stanza;
+ } else {
+ xmlNodePtr last = s->stanza_queue;
+ while (xmlNextElementSibling(last) != NULL) {
+ last = xmlNextElementSibling(last);
+ }
+ xmlAddNextSibling(last, queued_stanza);
+ }
+ }
+ if (s->sm_state != REXMPP_SM_INACTIVE) {
+ s->stanzas_out_count++;
+ }
+ }
+
+ if (s->send_buffer == NULL) {
+ unsigned char *buf = rexmpp_xml_serialize(node);
+ ret = rexmpp_send_raw(s, buf, strlen(buf));
+ free(buf);
+ xmlFreeNode(node);
+ if (ret != REXMPP_SUCCESS && ret != REXMPP_E_AGAIN) {
+ return ret;
+ }
+ } else {
+ if (s->send_queue == NULL) {
+ s->send_queue = node;
+ } else {
+ xmlNodePtr last = s->send_queue;
+ while (xmlNextElementSibling(last) != NULL) {
+ last = xmlNextElementSibling(last);
+ }
+ xmlAddNextSibling(last, node);
+ }
+ ret = REXMPP_E_AGAIN;
+ }
+ if (need_ack) {
+ return rexmpp_sm_send_req(s);
+ }
+ return ret;
+}
+
+
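+/* Builds an IQ request with the given type, recipient, and payload,
+   sends it, and records it in the active_iq list so that the callback
+   can be invoked once a response with a matching ID arrives. If the
+   IQ queue limit is reached, the oldest pending request is dropped,
+   with its callback called on a NULL response. */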
+void rexmpp_iq_new (rexmpp_t *s,
+ const char *type,
+ const char *to,
+ xmlNodePtr payload,
+ rexmpp_iq_callback_t cb)
+{
+ unsigned int i;
+ rexmpp_iq_t *prev = NULL, *last = s->active_iq;
+ for (i = 0; last != NULL && last->next != NULL; i++) {
+ prev = last;
+ last = last->next;
+ }
+ if (i >= s->iq_queue_size && s->iq_queue_size > 0) {
+ rexmpp_log(s, LOG_WARNING,
+ "The IQ queue limit is reached, giving up on the oldest IQ.");
+ prev->next = NULL;
+ if (last->cb != NULL) {
+ last->cb(s, last->request, NULL);
+ }
+ xmlFreeNode(last->request);
+ free(last);
+ }
+
+ xmlNodePtr iq_stanza = rexmpp_xml_add_id(s, xmlNewNode(NULL, "iq"));
+ xmlNewNs(iq_stanza, "jabber:client", NULL);
+ xmlNewProp(iq_stanza, "type", type);
+ if (to != NULL) {
+ xmlNewProp(iq_stanza, "to", to);
+ }
+ if (s->assigned_jid != NULL) {
+ xmlNewProp(iq_stanza, "from", s->assigned_jid);
+ }
+ xmlAddChild(iq_stanza, payload);
+ rexmpp_iq_t *iq = malloc(sizeof(rexmpp_iq_t));
+ iq->request = xmlCopyNode(iq_stanza, 1);
+ iq->cb = cb;
+ iq->next = s->active_iq;
+ s->active_iq = iq;
+ rexmpp_send(s, iq_stanza);
+}
+
+rexmpp_err_t rexmpp_sm_ack (rexmpp_t *s) {
+ char buf[11];
+ xmlNodePtr ack = xmlNewNode(NULL, "a");
+ xmlNewNs(ack, "urn:xmpp:sm:3", NULL);
+ snprintf(buf, 11, "%u", s->stanzas_in_count);
+ xmlNewProp(ack, "h", buf);
+ return rexmpp_send(s, ack);
+}
+
+rexmpp_err_t rexmpp_sm_send_req (rexmpp_t *s) {
+ xmlNodePtr ack = xmlNewNode(NULL, "r");
+ xmlNewNs(ack, "urn:xmpp:sm:3", NULL);
+ return rexmpp_send(s, ack);
+}
+
+void rexmpp_recv (rexmpp_t *s) {
+ char chunk_raw[4096], *chunk;
+ ssize_t chunk_raw_len, chunk_len;
+ int sasl_err;
+ /* Loop here in order to consume data from TLS buffers, which
+ wouldn't show up on select(). */
+ do {
+ if (s->tls_state == REXMPP_TLS_ACTIVE) {
+ chunk_raw_len = gnutls_record_recv(s->gnutls_session, chunk_raw, 4096);
+ } else {
+ chunk_raw_len = recv(s->server_socket, chunk_raw, 4096, 0);
+ }
+ if (chunk_raw_len > 0) {
+ if (s->sasl_state == REXMPP_SASL_ACTIVE) {
+ sasl_err = gsasl_decode(s->sasl_session, chunk_raw, chunk_raw_len,
+ &chunk, &chunk_len);
+ if (sasl_err != GSASL_OK) {
+ rexmpp_log(s, LOG_ERR, "SASL decoding error: %s",
+ gsasl_strerror(sasl_err));
+ s->sasl_state = REXMPP_SASL_ERROR;
+ return;
+ }
+ } else {
+ chunk = chunk_raw;
+ chunk_len = chunk_raw_len;
+ }
+ xmlParseChunk(s->xml_parser, chunk, chunk_len, 0);
+ if (s->sasl_state == REXMPP_SASL_ACTIVE) {
+ free(chunk);
+ }
+ } else if (chunk_raw_len == 0) {
+ if (s->tls_state == REXMPP_TLS_ACTIVE) {
+ s->tls_state = REXMPP_TLS_CLOSED;
+ rexmpp_log(s, LOG_INFO, "TLS disconnected");
+ }
+ s->tcp_state = REXMPP_TCP_CLOSED;
+ rexmpp_log(s, LOG_INFO, "TCP disconnected");
+ rexmpp_cleanup(s);
+ if (s->stream_state == REXMPP_STREAM_READY) {
+ rexmpp_schedule_reconnect(s);
+ }
+ } else {
+ if (s->tls_state == REXMPP_TLS_ACTIVE) {
+ if (chunk_raw_len != GNUTLS_E_AGAIN) {
+ s->tls_state = REXMPP_TLS_ERROR;
+ /* Assume a TCP error for now as well. */
+ s->tcp_state = REXMPP_TCP_ERROR;
+ rexmpp_log(s, LOG_ERR, "TLS recv error: %s",
+ gnutls_strerror(chunk_raw_len));
+ rexmpp_cleanup(s);
+ rexmpp_schedule_reconnect(s);
+ }
+ } else if (errno != EAGAIN) {
+ s->tcp_state = REXMPP_TCP_ERROR;
+ rexmpp_log(s, LOG_ERR, "TCP recv error: %s", strerror(errno));
+ rexmpp_cleanup(s);
+ rexmpp_schedule_reconnect(s);
+ }
+ }
+ } while (chunk_raw_len > 0);
+}
+
+int rexmpp_stream_open (rexmpp_t *s) {
+ char buf[2048];
+ snprintf(buf, 2048,
+ "<?xml version='1.0'?>\n"
+ "<stream:stream to='%s' version='1.0' "
+ "xml:lang='en' xmlns='jabber:client' "
+ "xmlns:stream='http://etherx.jabber.org/streams'>",
+ jid_bare_to_host(s->initial_jid));
+ s->stream_state = REXMPP_STREAM_OPENING;
+ rexmpp_send_raw(s, buf, strlen(buf));
+
+ return 0;
+}
+
+void rexmpp_process_conn_err (rexmpp_t *s, int err);
+
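+/* Picks the next connection candidate and initiates a TCP connection
+   to it: xmpps-client (direct TLS) SRV records are tried first, then
+   xmpp-client ones; once all the candidates are exhausted, a
+   reconnect is scheduled. */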
+void rexmpp_try_next_host (rexmpp_t *s) {
+ const char *host;
+ int port;
+ /* todo: check priorities and weights */
+ s->tls_state = REXMPP_TLS_INACTIVE;
+ if (s->server_srv_tls != NULL && s->server_srv_tls_cur == NULL) {
+ /* We have xmpps-client records available, but haven't tried any
+ of them yet. */
+ s->server_srv_tls_cur = s->server_srv_tls;
+ host = s->server_srv_tls_cur->host;
+ port = s->server_srv_tls_cur->port;
+ s->tls_state = REXMPP_TLS_AWAITING_DIRECT;
+ } else if (s->server_srv_tls_cur != NULL &&
+ s->server_srv_tls_cur->next != NULL) {
+ /* We have tried some xmpps-client records, but there are more. */
+ s->server_srv_tls_cur = s->server_srv_tls_cur->next;
+ host = s->server_srv_tls_cur->host;
+ port = s->server_srv_tls_cur->port;
+ s->tls_state = REXMPP_TLS_AWAITING_DIRECT;
+ } else if (s->server_srv != NULL && s->server_srv_cur == NULL) {
+ /* Starting with xmpp-client records. */
+ s->server_srv_cur = s->server_srv;
+ host = s->server_srv_cur->host;
+ port = s->server_srv_cur->port;
+ } else if (s->server_srv_cur != NULL &&
+ s->server_srv_cur->next != NULL) {
+ /* Advancing in xmpp-client records. */
+ s->server_srv_cur = s->server_srv_cur->next;
+ host = s->server_srv_cur->host;
+ port = s->server_srv_cur->port;
+ } else {
+ /* No candidate records left to try. Schedule a reconnect. */
+ rexmpp_cleanup(s);
+ rexmpp_schedule_reconnect(s);
+ return;
+ }
+ rexmpp_log(s, LOG_DEBUG, "Connecting to %s:%d", host, port);
+ rexmpp_process_conn_err(s,
+ rexmpp_tcp_conn_init(&s->server_connection,
+ host, port));
+}
+
+void rexmpp_tls_handshake (rexmpp_t *s) {
+ s->tls_state = REXMPP_TLS_HANDSHAKE;
+ int ret = gnutls_handshake(s->gnutls_session);
+ if (ret == GNUTLS_E_AGAIN) {
+ rexmpp_log(s, LOG_DEBUG, "Waiting for TLS handshake to complete");
+ } else if (ret == 0) {
+ rexmpp_log(s, LOG_DEBUG, "TLS ready");
+ s->tls_state = REXMPP_TLS_ACTIVE;
+
+ if (gnutls_session_is_resumed(s->gnutls_session)) {
+ rexmpp_log(s, LOG_INFO, "TLS session is resumed");
+ } else {
+ if (s->tls_session_data != NULL) {
+ rexmpp_log(s, LOG_DEBUG, "TLS session is not resumed");
+ free(s->tls_session_data);
+ s->tls_session_data = NULL;
+ }
+ gnutls_session_get_data(s->gnutls_session, NULL,
+ &s->tls_session_data_size);
+ s->tls_session_data = malloc(s->tls_session_data_size);
+ ret = gnutls_session_get_data(s->gnutls_session, s->tls_session_data,
+ &s->tls_session_data_size);
+ if (ret != GNUTLS_E_SUCCESS) {
+ rexmpp_log(s, LOG_ERR, "Failed to get TLS session data: %s",
+ gnutls_strerror(ret));
+ }
+ }
+
+ if (s->stream_state == REXMPP_STREAM_NONE) {
+ /* It's a direct TLS connection, so open a stream after
+ connecting. */
+ rexmpp_stream_open(s);
+ } else {
+ /* A STARTTLS connection, restart the stream. */
+ s->stream_state = REXMPP_STREAM_RESTART;
+ }
+
+ } else {
+ rexmpp_log(s, LOG_ERR, "Unexpected TLS handshake error: %s",
+ gnutls_strerror(ret));
+ if (s->stream_state == REXMPP_STREAM_NONE) {
+ /* It was a direct TLS connection attempt: clean up the session
+ and continue with the remaining connection attempts. */
+ gnutls_deinit(s->gnutls_session);
+ s->tls_state = REXMPP_TLS_INACTIVE;
+ rexmpp_try_next_host(s);
+ } else {
+ rexmpp_cleanup(s);
+ }
+ }
+}
+
+void rexmpp_tls_start (rexmpp_t *s) {
+ gnutls_datum_t xmpp_client_protocol = {"xmpp-client", strlen("xmpp-client")};
+ rexmpp_log(s, LOG_DEBUG, "starting TLS");
+ gnutls_init(&s->gnutls_session, GNUTLS_CLIENT);
+ gnutls_session_set_ptr(s->gnutls_session, s);
+ gnutls_alpn_set_protocols(s->gnutls_session, &xmpp_client_protocol, 1, 0);
+ gnutls_server_name_set(s->gnutls_session, GNUTLS_NAME_DNS,
+ jid_bare_to_host(s->initial_jid),
+ strlen(jid_bare_to_host(s->initial_jid)));
+ gnutls_set_default_priority(s->gnutls_session);
+ gnutls_credentials_set(s->gnutls_session, GNUTLS_CRD_CERTIFICATE,
+ s->gnutls_cred);
+ gnutls_transport_set_int(s->gnutls_session, s->server_socket);
+ gnutls_handshake_set_timeout(s->gnutls_session,
+ GNUTLS_DEFAULT_HANDSHAKE_TIMEOUT);
+ if (s->tls_session_data != NULL) {
+ int ret = gnutls_session_set_data(s->gnutls_session,
+ s->tls_session_data,
+ s->tls_session_data_size);
+ if (ret != GNUTLS_E_SUCCESS) {
+ rexmpp_log(s, LOG_ERR, "Failed to set TLS session data: %s",
+ gnutls_strerror(ret));
+ }
+ }
+ s->tls_state = REXMPP_TLS_HANDSHAKE;
+ rexmpp_tls_handshake(s);
+}
+
+
+void rexmpp_process_conn_err (rexmpp_t *s, int err) {
+ s->tcp_state = REXMPP_TCP_CONNECTING;
+ if (err == REXMPP_CONN_DONE) {
+ rexmpp_log(s, LOG_INFO, "Established a TCP connection");
+ s->reconnect_number = 0;
+ xmlCtxtResetPush(s->xml_parser, "", 0, "", "utf-8");
+ s->server_socket = rexmpp_tcp_conn_finish(&s->server_connection);
+ s->tcp_state = REXMPP_TCP_CONNECTED;
+ if (s->tls_state == REXMPP_TLS_AWAITING_DIRECT) {
+ rexmpp_tls_start(s);
+ } else {
+ rexmpp_stream_open(s);
+ }
+ } else if (err != REXMPP_CONN_IN_PROGRESS) {
+ rexmpp_log(s, LOG_WARNING, "Failed to connect");
+ if (err == REXMPP_CONN_ERROR) {
+ s->tcp_state = REXMPP_TCP_NONE;
+ } else {
+ s->tcp_state = REXMPP_TCP_CONNECTION_FAILURE;
+ }
+ rexmpp_tcp_conn_finish(&s->server_connection);
+ rexmpp_try_next_host(s);
+ }
+}
+
+void rexmpp_after_srv (rexmpp_t *s) {
+ if (s->resolver_state == REXMPP_RESOLVER_SRV) {
+ s->resolver_state = REXMPP_RESOLVER_SRV_2;
+ } else if (s->resolver_state == REXMPP_RESOLVER_SRV_2) {
+ s->resolver_state = REXMPP_RESOLVER_READY;
+ }
+ if (s->resolver_state != REXMPP_RESOLVER_READY) {
+ return;
+ }
+
+ /* todo: sort the records */
+
+ if (s->server_srv == NULL && s->server_srv_tls == NULL) {
+ /* Failed to resolve any SRV records: fall back to connecting to
+ the bare JID's host at the default port. */
+ const char *host = jid_bare_to_host(s->initial_jid);
+ int port = 5222;
+ rexmpp_log(s, LOG_DEBUG, "Connecting to %s:%d", host, port);
+ rexmpp_process_conn_err(s, rexmpp_tcp_conn_init(&s->server_connection,
+ host, port));
+ } else {
+ rexmpp_try_next_host(s);
+ }
+}
+
+void rexmpp_srv_tls_cb (void *s_ptr,
+ int status,
+ int timeouts,
+ unsigned char *abuf,
+ int alen)
+{
+ rexmpp_t *s = s_ptr;
+ if (status == ARES_SUCCESS) {
+ ares_parse_srv_reply(abuf, alen, &(s->server_srv_tls));
+ } else {
+ rexmpp_log(s, LOG_WARNING, "Failed to query an xmpps-client SRV record: %s",
+ ares_strerror(status));
+ }
+ if (status != ARES_EDESTRUCTION) {
+ rexmpp_after_srv(s);
+ }
+}
+
+void rexmpp_srv_cb (void *s_ptr,
+ int status,
+ int timeouts,
+ unsigned char *abuf,
+ int alen)
+{
+ rexmpp_t *s = s_ptr;
+ if (status == ARES_SUCCESS) {
+ ares_parse_srv_reply(abuf, alen, &(s->server_srv));
+ } else {
+ rexmpp_log(s, LOG_WARNING, "Failed to query an xmpp-client SRV record: %s",
+ ares_strerror(status));
+ }
+ if (status != ARES_EDESTRUCTION) {
+ rexmpp_after_srv(s);
+ }
+}
+
+
+/* Should be called after reconnect, and after rexmpp_sm_handle_ack in
+ case of resumption. */
+rexmpp_err_t rexmpp_resend_stanzas (rexmpp_t *s) {
+ uint32_t i, count;
+ rexmpp_err_t ret = REXMPP_SUCCESS;
+ xmlNodePtr sq;
+ count = s->stanzas_out_count - s->stanzas_out_acknowledged;
+ for (i = 0; i < count && s->stanza_queue != NULL; i++) {
+ sq = xmlNextElementSibling(s->stanza_queue);
+ ret = rexmpp_send(s, s->stanza_queue);
+ if (ret != REXMPP_SUCCESS && ret != REXMPP_E_AGAIN) {
+ return ret;
+ }
+ s->stanza_queue = sq;
+ }
+ if (i != count) {
+ rexmpp_log(s, LOG_ERR,
+ "not enough stanzas in the queue: needed %u, had %u",
+ count, i);
+ }
+ /* Don't count these stanzas twice. */
+ s->stanzas_out_count -= i;
+ return ret;
+}
+
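+/* Handles a XEP-0198 acknowledgement: the 'h' attribute is the number
+   of stanzas handled by the server, so the corresponding stanzas are
+   dropped from the unacknowledged stanza queue. */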
+void rexmpp_sm_handle_ack (rexmpp_t *s, xmlNodePtr elem) {
+ char *h = xmlGetProp(elem, "h");
+ if (h != NULL) {
+ uint32_t prev_ack = s->stanzas_out_acknowledged;
+ s->stanzas_out_acknowledged = strtoul(h, NULL, 10);
+ xmlFree(h);
+ rexmpp_log(s, LOG_DEBUG,
+ "server acknowledged %u out of %u sent stanzas",
+ s->stanzas_out_acknowledged,
+ s->stanzas_out_count);
+ if (s->stanzas_out_count >= s->stanzas_out_acknowledged) {
+ if (prev_ack <= s->stanzas_out_acknowledged) {
+ uint32_t i;
+ for (i = prev_ack; i < s->stanzas_out_acknowledged; i++) {
+ xmlNodePtr sq = xmlNextElementSibling(s->stanza_queue);
+ xmlFreeNode(s->stanza_queue);
+ s->stanza_queue = sq;
+ }
+ } else {
+ rexmpp_log(s, LOG_ERR,
+ "the server acknowledged %u stanzas previously, and %u now",
+ prev_ack, s->stanzas_out_acknowledged);
+ }
+ } else {
+ rexmpp_log(s, LOG_ERR,
+ "the server acknowledged more stanzas than we have sent");
+ }
+ } else {
+ rexmpp_log(s, LOG_ERR, "no 'h' attribute in <a>");
+ }
+}
+
+void rexmpp_carbons_enabled (rexmpp_t *s, xmlNodePtr req, xmlNodePtr response) {
+ char *type = xmlGetProp(response, "type");
+ if (type != NULL && strcmp(type, "result") == 0) {
+ rexmpp_log(s, LOG_INFO, "carbons enabled");
+ s->carbons_state = REXMPP_CARBONS_ACTIVE;
+ } else {
+ rexmpp_log(s, LOG_WARNING, "failed to enable carbons");
+ s->carbons_state = REXMPP_CARBONS_INACTIVE;
+ }
+ free(type);
+}
+
+void rexmpp_discovery_info (rexmpp_t *s, xmlNodePtr req, xmlNodePtr response) {
+ xmlNodePtr query = xmlFirstElementChild(response);
+ if (rexmpp_xml_match(query, "http://jabber.org/protocol/disco#info",
+ "query")) {
+ xmlNodePtr child;
+ for (child = xmlFirstElementChild(query);
+ child != NULL;
+ child = xmlNextElementSibling(child))
+ {
+ if (rexmpp_xml_match(child, "http://jabber.org/protocol/disco#info",
+ "feature")) {
+ char *var = xmlGetProp(child, "var");
+ if (s->carbons_state != REXMPP_CARBONS_DISABLED &&
+ strcmp(var, "urn:xmpp:carbons:2") == 0) {
+ xmlNodePtr carbons_enable = xmlNewNode(NULL, "enable");
+ xmlNewNs(carbons_enable, "urn:xmpp:carbons:2", NULL);
+ s->carbons_state = REXMPP_CARBONS_NEGOTIATION;
+ rexmpp_iq_new(s, "set", NULL, carbons_enable,
+ rexmpp_carbons_enabled);
+ }
+ free(var);
+ }
+ }
+ }
+}
+
+void rexmpp_stream_is_ready(rexmpp_t *s) {
+ s->stream_state = REXMPP_STREAM_READY;
+ rexmpp_resend_stanzas(s);
+ rexmpp_send(s, xmlNewNode(NULL, "presence"));
+ xmlNodePtr disco_query = xmlNewNode(NULL, "query");
+ xmlNewNs(disco_query, "http://jabber.org/protocol/disco#info", NULL);
+ rexmpp_iq_new(s, "get", jid_bare_to_host(s->initial_jid),
+ disco_query, rexmpp_discovery_info);
+}
+
+/* Resource binding,
+ https://tools.ietf.org/html/rfc6120#section-7 */
+void rexmpp_bound (rexmpp_t *s, xmlNodePtr req, xmlNodePtr response) {
+ /* todo: handle errors */
+ xmlNodePtr child = xmlFirstElementChild(response);
+ if (rexmpp_xml_match(child, "urn:ietf:params:xml:ns:xmpp-bind", "bind")) {
+ xmlNodePtr jid = xmlFirstElementChild(child);
+ if (rexmpp_xml_match(jid, "urn:ietf:params:xml:ns:xmpp-bind", "jid")) {
+ rexmpp_log(s, LOG_INFO, "jid: %s", xmlNodeGetContent(jid));
+ s->assigned_jid = malloc(strlen(xmlNodeGetContent(jid)) + 1);
+ strcpy(s->assigned_jid, xmlNodeGetContent(jid));
+ }
+ if (s->stream_id == NULL &&
+ (child = rexmpp_xml_find_child(s->stream_features, "urn:xmpp:sm:3",
+ "sm"))) {
+ /* Negotiate stream management with resumption enabled, so that
+ the stream can be resumed later. */
+ s->sm_state = REXMPP_SM_NEGOTIATION;
+ s->stream_state = REXMPP_STREAM_SM_FULL;
+ xmlNodePtr sm_enable = xmlNewNode(NULL, "enable");
+ xmlNewNs(sm_enable, "urn:xmpp:sm:3", NULL);
+ xmlNewProp(sm_enable, "resume", "true");
+ rexmpp_send(s, sm_enable);
+ s->stanzas_out_count = 0;
+ s->stanzas_out_acknowledged = 0;
+ s->stanzas_in_count = 0;
+ } else {
+ s->sm_state = REXMPP_SM_INACTIVE;
+ rexmpp_stream_is_ready(s);
+ }
+ }
+}
+
+void rexmpp_stream_bind (rexmpp_t *s) {
+ /* Issue a bind request. */
+ s->stream_state = REXMPP_STREAM_BIND;
+ xmlNodePtr bind_cmd = xmlNewNode(NULL, "bind");
+ xmlNewNs(bind_cmd, "urn:ietf:params:xml:ns:xmpp-bind", NULL);
+ rexmpp_iq_new(s, "set", NULL, bind_cmd, rexmpp_bound);
+}
+
+void rexmpp_process_element(rexmpp_t *s) {
+ xmlNodePtr elem = s->current_element;
+
+ /* IQ responses */
+ if (rexmpp_xml_match(elem, "jabber:client", "iq")) {
+ char *type = xmlGetProp(elem, "type");
+ if (type != NULL &&
+ (strcmp(type, "result") == 0 || strcmp(type, "error") == 0)) {
+ char *id = xmlGetProp(elem, "id");
+ rexmpp_iq_t *req = s->active_iq;
+ int found = 0;
+ while (req != NULL && found == 0) {
+ char *req_id = xmlGetProp(req->request, "id");
+ if (strcmp(id, req_id) == 0) {
+ found = 1;
+ if (req->cb != NULL) {
+ req->cb(s, req->request, elem);
+ }
+ /* Remove the callback from the list, but keep in mind that
+ it could have added more entries. */
+ if (s->active_iq == req) {
+ s->active_iq = req->next;
+ } else {
+ rexmpp_iq_t *prev_req = s->active_iq;
+ for (prev_req = s->active_iq;
+ prev_req != NULL;
+ prev_req = prev_req->next)
+ {
+ if (prev_req->next == req) {
+ prev_req->next = req->next;
+ break;
+ }
+ }
+ }
+ xmlFreeNode(req->request);
+ free(req);
+ }
+ free(req_id);
+ req = req->next;
+ }
+ free(id);
+ }
+ free(type);
+ }
+
+ /* Stream negotiation,
+ https://tools.ietf.org/html/rfc6120#section-4.3 */
+ if (s->stream_state == REXMPP_STREAM_NEGOTIATION &&
+ rexmpp_xml_match(elem, "http://etherx.jabber.org/streams", "features")) {
+
+ /* Remember features. */
+ if (s->stream_features != NULL) {
+ xmlFreeNode(s->stream_features);
+ }
+ s->stream_features = xmlCopyNode(elem, 1);
+
+ /* Nothing to negotiate. */
+ if (xmlFirstElementChild(elem) == NULL) {
+ rexmpp_stream_is_ready(s);
+ return;
+ }
+
+ /* TODO: check for required features properly here. Currently
+ assuming that STARTTLS, SASL, and BIND (with an exception for
+ SM) are always required if they are present. */
+ xmlNodePtr child =
+ rexmpp_xml_find_child(elem, "urn:ietf:params:xml:ns:xmpp-tls",
+ "starttls");
+ if (child != NULL) {
+ s->stream_state = REXMPP_STREAM_STARTTLS;
+ xmlNodePtr starttls_cmd = xmlNewNode(NULL, "starttls");
+ xmlNewNs(starttls_cmd, "urn:ietf:params:xml:ns:xmpp-tls", NULL);
+ rexmpp_send(s, starttls_cmd);
+ return;
+ }
+
+ child = rexmpp_xml_find_child(elem, "urn:ietf:params:xml:ns:xmpp-sasl",
+ "mechanisms");
+ if (child != NULL) {
+ s->stream_state = REXMPP_STREAM_SASL;
+ s->sasl_state = REXMPP_SASL_NEGOTIATION;
+ char mech_list[2048]; /* todo: perhaps grow it dynamically */
+ mech_list[0] = '\0';
+ xmlNodePtr mechanism;
+ for (mechanism = xmlFirstElementChild(child);
+ mechanism != NULL;
+ mechanism = xmlNextElementSibling(mechanism)) {
+ if (rexmpp_xml_match(mechanism, "urn:ietf:params:xml:ns:xmpp-sasl",
+ "mechanism")) {
+ snprintf(mech_list + strlen(mech_list),
+ 2048 - strlen(mech_list),
+ "%s ",
+ xmlNodeGetContent(mechanism));
+ }
+ }
+ const char *mech =
+ gsasl_client_suggest_mechanism(s->sasl_ctx, mech_list);
+ rexmpp_log(s, LOG_INFO, "Selected SASL mechanism: %s", mech);
+ int sasl_err;
+ char *sasl_buf;
+ sasl_err = gsasl_client_start(s->sasl_ctx, mech, &(s->sasl_session));
+ if (sasl_err != GSASL_OK) {
+ rexmpp_log(s, LOG_CRIT, "Failed to initialise SASL session: %s",
+ gsasl_strerror(sasl_err));
+ s->sasl_state = REXMPP_SASL_ERROR;
+ return;
+ }
+ sasl_err = gsasl_step64 (s->sasl_session, "", (char**)&sasl_buf);
+ if (sasl_err != GSASL_OK) {
+ if (sasl_err == GSASL_NEEDS_MORE) {
+ rexmpp_log(s, LOG_DEBUG, "SASL needs more data");
+ } else {
+ rexmpp_log(s, LOG_ERR, "SASL error: %s",
+ gsasl_strerror(sasl_err));
+ s->sasl_state = REXMPP_SASL_ERROR;
+ return;
+ }
+ }
+ xmlNodePtr auth_cmd = xmlNewNode(NULL, "auth");
+ xmlNewProp(auth_cmd, "mechanism", mech);
+ xmlNewNs(auth_cmd, "urn:ietf:params:xml:ns:xmpp-sasl", NULL);
+ xmlNodeAddContent(auth_cmd, sasl_buf);
+ free(sasl_buf);
+ rexmpp_send(s, auth_cmd);
+ return;
+ }
+
+ child = rexmpp_xml_find_child(elem, "urn:xmpp:sm:3", "sm");
+ if (s->stream_id != NULL && child != NULL) {
+ s->stream_state = REXMPP_STREAM_SM_RESUME;
+ char buf[11];
+ snprintf(buf, 11, "%u", s->stanzas_in_count);
+ xmlNodePtr sm_resume = xmlNewNode(NULL, "resume");
+ xmlNewNs(sm_resume, "urn:xmpp:sm:3", NULL);
+ xmlNewProp(sm_resume, "previd", s->stream_id);
+ xmlNewProp(sm_resume, "h", buf);
+ rexmpp_send(s, sm_resume);
+ return;
+ }
+
+ child =
+ rexmpp_xml_find_child(elem, "urn:ietf:params:xml:ns:xmpp-bind", "bind");
+ if (child != NULL) {
+ rexmpp_stream_bind(s);
+ return;
+ }
+ }
+
+ /* Stream errors, https://tools.ietf.org/html/rfc6120#section-4.9 */
+ if (rexmpp_xml_match(elem, "http://etherx.jabber.org/streams",
+ "error")) {
+ rexmpp_log(s, LOG_ERR, "stream error");
+ s->stream_state = REXMPP_STREAM_ERROR;
+ return;
+ }
+
+ /* STARTTLS negotiation,
+ https://tools.ietf.org/html/rfc6120#section-5 */
+ if (s->stream_state == REXMPP_STREAM_STARTTLS) {
+ if (rexmpp_xml_match(elem, "urn:ietf:params:xml:ns:xmpp-tls",
+ "proceed")) {
+ rexmpp_tls_start(s);
+ return;
+ } else if (rexmpp_xml_match(elem, "urn:ietf:params:xml:ns:xmpp-tls",
+ "failure")) {
+ rexmpp_log(s, LOG_ERR, "STARTTLS failure");
+ return;
+ }
+ }
+
+ /* SASL negotiation,
+ https://tools.ietf.org/html/rfc6120#section-6 */
+ if (s->stream_state == REXMPP_STREAM_SASL) {
+ char *sasl_buf;
+ int sasl_err;
+ if (rexmpp_xml_match(elem, "urn:ietf:params:xml:ns:xmpp-sasl",
+ "challenge")) {
+ sasl_err = gsasl_step64 (s->sasl_session, xmlNodeGetContent(elem),
+ (char**)&sasl_buf);
+ if (sasl_err != GSASL_OK) {
+ if (sasl_err == GSASL_NEEDS_MORE) {
+ rexmpp_log(s, LOG_DEBUG, "SASL needs more data");
+ } else {
+ rexmpp_log(s, LOG_ERR, "SASL error: %s",
+ gsasl_strerror(sasl_err));
+ s->sasl_state = REXMPP_SASL_ERROR;
+ return;
+ }
+ }
+ xmlNodePtr response = xmlNewNode(NULL, "response");
+ xmlNewNs(response, "urn:ietf:params:xml:ns:xmpp-sasl", NULL);
+ xmlNodeAddContent(response, sasl_buf);
+ free(sasl_buf);
+ rexmpp_send(s, response);
+ return;
+ } else if (rexmpp_xml_match(elem, "urn:ietf:params:xml:ns:xmpp-sasl",
+ "success")) {
+ sasl_err = gsasl_step64 (s->sasl_session, xmlNodeGetContent(elem),
+ (char**)&sasl_buf);
+ free(sasl_buf);
+ if (sasl_err == GSASL_OK) {
+ rexmpp_log(s, LOG_DEBUG, "SASL success");
+ } else {
+ rexmpp_log(s, LOG_ERR, "SASL error: %s",
+ gsasl_strerror(sasl_err));
+ s->sasl_state = REXMPP_SASL_ERROR;
+ return;
+ }
+ s->sasl_state = REXMPP_SASL_ACTIVE;
+ s->stream_state = REXMPP_STREAM_RESTART;
+ return;
+ } else if (rexmpp_xml_match(elem, "urn:ietf:params:xml:ns:xmpp-sasl",
+ "failure")) {
+ /* todo: would be nice to retry here, but just giving up for now */
+ rexmpp_log(s, LOG_ERR, "SASL failure");
+ rexmpp_stop(s);
+ return;
+ }
+ }
+
+ /* Stream management, https://xmpp.org/extensions/xep-0198.html */
+ if (s->stream_state == REXMPP_STREAM_SM_FULL) {
+ if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "enabled")) {
+ s->sm_state = REXMPP_SM_ACTIVE;
+ char *resume = xmlGetProp(elem, "resume");
+ if (resume != NULL) {
+ if (s->stream_id != NULL) {
+ free(s->stream_id);
+ }
+ s->stream_id = xmlGetProp(elem, "id");
+ xmlFree(resume);
+ }
+ rexmpp_stream_is_ready(s);
+ } else if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "failed")) {
+ s->stream_state = REXMPP_STREAM_SM_ACKS;
+ s->sm_state = REXMPP_SM_NEGOTIATION;
+ xmlNodePtr sm_enable = xmlNewNode(NULL, "enable");
+ xmlNewNs(sm_enable, "urn:xmpp:sm:3", NULL);
+ rexmpp_send(s, sm_enable);
+ }
+ } else if (s->stream_state == REXMPP_STREAM_SM_ACKS) {
+ if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "enabled")) {
+ s->sm_state = REXMPP_SM_ACTIVE;
+ if (s->stream_id != NULL) {
+ free(s->stream_id);
+ s->stream_id = NULL;
+ }
+ } else if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "failed")) {
+ s->sm_state = REXMPP_SM_INACTIVE;
+ xmlNodePtr sm_enable = xmlNewNode(NULL, "enable");
+ xmlNewNs(sm_enable, "urn:xmpp:sm:3", NULL);
+ rexmpp_send(s, sm_enable);
+ }
+ rexmpp_stream_is_ready(s);
+ } else if (s->stream_state == REXMPP_STREAM_SM_RESUME) {
+ if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "resumed")) {
+ s->sm_state = REXMPP_SM_ACTIVE;
+ s->stream_state = REXMPP_STREAM_READY;
+ rexmpp_sm_handle_ack(s, elem);
+ rexmpp_resend_stanzas(s);
+ } else if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "failed")) {
+ /* Back to binding, but clean up the stream state first. */
+ free(s->stream_id);
+ s->stream_id = NULL;
+ while (s->active_iq != NULL) {
+ /* todo: check that those are not queued for resending? */
+ rexmpp_iq_t *next = s->active_iq->next;
+ xmlFreeNode(s->active_iq->request);
+ free(s->active_iq);
+ s->active_iq = next;
+ }
+ xmlNodePtr child =
+ rexmpp_xml_find_child(s->stream_features,
+ "urn:ietf:params:xml:ns:xmpp-bind",
+ "bind");
+ if (child != NULL) {
+ rexmpp_stream_bind(s);
+ return;
+ }
+ }
+ }
+
+ if (s->sm_state == REXMPP_SM_ACTIVE && rexmpp_xml_is_stanza(elem)) {
+ s->stanzas_in_count++;
+ }
+ if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "r")) {
+ rexmpp_sm_ack(s);
+ } else if (rexmpp_xml_match(elem, "urn:xmpp:sm:3", "a")) {
+ rexmpp_sm_handle_ack(s, elem);
+ }
+}
+
+
+void rexmpp_sax_characters (rexmpp_t *s, const char *ch, int len)
+{
+ if (s->current_element != NULL) {
+ xmlNodeAddContentLen(s->current_element, ch, len);
+ }
+}
+
+void rexmpp_sax_start_elem_ns (rexmpp_t *s,
+ const char *localname,
+ const char *prefix,
+ const char *URI,
+ int nb_namespaces,
+ const char **namespaces,
+ int nb_attributes,
+ int nb_defaulted,
+ const char **attributes)
+{
+ int i;
+ if (s->stream_state == REXMPP_STREAM_OPENING &&
+ strcmp(localname, "stream") == 0 &&
+ strcmp(URI, "http://etherx.jabber.org/streams") == 0) {
+ rexmpp_log(s, LOG_DEBUG, "stream start");
+ s->stream_state = REXMPP_STREAM_NEGOTIATION;
+ return;
+ }
+
+ if (s->stream_state != REXMPP_STREAM_OPENING) {
+ if (s->current_element == NULL) {
+ s->current_element = xmlNewNode(NULL, localname);
+ s->current_element_root = s->current_element;
+ } else {
+ xmlNodePtr node = xmlNewNode(NULL, localname);
+ xmlAddChild(s->current_element, node);
+ s->current_element = node;
+ }
+ xmlNsPtr ns = xmlNewNs(s->current_element, URI, prefix);
+ s->current_element->ns = ns;
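+ /* libxml2's SAX2 interface packs each attribute into 5 pointers:
+ localname, prefix, URI, value start, value end. The value is not
+ NUL-terminated, hence the copying below. */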
+ for (i = 0; i < nb_attributes; i++) {
+ size_t attr_len = attributes[i * 5 + 4] - attributes[i * 5 + 3];
+ char *attr_val = malloc(attr_len + 1);
+ attr_val[attr_len] = '\0';
+ strncpy(attr_val, attributes[i * 5 + 3], attr_len);
+ xmlNewProp(s->current_element, attributes[i * 5], attr_val);
+ free(attr_val);
+ }
+ }
+}
+
+void rexmpp_sax_end_elem_ns (rexmpp_t *s,
+ const char *localname,
+ const char *prefix,
+ const char *URI)
+{
+ if ((s->stream_state == REXMPP_STREAM_CLOSING ||
+ s->stream_state == REXMPP_STREAM_ERROR) &&
+ strcmp(localname, "stream") == 0 &&
+ strcmp(URI, "http://etherx.jabber.org/streams") == 0) {
+ rexmpp_log(s, LOG_DEBUG, "stream end");
+ if (s->sasl_state == REXMPP_SASL_ACTIVE) {
+ gsasl_finish(s->sasl_session);
+ s->sasl_session = NULL;
+ s->sasl_state = REXMPP_SASL_INACTIVE;
+ }
+ s->stream_state = REXMPP_STREAM_CLOSED;
+ if (s->tls_state == REXMPP_TLS_ACTIVE) {
+ s->tls_state = REXMPP_TLS_CLOSING;
+ } else {
+ rexmpp_log(s, LOG_DEBUG, "closing the socket");
+ close(s->server_socket);
+ s->server_socket = -1;
+ rexmpp_cleanup(s);
+ s->tcp_state = REXMPP_TCP_CLOSED;
+ }
+ return;
+ }
+
+ if (s->current_element != s->current_element_root) {
+ s->current_element = s->current_element->parent;
+ } else {
+ if (s->xml_in_cb != NULL && s->xml_in_cb(s, s->current_element) != 0) {
+ rexmpp_log(s, LOG_WARNING,
+ "Message processing was cancelled by xml_in_cb.");
+ } else {
+ rexmpp_process_element(s);
+ }
+
+ xmlFreeNode(s->current_element);
+ s->current_element = NULL;
+ s->current_element_root = NULL;
+ }
+}
+
+rexmpp_err_t rexmpp_close (rexmpp_t *s) {
+ s->stream_state = REXMPP_STREAM_CLOSING;
+ char *close_stream = "</stream:stream>";
+ return rexmpp_send_raw(s, close_stream, strlen(close_stream));
+}
+
+rexmpp_err_t rexmpp_stop (rexmpp_t *s) {
+ if (s->sm_state == REXMPP_SM_ACTIVE) {
+ int ret = rexmpp_sm_ack(s);
+ if (ret != REXMPP_SUCCESS && ret != REXMPP_E_AGAIN) {
+ return ret;
+ }
+ }
+ s->stream_state = REXMPP_STREAM_CLOSE_REQUESTED;
+ if (s->send_buffer == NULL) {
+ return rexmpp_close(s);
+ } else {
+ return REXMPP_E_AGAIN;
+ }
+}
+
+rexmpp_err_t rexmpp_run (rexmpp_t *s, fd_set *read_fds, fd_set *write_fds) {
+ struct timeval now;
+ gettimeofday(&now, NULL);
+
+ /* Inactive: start by querying SRV records. */
+ if ((s->resolver_state == REXMPP_RESOLVER_NONE ||
+ s->resolver_state == REXMPP_RESOLVER_READY) &&
+ (s->tcp_state == REXMPP_TCP_NONE ||
+ ((s->tcp_state == REXMPP_TCP_ERROR ||
+ s->tcp_state == REXMPP_TCP_CONNECTION_FAILURE) &&
+ s->reconnect_number > 0 &&
+ s->next_reconnect_time.tv_sec <= now.tv_sec))) {
+ rexmpp_log(s, LOG_DEBUG, "start (or reconnect)");
+ size_t srv_query_buf_len = strlen(jid_bare_to_host(s->initial_jid)) +
+ strlen("_xmpps-client._tcp..") +
+ 1;
+ char *srv_query = malloc(srv_query_buf_len);
+ snprintf(srv_query, srv_query_buf_len,
+ "_xmpps-client._tcp.%s.", jid_bare_to_host(s->initial_jid));
+ ares_query(s->resolver_channel, srv_query,
+ ns_c_in, ns_t_srv, rexmpp_srv_tls_cb, s);
+ snprintf(srv_query, srv_query_buf_len,
+ "_xmpp-client._tcp.%s.", jid_bare_to_host(s->initial_jid));
+ ares_query(s->resolver_channel, srv_query,
+ ns_c_in, ns_t_srv, rexmpp_srv_cb, s);
+ s->resolver_state = REXMPP_RESOLVER_SRV;
+ free(srv_query);
+ }
+
+ /* Resolving SRV records. This continues in rexmpp_srv_tls_cb,
+ rexmpp_srv_cb, and rexmpp_after_srv, possibly leading to
+ connection initiation. */
+ if (s->resolver_state != REXMPP_RESOLVER_NONE &&
+ s->resolver_state != REXMPP_RESOLVER_READY) {
+ ares_process(s->resolver_channel, read_fds, write_fds);
+ }
+
+ /* Connecting. Continues in rexmpp_process_conn_err, possibly
+ leading to stream opening. */
+ if (s->tcp_state == REXMPP_TCP_CONNECTING) {
+ rexmpp_process_conn_err(s,
+ rexmpp_tcp_conn_proceed(&s->server_connection,
+ read_fds, write_fds));
+ }
+
+ /* The things we do while connected. */
+ if (s->tcp_state == REXMPP_TCP_CONNECTED) {
+
+ /* Sending queued data. */
+ if (FD_ISSET(s->server_socket, write_fds) &&
+ s->send_buffer != NULL) {
+ rexmpp_send_continue(s);
+ }
+
+ /* Receiving data. Leads to all kinds of things. */
+ if (FD_ISSET(s->server_socket, read_fds) &&
+ s->stream_state != REXMPP_STREAM_NONE &&
+ s->tcp_state == REXMPP_TCP_CONNECTED &&
+ s->tls_state != REXMPP_TLS_HANDSHAKE) {
+ rexmpp_recv(s);
+ }
+
+ /* Performing a TLS handshake. A stream restart happens after
+ this, if everything goes well. */
+ if (s->tls_state == REXMPP_TLS_HANDSHAKE) {
+ rexmpp_tls_handshake(s);
+ }
+
+ /* Restarting a stream if needed after the above actions. Since it
+ involves resetting the parser, functions called by that parser
+ can't do it on their own. */
+ if (s->stream_state == REXMPP_STREAM_RESTART) {
+ xmlCtxtResetPush(s->xml_parser, "", 0, "", "utf-8");
+ rexmpp_stream_open(s);
+ }
+
+ /* Closing the stream once everything is sent. */
+ if (s->stream_state == REXMPP_STREAM_CLOSE_REQUESTED &&
+ s->send_buffer == NULL) {
+ rexmpp_close(s);
+ }
+
+ /* Closing TLS and TCP connections once the stream is closed. If
+ there's no TLS, the TCP connection is closed at once
+ elsewhere. */
+ if (s->stream_state == REXMPP_STREAM_CLOSED &&
+ s->tls_state == REXMPP_TLS_CLOSING) {
+ int ret = gnutls_bye(s->gnutls_session, GNUTLS_SHUT_RDWR);
+ if (ret == GNUTLS_E_SUCCESS) {
+ s->tls_state = REXMPP_TLS_INACTIVE;
+ rexmpp_cleanup(s);
+ s->tcp_state = REXMPP_TCP_CLOSED;
+ }
+ }
+ }
+ if (s->tcp_state == REXMPP_TCP_CLOSED) {
+ return REXMPP_SUCCESS;
+ }
+ return REXMPP_E_AGAIN;
+}
+
+int rexmpp_fds(rexmpp_t *s, fd_set *read_fds, fd_set *write_fds) {
+ int conn_fd, max_fd = 0;
+
+ if (s->resolver_state != REXMPP_RESOLVER_NONE &&
+ s->resolver_state != REXMPP_RESOLVER_READY) {
+ max_fd = ares_fds(s->resolver_channel, read_fds, write_fds);
+ }
+
+ if (s->tcp_state == REXMPP_TCP_CONNECTING) {
+ conn_fd = rexmpp_tcp_conn_fds(&s->server_connection, read_fds, write_fds);
+ if (conn_fd > max_fd) {
+ max_fd = conn_fd;
+ }
+ }
+
+ if (s->tls_state == REXMPP_TLS_HANDSHAKE) {
+ if (gnutls_record_get_direction(s->gnutls_session) == 0) {
+ FD_SET(s->server_socket, read_fds);
+ } else {
+ FD_SET(s->server_socket, write_fds);
+ }
+ if (s->server_socket + 1 > max_fd) {
+ max_fd = s->server_socket + 1;
+ }
+ }
+
+ if (s->tcp_state == REXMPP_TCP_CONNECTED) {
+ FD_SET(s->server_socket, read_fds);
+ if (s->send_buffer != NULL) {
+ FD_SET(s->server_socket, write_fds);
+ }
+ if (s->server_socket + 1 > max_fd) {
+ max_fd = s->server_socket + 1;
+ }
+ }
+
+ return max_fd;
+}
+
+struct timeval *rexmpp_timeout (rexmpp_t *s,
+ struct timeval *max_tv,
+ struct timeval *tv)
+{
+ struct timeval *ret = max_tv;
+
+ if (s->resolver_state != REXMPP_RESOLVER_NONE &&
+ s->resolver_state != REXMPP_RESOLVER_READY) {
+ ret = ares_timeout(s->resolver_channel, max_tv, tv);
+ } else if (s->tcp_state == REXMPP_TCP_CONNECTING) {
+ ret = rexmpp_tcp_conn_timeout(&s->server_connection, max_tv, tv);
+ }
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ if (s->reconnect_number > 0 &&
+ s->next_reconnect_time.tv_sec > now.tv_sec &&
+ (ret == NULL ||
+ s->next_reconnect_time.tv_sec - now.tv_sec < ret->tv_sec)) {
+ tv->tv_sec = s->next_reconnect_time.tv_sec - now.tv_sec;
+ tv->tv_usec = 0;
+ ret = tv;
+ }
+
+ return ret;
+}
diff --git a/src/rexmpp.h b/src/rexmpp.h
new file mode 100644
index 0000000..662d540
--- /dev/null
+++ b/src/rexmpp.h
@@ -0,0 +1,286 @@
+/**
+ @file rexmpp.h
+ @brief rexmpp, a reusable XMPP IM client library.
+ @author defanor <defanor@uberspace.net>
+ @date 2020
+ @copyright MIT license.
+*/
+
+#ifndef REXMPP_H
+#define REXMPP_H
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <ares.h>
+#include <gnutls/gnutls.h>
+#include <gsasl.h>
+#include <libxml/tree.h>
+#include "rexmpp_tcp.h"
+
+
+typedef struct rexmpp rexmpp_t;
+typedef void (*rexmpp_iq_callback_t) (rexmpp_t *s, xmlNodePtr request, xmlNodePtr response);
+
+typedef struct rexmpp_iq rexmpp_iq_t;
+struct rexmpp_iq
+{
+ xmlNodePtr request;
+ rexmpp_iq_callback_t cb;
+ rexmpp_iq_t *next;
+};
+
+
+/** @brief DNS resolver state */
+enum resolver_st {
+ REXMPP_RESOLVER_NONE,
+ REXMPP_RESOLVER_READY,
+ REXMPP_RESOLVER_SRV,
+ REXMPP_RESOLVER_SRV_2,
+ REXMPP_RESOLVER_FAILURE
+};
+
+/** @brief TCP connection state */
+enum tcp_st {
+ REXMPP_TCP_NONE,
+ REXMPP_TCP_CONNECTING,
+ REXMPP_TCP_CONNECTED,
+ REXMPP_TCP_CLOSED,
+ REXMPP_TCP_CONNECTION_FAILURE,
+ REXMPP_TCP_ERROR
+};
+
+/** @brief XML stream state */
+enum stream_st {
+ REXMPP_STREAM_NONE,
+ REXMPP_STREAM_OPENING,
+ REXMPP_STREAM_NEGOTIATION,
+ REXMPP_STREAM_STARTTLS,
+ REXMPP_STREAM_SASL,
+ REXMPP_STREAM_BIND,
+ REXMPP_STREAM_SM_FULL,
+ REXMPP_STREAM_SM_ACKS,
+ REXMPP_STREAM_SM_RESUME,
+ REXMPP_STREAM_RESTART,
+ REXMPP_STREAM_READY,
+ REXMPP_STREAM_CLOSE_REQUESTED,
+ REXMPP_STREAM_CLOSING,
+ REXMPP_STREAM_CLOSED,
+ REXMPP_STREAM_ERROR
+};
+
+/** @brief TLS state */
+enum tls_st {
+ REXMPP_TLS_INACTIVE,
+ REXMPP_TLS_AWAITING_DIRECT,
+ REXMPP_TLS_HANDSHAKE,
+ REXMPP_TLS_ACTIVE,
+ REXMPP_TLS_CLOSING,
+ REXMPP_TLS_CLOSED,
+ REXMPP_TLS_ERROR
+};
+
+/** @brief SASL state */
+enum sasl_st {
+ REXMPP_SASL_INACTIVE,
+ REXMPP_SASL_NEGOTIATION,
+ REXMPP_SASL_ACTIVE,
+ REXMPP_SASL_ERROR
+};
+
+/** @brief Stream management state */
+enum sm_st {
+ REXMPP_SM_INACTIVE,
+ REXMPP_SM_NEGOTIATION,
+ REXMPP_SM_ACTIVE
+};
+
+/** @brief Carbons state */
+enum carbons_st {
+ REXMPP_CARBONS_INACTIVE,
+ REXMPP_CARBONS_NEGOTIATION,
+ REXMPP_CARBONS_ACTIVE,
+ REXMPP_CARBONS_DISABLED
+};
+
+/** Error codes. */
+enum rexmpp_err {
+ /** An operation is finished. */
+ REXMPP_SUCCESS,
+ /** An operation is in progress. */
+ REXMPP_E_AGAIN,
+ /** A message can't be queued for sending, because the queue is
+ full. */
+ REXMPP_E_SEND_QUEUE_FULL,
+ /** The library can't take responsibility for message delivery (and
+ doesn't try to send it), because the XEP-0198 stanza queue is
+ full. */
+ REXMPP_E_STANZA_QUEUE_FULL,
+ /** An operation (reading or sending) was cancelled by a user. */
+ REXMPP_E_CANCELLED,
+ /** An attempt to send while send buffer is empty. */
+ REXMPP_E_SEND_BUFFER_EMPTY,
+ /** An attempt to start sending while send buffer is not empty. */
+ REXMPP_E_SEND_BUFFER_NOT_EMPTY,
+ /** SASL-related error. */
+ REXMPP_E_SASL,
+ /** TLS-related error. */
+ REXMPP_E_TLS,
+ /** TCP-related error. */
+ REXMPP_E_TCP,
+ /** DNS-related error. */
+ REXMPP_E_DNS,
+ /** XML-related error. */
+ REXMPP_E_XML,
+ /** Failure to allocate memory. */
+ REXMPP_E_MALLOC
+};
+typedef enum rexmpp_err rexmpp_err_t;
+
+typedef void (*log_function_t) (rexmpp_t *s, int priority, const char *format, va_list args);
+typedef int (*sasl_property_cb_t) (Gsasl *ctx, Gsasl_session *sctx, Gsasl_property prop);
+typedef int (*xml_in_cb_t) (rexmpp_t *s, xmlNodePtr node);
+typedef int (*xml_out_cb_t) (rexmpp_t *s, xmlNodePtr node);
+
+/** @brief Complete connection state */
+struct rexmpp
+{
+ /* Numeric states: they govern future actions, record where
+ processing left off before returning from rexmpp_run, and
+ communicate the overall current state to the user. */
+ enum resolver_st resolver_state;
+ enum tcp_st tcp_state;
+ enum stream_st stream_state;
+ enum tls_st tls_state;
+ enum sasl_st sasl_state;
+ enum sm_st sm_state;
+ enum carbons_st carbons_state;
+
+ /* Basic configuration. */
+ const char *initial_jid;
+
+ /* Resource limits. */
+ uint32_t stanza_queue_size;
+ uint32_t send_queue_size;
+ uint32_t iq_queue_size;
+
+ /* Callbacks. */
+ log_function_t log_function;
+ sasl_property_cb_t sasl_property_cb;
+ xml_in_cb_t xml_in_cb;
+ xml_out_cb_t xml_out_cb;
+
+ /* Stream-related state. */
+ char *assigned_jid;
+ xmlNodePtr stream_features;
+
+ /* IQs we're waiting for responses to. */
+ rexmpp_iq_t *active_iq;
+
+ /* Connection and stream management. */
+ unsigned int id_counter;
+ unsigned int reconnect_number;
+ struct timeval next_reconnect_time;
+ xmlNodePtr stanza_queue;
+ uint32_t stanzas_out_count;
+ uint32_t stanzas_out_acknowledged;
+ uint32_t stanzas_in_count;
+ char *stream_id;
+
+ /* DNS-related structures. */
+ ares_channel resolver_channel;
+ struct ares_srv_reply *server_srv;
+ struct ares_srv_reply *server_srv_cur;
+ struct ares_srv_reply *server_srv_tls;
+ struct ares_srv_reply *server_srv_tls_cur;
+
+ /* The primary socket used for communication with the server. */
+ int server_socket;
+
+ /* A structure used to establish a TCP connection. */
+ rexmpp_tcp_conn_t server_connection;
+
+ /* Send buffer. NULL if there is nothing to send (and it must not be
+ NULL if there is anything in the send queue). Data is never
+ appended to it directly; use send_queue for queuing. */
+ char *send_buffer;
+ ssize_t send_buffer_len;
+ ssize_t send_buffer_sent;
+
+ /* A queue of XML elements to send. */
+ xmlNodePtr send_queue;
+
+ /* XML parser context, and pointers to the current element (and its
+ root) used for building XML nodes with the SAX2 parser interface. */
+ xmlParserCtxtPtr xml_parser;
+ xmlNodePtr current_element_root;
+ xmlNodePtr current_element;
+
+ /* TLS structures. */
+ void *tls_session_data;
+ size_t tls_session_data_size;
+ gnutls_session_t gnutls_session;
+ gnutls_certificate_credentials_t gnutls_cred;
+
+ /* SASL structures. */
+ Gsasl *sasl_ctx;
+ Gsasl_session *sasl_session;
+};
+
+/**
+ @brief ::rexmpp structure initialisation.
+ @param[out] s An allocated structure.
+ @param[in] jid Initial bare JID.
+ @param[in] log_function A user-provided logging function, can be
+ NULL.
+ @param[in] sasl_property_cb A callback to ask for SASL properties
+ (such as password).
+ @param[in] xml_in_cb A function to handle incoming XML elements. It
+ is called before other processing, so it can alter the elements, or
+ interrupt processing by returning a non-zero value. Optional.
+ @param[in] xml_out_cb Akin to the previous one, but for outbound
+ elements.
+ @returns ::REXMPP_SUCCESS or some ::rexmpp_err error.
+ */
+rexmpp_err_t rexmpp_init (rexmpp_t *s,
+ const char *jid,
+ log_function_t log_function,
+ sasl_property_cb_t sasl_property_cb,
+ xml_in_cb_t xml_in_cb,
+ xml_out_cb_t xml_out_cb);
+/**
+ @brief ::rexmpp structure deinitialisation. This will free all the
+ allocated resources.
+ @param[in,out] s A structure to deinitialise.
+*/
+void rexmpp_done (rexmpp_t *s);
+
+/**
+ @brief Runs a single iteration.
+ @param[in,out] s An initialised ::rexmpp structure.
+ @param[in] read_fds File descriptors available for reading from.
+ @param[in] write_fds File descriptors available for writing to.
+
+ \callergraph
+*/
+rexmpp_err_t rexmpp_run (rexmpp_t *s, fd_set *read_fds, fd_set *write_fds);
+
+/**
+ @brief Requests stream closing.
+*/
+rexmpp_err_t rexmpp_stop (rexmpp_t *s);
+
+/**
+ @brief Sends an XML element, queueing it if it cannot be sent
+ immediately.
+ @param[in,out] s A ::rexmpp structure.
+ @param[in] node An XML element to send. The library assumes
+ ownership of the element, so it must not be freed by the caller.
+*/
+rexmpp_err_t rexmpp_send (rexmpp_t *s, xmlNodePtr node);
+
+struct timeval *rexmpp_timeout (rexmpp_t *s, struct timeval *max_tv, struct timeval *tv);
+int rexmpp_fds (rexmpp_t *s, fd_set *read_fds, fd_set *write_fds);
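+
+/* A minimal event loop sketch (illustrative only, not a part of the
+   library; my_log and my_sasl_cb are hypothetical user-provided
+   callbacks matching log_function_t and sasl_property_cb_t):
+
+     rexmpp_t s;
+     if (rexmpp_init(&s, "user@example.org", my_log, my_sasl_cb,
+                     NULL, NULL) != REXMPP_SUCCESS) {
+       return -1;
+     }
+     while (1) {
+       fd_set read_fds, write_fds;
+       FD_ZERO(&read_fds);
+       FD_ZERO(&write_fds);
+       int nfds = rexmpp_fds(&s, &read_fds, &write_fds);
+       struct timeval tv;
+       struct timeval *tv_ptr = rexmpp_timeout(&s, NULL, &tv);
+       if (select(nfds, &read_fds, &write_fds, NULL, tv_ptr) == -1) {
+         break;
+       }
+       if (rexmpp_run(&s, &read_fds, &write_fds) == REXMPP_SUCCESS) {
+         break;
+       }
+     }
+     rexmpp_done(&s);
+
+   rexmpp_stop can be called (e.g., on a signal) to close the stream
+   gracefully, after which rexmpp_run eventually returns
+   REXMPP_SUCCESS. */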
+
+
+char *rexmpp_xml_serialize (xmlNodePtr node);
+xmlNodePtr rexmpp_xml_add_id (rexmpp_t *s, xmlNodePtr node);
+
+#endif
diff --git a/src/rexmpp_tcp.c b/src/rexmpp_tcp.c
new file mode 100644
index 0000000..55e6c1b
--- /dev/null
+++ b/src/rexmpp_tcp.c
@@ -0,0 +1,358 @@
+/**
+ @file rexmpp_tcp.c
+ @brief TCP connection establishment.
+ @author defanor <defanor@uberspace.net>
+ @date 2020
+ @copyright MIT license.
+*/
+
+#include <ares.h>
+#include <netdb.h>
+#include <arpa/nameser.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <memory.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <arpa/inet.h>
+#include <fcntl.h>
+
+#include "rexmpp_tcp.h"
+
+
+void rexmpp_dns_aaaa_cb (void *ptr,
+ int status,
+ int timeouts,
+ unsigned char *abuf,
+ int alen)
+{
+ rexmpp_tcp_conn_t *conn = ptr;
+ conn->resolver_status_v6 = status;
+ if (status == ARES_SUCCESS) {
+ conn->resolution_v6 = REXMPP_CONN_RESOLUTION_SUCCESS;
+ ares_parse_aaaa_reply(abuf, alen, &(conn->addr_v6), NULL, NULL);
+ conn->addr_cur_v6 = -1;
+ } else {
+ conn->resolution_v6 = REXMPP_CONN_RESOLUTION_FAILURE;
+ }
+}
+
+void rexmpp_dns_a_cb (void *ptr,
+ int status,
+ int timeouts,
+ unsigned char *abuf,
+ int alen)
+{
+ rexmpp_tcp_conn_t *conn = ptr;
+ conn->resolver_status_v4 = status;
+ if (status == ARES_SUCCESS) {
+ conn->resolution_v4 = REXMPP_CONN_RESOLUTION_SUCCESS;
+ ares_parse_a_reply(abuf, alen, &(conn->addr_v4), NULL, NULL);
+ conn->addr_cur_v4 = -1;
+ if (conn->resolution_v6 == REXMPP_CONN_RESOLUTION_WAITING) {
+ /* Wait for 50 ms for IPv6. */
+ gettimeofday(&(conn->next_connection_time), NULL);
+ conn->next_connection_time.tv_usec += REXMPP_TCP_IPV6_DELAY_MS * 1000;
+ if (conn->next_connection_time.tv_usec >= 1000000) {
+ conn->next_connection_time.tv_usec -= 1000000;
+ conn->next_connection_time.tv_sec++;
+ }
+ }
+ } else {
+ conn->resolution_v4 = REXMPP_CONN_RESOLUTION_FAILURE;
+ }
+}
+
+rexmpp_tcp_conn_error_t
+rexmpp_tcp_conn_init (rexmpp_tcp_conn_t *conn,
+ const char *host,
+ int port)
+{
+ int i;
+ for (i = 0; i < REXMPP_TCP_MAX_CONNECTION_ATTEMPTS; i++) {
+ conn->sockets[i] = -1;
+ }
+ conn->connection_attempts = 0;
+  conn->host = host;
+  conn->port = port;
+ conn->addr_v4 = NULL;
+ conn->addr_v6 = NULL;
+ conn->resolver_error = ares_init(&(conn->resolver_channel));
+ conn->fd = -1;
+ conn->next_connection_time.tv_sec = 0;
+ conn->next_connection_time.tv_usec = 0;
+ if (conn->resolver_error) {
+ return REXMPP_CONN_RESOLVER_ERROR;
+ }
+
+ conn->resolution_v4 = REXMPP_CONN_RESOLUTION_INACTIVE;
+ conn->resolution_v6 = REXMPP_CONN_RESOLUTION_INACTIVE;
+
+  /* If the host is a literal IP address, skip resolution and try to
+     connect right away. */
+  struct sockaddr_in addr_v4;
+  int flags;
+  if (inet_pton(AF_INET, host, &(addr_v4.sin_addr))) {
+ addr_v4.sin_family = AF_INET;
+ addr_v4.sin_port = htons(port);
+ conn->sockets[conn->connection_attempts] =
+ socket(AF_INET, SOCK_STREAM, 0);
+ flags = fcntl(conn->sockets[conn->connection_attempts], F_GETFL, 0);
+ fcntl(conn->sockets[conn->connection_attempts], F_SETFL, flags | O_NONBLOCK);
+ if (connect(conn->sockets[conn->connection_attempts],
+ (struct sockaddr*)&addr_v4,
+ sizeof(addr_v4))) {
+ if (errno != EINPROGRESS) {
+ return REXMPP_CONN_ERROR;
+ }
+    } else {
+      conn->fd = conn->sockets[conn->connection_attempts];
+      return REXMPP_CONN_DONE;
+    }
+ conn->connection_attempts++;
+ return REXMPP_CONN_IN_PROGRESS;
+ }
+  struct sockaddr_in6 addr_v6;
+  if (inet_pton(AF_INET6, host, &(addr_v6.sin6_addr))) {
+    addr_v6.sin6_family = AF_INET6;
+    addr_v6.sin6_port = htons(port);
+ conn->sockets[conn->connection_attempts] =
+ socket(AF_INET6, SOCK_STREAM, 0);
+ flags = fcntl(conn->sockets[conn->connection_attempts], F_GETFL, 0);
+ fcntl(conn->sockets[conn->connection_attempts], F_SETFL, flags | O_NONBLOCK);
+ if (connect(conn->sockets[conn->connection_attempts],
+ (struct sockaddr*)&addr_v6,
+ sizeof(addr_v6))) {
+ if (errno != EINPROGRESS) {
+ return REXMPP_CONN_ERROR;
+ }
+    } else {
+      conn->fd = conn->sockets[conn->connection_attempts];
+      return REXMPP_CONN_DONE;
+    }
+ conn->connection_attempts++;
+ return REXMPP_CONN_IN_PROGRESS;
+ }
+
+ conn->resolution_v4 = REXMPP_CONN_RESOLUTION_WAITING;
+ conn->resolution_v6 = REXMPP_CONN_RESOLUTION_WAITING;
+
+ ares_query(conn->resolver_channel, host,
+ ns_c_in, ns_t_aaaa, rexmpp_dns_aaaa_cb, conn);
+ ares_query(conn->resolver_channel, host,
+ ns_c_in, ns_t_a, rexmpp_dns_a_cb, conn);
+
+ return REXMPP_CONN_IN_PROGRESS;
+}
+
+int rexmpp_tcp_conn_finish (rexmpp_tcp_conn_t *conn) {
+ int i;
+ for (i = 0; i < REXMPP_TCP_MAX_CONNECTION_ATTEMPTS; i++) {
+ if (conn->sockets[i] != -1 && conn->sockets[i] != conn->fd) {
+ close(conn->sockets[i]);
+ conn->sockets[i] = -1;
+ }
+ }
+ ares_destroy(conn->resolver_channel);
+ if (conn->addr_v4 != NULL) {
+ ares_free_hostent(conn->addr_v4);
+ conn->addr_v4 = NULL;
+ }
+ if (conn->addr_v6 != NULL) {
+ ares_free_hostent(conn->addr_v6);
+ conn->addr_v6 = NULL;
+ }
+ return conn->fd;
+}
+
+int rexmpp_tcp_conn_ipv4_available(rexmpp_tcp_conn_t *conn) {
+ return (conn->resolution_v4 == REXMPP_CONN_RESOLUTION_SUCCESS &&
+ conn->addr_v4 != NULL &&
+ conn->addr_v4->h_addr_list[conn->addr_cur_v4 + 1] != NULL);
+}
+
+int rexmpp_tcp_conn_ipv6_available(rexmpp_tcp_conn_t *conn) {
+ return (conn->resolution_v6 == REXMPP_CONN_RESOLUTION_SUCCESS &&
+ conn->addr_v6 != NULL &&
+ conn->addr_v6->h_addr_list[conn->addr_cur_v6 + 1] != NULL);
+}
+
+rexmpp_tcp_conn_error_t
+rexmpp_tcp_conn_proceed (rexmpp_tcp_conn_t *conn,
+ fd_set *read_fds,
+ fd_set *write_fds)
+{
+ struct timeval now;
+ int i;
+
+ /* Check for successful connections. */
+ for (i = 0; i < REXMPP_TCP_MAX_CONNECTION_ATTEMPTS; i++) {
+ int err;
+ socklen_t err_len = sizeof(err);
+ if (conn->sockets[i] != -1 && FD_ISSET(conn->sockets[i], write_fds)) {
+ if (getsockopt(conn->sockets[i], SOL_SOCKET, SO_ERROR, &err, &err_len)) {
+ return REXMPP_CONN_ERROR;
+ } else {
+ if (err == 0) {
+ conn->fd = conn->sockets[i];
+ return REXMPP_CONN_DONE;
+ } else if (err != EINPROGRESS) {
+ close(conn->sockets[i]);
+ conn->sockets[i] = -1;
+ }
+ }
+ }
+ }
+
+ /* Name resolution. */
+ if (conn->resolution_v4 == REXMPP_CONN_RESOLUTION_WAITING ||
+ conn->resolution_v6 == REXMPP_CONN_RESOLUTION_WAITING) {
+ ares_process(conn->resolver_channel, read_fds, write_fds);
+ }
+
+  /* New connection attempts: alternate between the available
+     address families, starting at most one new attempt per
+     REXMPP_TCP_CONN_DELAY_MS. */
+ int repeat;
+ do {
+ repeat = 0;
+ if (conn->connection_attempts < REXMPP_TCP_MAX_CONNECTION_ATTEMPTS &&
+ (rexmpp_tcp_conn_ipv4_available(conn) ||
+ rexmpp_tcp_conn_ipv6_available(conn))) {
+ gettimeofday(&now, NULL);
+ if (now.tv_sec > conn->next_connection_time.tv_sec ||
+ (now.tv_sec == conn->next_connection_time.tv_sec &&
+ now.tv_usec >= conn->next_connection_time.tv_usec)) {
+ /* Time to attempt a new connection. */
+ int use_ipv6 = 0;
+ if (rexmpp_tcp_conn_ipv4_available(conn) &&
+ rexmpp_tcp_conn_ipv6_available(conn)) {
+ if (conn->addr_cur_v4 >= conn->addr_cur_v6) {
+ use_ipv6 = 1;
+ }
+ } else if (rexmpp_tcp_conn_ipv6_available(conn)) {
+ use_ipv6 = 1;
+ }
+
+ struct sockaddr_in6 addr_v6;
+ struct sockaddr_in addr_v4;
+ struct sockaddr *addr;
+ socklen_t addrlen;
+ int domain;
+
+ if (use_ipv6) {
+ conn->addr_cur_v6++;
+ memcpy(&addr_v6.sin6_addr,
+ conn->addr_v6->h_addr_list[conn->addr_cur_v6],
+ conn->addr_v6->h_length);
+ addr_v6.sin6_family = conn->addr_v6->h_addrtype;
+ addr_v6.sin6_port = htons(conn->port);
+ domain = conn->addr_v6->h_addrtype;
+ addr = (struct sockaddr*)&addr_v6;
+ addrlen = sizeof(addr_v6);
+ } else {
+ conn->addr_cur_v4++;
+ memcpy(&addr_v4.sin_addr,
+ conn->addr_v4->h_addr_list[conn->addr_cur_v4],
+ conn->addr_v4->h_length);
+ addr_v4.sin_family = conn->addr_v4->h_addrtype;
+ addr_v4.sin_port = htons(conn->port);
+ domain = conn->addr_v4->h_addrtype;
+ addr = (struct sockaddr*)&addr_v4;
+ addrlen = sizeof(addr_v4);
+ }
+
+ conn->sockets[conn->connection_attempts] =
+ socket(domain, SOCK_STREAM, 0);
+ int flags = fcntl(conn->sockets[conn->connection_attempts], F_GETFL, 0);
+ fcntl(conn->sockets[conn->connection_attempts], F_SETFL, flags | O_NONBLOCK);
+ if (connect(conn->sockets[conn->connection_attempts], addr, addrlen)) {
+ if (errno == EINPROGRESS) {
+ gettimeofday(&(conn->next_connection_time), NULL);
+ conn->next_connection_time.tv_usec += REXMPP_TCP_CONN_DELAY_MS * 1000;
+ if (conn->next_connection_time.tv_usec >= 1000000) {
+ conn->next_connection_time.tv_usec -= 1000000;
+ conn->next_connection_time.tv_sec++;
+ }
+ conn->connection_attempts++;
+ } else {
+ close(conn->sockets[conn->connection_attempts]);
+ conn->sockets[conn->connection_attempts] = -1;
+ if (conn->connection_attempts < REXMPP_TCP_MAX_CONNECTION_ATTEMPTS &&
+ (rexmpp_tcp_conn_ipv4_available(conn) ||
+ rexmpp_tcp_conn_ipv6_available(conn))) {
+ repeat = 1;
+ }
+ }
+ } else {
+ conn->fd = conn->sockets[conn->connection_attempts];
+ return REXMPP_CONN_DONE;
+ }
+ }
+ }
+ } while (repeat);
+
+ int active_connections = 0;
+ for (i = 0; i < REXMPP_TCP_MAX_CONNECTION_ATTEMPTS; i++) {
+ if (conn->sockets[i] != -1) {
+ active_connections++;
+ break;
+ }
+ }
+
+ gettimeofday(&now, NULL);
+
+ if (active_connections ||
+ conn->resolution_v4 == REXMPP_CONN_RESOLUTION_WAITING ||
+ conn->resolution_v6 == REXMPP_CONN_RESOLUTION_WAITING ||
+ (conn->next_connection_time.tv_sec > now.tv_sec ||
+ (conn->next_connection_time.tv_sec == now.tv_sec &&
+ conn->next_connection_time.tv_usec > now.tv_usec))) {
+ return REXMPP_CONN_IN_PROGRESS;
+ } else {
+ return REXMPP_CONN_FAILURE;
+ }
+}
+
+int rexmpp_tcp_conn_fds (rexmpp_tcp_conn_t *conn,
+ fd_set *read_fds,
+ fd_set *write_fds)
+{
+ int max_fd = 0, i;
+ max_fd = ares_fds(conn->resolver_channel, read_fds, write_fds);
+ for (i = 0; i < REXMPP_TCP_MAX_CONNECTION_ATTEMPTS; i++) {
+ if (conn->sockets[i] != -1) {
+ FD_SET(conn->sockets[i], write_fds);
+      if (max_fd <= conn->sockets[i]) {
+        max_fd = conn->sockets[i] + 1;
+      }
+ }
+ }
+ return max_fd;
+}
+
+struct timeval *rexmpp_tcp_conn_timeout (rexmpp_tcp_conn_t *conn,
+ struct timeval *max_tv,
+ struct timeval *tv)
+{
+ struct timeval now;
+ struct timeval *ret;
+ ret = ares_timeout(conn->resolver_channel, max_tv, tv);
+ if (conn->resolution_v4 == REXMPP_CONN_RESOLUTION_SUCCESS ||
+ conn->resolution_v6 == REXMPP_CONN_RESOLUTION_SUCCESS) {
+ gettimeofday(&now, NULL);
+ if (now.tv_sec < conn->next_connection_time.tv_sec ||
+ (now.tv_sec == conn->next_connection_time.tv_sec &&
+ now.tv_usec <= conn->next_connection_time.tv_usec)) {
+ if (ret == NULL ||
+ ret->tv_sec > conn->next_connection_time.tv_sec - now.tv_sec ||
+ (ret->tv_sec == conn->next_connection_time.tv_sec - now.tv_sec &&
+ ret->tv_usec > conn->next_connection_time.tv_usec - now.tv_usec)) {
+ ret = tv;
+ tv->tv_sec = conn->next_connection_time.tv_sec - now.tv_sec;
+        if (conn->next_connection_time.tv_usec >= now.tv_usec) {
+ tv->tv_usec = conn->next_connection_time.tv_usec - now.tv_usec;
+ } else {
+ tv->tv_usec = conn->next_connection_time.tv_usec + 1000000 - now.tv_usec;
+ tv->tv_sec--;
+ }
+ }
+ }
+ }
+ return ret;
+}
diff --git a/src/rexmpp_tcp.h b/src/rexmpp_tcp.h
new file mode 100644
index 0000000..124da2a
--- /dev/null
+++ b/src/rexmpp_tcp.h
@@ -0,0 +1,177 @@
+/**
+ @file rexmpp_tcp.h
+ @brief TCP connection establishment.
+ @author defanor <defanor@uberspace.net>
+ @date 2020
+ @copyright MIT license.
+
+ This module tries to establish a TCP connection to a given host
+ and port.
+
+ A connection establishment procedure begins with
+ ::rexmpp_tcp_conn_init, followed by repeated calls to
+ ::rexmpp_tcp_conn_proceed while the return code is
+ ::REXMPP_CONN_IN_PROGRESS, at the times suggested by
+ ::rexmpp_tcp_conn_timeout and on events suggested by
+ ::rexmpp_tcp_conn_fds, and ends with ::rexmpp_tcp_conn_finish.
+*/
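+
+/* A minimal usage sketch with a select()-based loop (illustrative
+   only; the host and port are hypothetical, and error handling is
+   mostly elided):
+
+     rexmpp_tcp_conn_t conn;
+     fd_set read_fds, write_fds;
+     struct timeval tv, *tv_ptr;
+     rexmpp_tcp_conn_error_t err =
+       rexmpp_tcp_conn_init(&conn, "xmpp.example.org", 5222);
+     if (err == REXMPP_CONN_RESOLVER_ERROR) {
+       ... report conn.resolver_error and give up ...
+     }
+     while (err == REXMPP_CONN_IN_PROGRESS) {
+       FD_ZERO(&read_fds);
+       FD_ZERO(&write_fds);
+       int nfds = rexmpp_tcp_conn_fds(&conn, &read_fds, &write_fds);
+       tv_ptr = rexmpp_tcp_conn_timeout(&conn, NULL, &tv);
+       select(nfds, &read_fds, &write_fds, NULL, tv_ptr);
+       err = rexmpp_tcp_conn_proceed(&conn, &read_fds, &write_fds);
+     }
+     int fd = rexmpp_tcp_conn_finish(&conn);
+     ... on success (REXMPP_CONN_DONE) fd is the connected socket,
+         otherwise it is -1 ...
+*/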
+
+#ifndef REXMPP_TCP_H
+#define REXMPP_TCP_H
+
+#include <sys/select.h>
+#include <sys/time.h>
+#include <netdb.h>
+#include <ares.h>
+
+/** @brief The maximum number of connection attempts to keep track
+    of. */
+#define REXMPP_TCP_MAX_CONNECTION_ATTEMPTS 20
+/** @brief How long to wait for an AAAA reply after an A reply has
+    arrived, in milliseconds. */
+#define REXMPP_TCP_IPV6_DELAY_MS 50
+/** @brief The delay between successive connection attempts, in
+    milliseconds. */
+#define REXMPP_TCP_CONN_DELAY_MS 250
+
+typedef enum rexmpp_tcp_conn_resolution_status
+rexmpp_tcp_conn_resolution_status_t;
+
+/**
+ @brief Resolution status.
+ */
+enum rexmpp_tcp_conn_resolution_status {
+ /** The resolution is not active. */
+ REXMPP_CONN_RESOLUTION_INACTIVE,
+ /** Waiting for resolution. */
+ REXMPP_CONN_RESOLUTION_WAITING,
+ /** Resolved successfully. */
+ REXMPP_CONN_RESOLUTION_SUCCESS,
+ /** Failed to resolve. */
+ REXMPP_CONN_RESOLUTION_FAILURE
+};
+
+typedef enum rexmpp_tcp_conn_error rexmpp_tcp_conn_error_t;
+
+/**
+ @brief Connection errors.
+*/
+enum rexmpp_tcp_conn_error {
+ /** Connected, no error. */
+ REXMPP_CONN_DONE,
+ /** Resolver error occurred. The exact error code can be read from
+ the connection structure. */
+ REXMPP_CONN_RESOLVER_ERROR,
+ /** Connection in progress, no error yet. */
+ REXMPP_CONN_IN_PROGRESS,
+ /** All the connection attempts failed. */
+ REXMPP_CONN_FAILURE,
+ /** An unexpected error during connection. */
+ REXMPP_CONN_ERROR
+};
+
+typedef struct rexmpp_tcp_connection rexmpp_tcp_conn_t;
+
+/** @brief A connection establishment structure. */
+struct rexmpp_tcp_connection {
+ /** @brief A host we are connecting to. */
+ const char *host;
+ /** @brief A port we are connecting to. */
+ int port;
+
+ /** @brief Resolver channel. */
+ ares_channel resolver_channel;
+ /** @brief Resolver error is stored here when
+ ::REXMPP_CONN_RESOLVER_ERROR is returned. */
+ int resolver_error;
+
+ /** @brief State of A record resolution. */
+ enum rexmpp_tcp_conn_resolution_status resolution_v4;
+ /** @brief Status of A record resolution, as returned by the
+ resolver. */
+ int resolver_status_v4;
+ /** @brief AF_INET (IPv4) hostent structure. */
+ struct hostent *addr_v4;
+ /** @brief The AF_INET address number we are currently at. */
+ int addr_cur_v4;
+
+ /** @brief State of AAAA record resolution. */
+ enum rexmpp_tcp_conn_resolution_status resolution_v6;
+ /** @brief Status of AAAA record resolution, as returned by the
+ resolver. */
+ int resolver_status_v6;
+ /** @brief AF_INET6 (IPv6) hostent structure. */
+ struct hostent *addr_v6;
+ /** @brief The AF_INET6 address number we are currently at. */
+ int addr_cur_v6;
+
+ /** @brief Socket array, one for each connection attempt. */
+ int sockets[REXMPP_TCP_MAX_CONNECTION_ATTEMPTS];
+ /** @brief The number of connection attempts so far. */
+ int connection_attempts;
+
+ /** @brief Next scheduled connection time. */
+ struct timeval next_connection_time;
+ /** @brief File descriptor of a connected socket. */
+ int fd;
+};
+
+/**
+ @brief Initiates a connection.
+ @param[out] conn An allocated connection structure.
+ @param[in] host A host to connect to. This could be a domain name,
+ or a textual representation of an IPv4 or an IPv6 address.
+ @param[in] port A port to connect to.
+ @returns A ::rexmpp_tcp_conn_error state.
+*/
+rexmpp_tcp_conn_error_t
+rexmpp_tcp_conn_init (rexmpp_tcp_conn_t *conn,
+ const char *host,
+ int port);
+
+/**
+ @brief Continues a connection process.
+ @param[in,out] conn An active connection structure.
+ @param[in] read_fds File descriptors available for reading from.
+ @param[in] write_fds File descriptors available for writing to.
+ @returns A ::rexmpp_tcp_conn_error state.
+*/
+rexmpp_tcp_conn_error_t
+rexmpp_tcp_conn_proceed (rexmpp_tcp_conn_t *conn,
+ fd_set *read_fds,
+ fd_set *write_fds);
+
+/**
+   @brief Finalises a connection process.
+
+   Closes the pending connections, except for the established one,
+   and frees the additionally allocated resources.
+
+   Normally this should be called once any state other than
+   ::REXMPP_CONN_IN_PROGRESS is reached. The connection structure
+   can be freed after this.
+
+   @param[in,out] conn An active connection structure.
+   @returns The connected socket's file descriptor, or -1 if no
+   connection was established.
+ */
+int rexmpp_tcp_conn_finish (rexmpp_tcp_conn_t *conn);
+
+/**
+ @brief Reports file descriptors a connection process is interested in.
+
+ File descriptors are only added to an @c fd_set, so the ones it
+ already contains will not be lost.
+
+ @param[in] conn An active connection structure.
+ @param[out] read_fds File descriptors a connection process is
+ interested in reading from.
+ @param[out] write_fds File descriptors a connection process is
+ interested in writing to.
+ @returns Maximum file descriptor number, plus 1.
+ */
+int rexmpp_tcp_conn_fds (rexmpp_tcp_conn_t *conn,
+ fd_set *read_fds,
+ fd_set *write_fds);
+
+/**
+   @brief Reports timeouts.
+   @param[in] conn An active connection structure.
+   @param[in] max_tv An existing maximum timeout, or NULL if there
+   is none.
+   @param[out] tv A timeval structure to store a new timeout in.
+   @returns A pointer to whichever of max_tv and tv is smaller, or
+   NULL if no timeout is needed.
+*/
+struct timeval *rexmpp_tcp_conn_timeout (rexmpp_tcp_conn_t *conn,
+ struct timeval *max_tv,
+ struct timeval *tv);
+
+#endif