diff --git a/include/nat64/common/config.h b/include/nat64/common/config.h
index 194a5b6dd..7f983ebd5 100644
--- a/include/nat64/common/config.h
+++ b/include/nat64/common/config.h
@@ -63,6 +63,8 @@ enum config_mode {
 	MODE_JOOLD = (1 << 10),
 	MODE_INSTANCE = (1 << 11),
+	/** The current message is talking about customer pool. */
+	MODE_CUSTOMER = (1 << 12),
 };
 
 char *configmode_to_string(enum config_mode mode);
@@ -86,6 +88,7 @@ char *configmode_to_string(enum config_mode mode);
 #define JOOLD_OPS (OP_ADVERTISE | OP_TEST)
 #define LOGTIME_OPS (OP_DISPLAY)
 #define INSTANCE_OPS (OP_ADD | OP_REMOVE)
+#define CUSTOMER_OPS (OP_DISPLAY | OP_ADD | OP_REMOVE | OP_FLUSH)
 /**
  * @}
  */
@@ -131,7 +134,8 @@ enum parse_section {
 	SEC_EAMT = 32,
 	SEC_BLACKLIST = 64,
 	SEC_POOL6791 = 128,
-	SEC_INIT = 256
+	SEC_INIT = 256,
+	SEC_CUSTOMER = 512
 };
 
 /**
@@ -139,7 +143,8 @@ enum parse_section {
  * Allowed modes for the operation mentioned in the name.
  * eg. DISPLAY_MODES = Allowed modes for display operations.
  */
-#define POOL_MODES (MODE_POOL6 | MODE_POOL4 | MODE_BLACKLIST | MODE_RFC6791)
+#define POOL_MODES (MODE_POOL6 | MODE_POOL4 | MODE_BLACKLIST | MODE_RFC6791 \
+		| MODE_CUSTOMER)
 #define TABLE_MODES (MODE_EAMT | MODE_BIB | MODE_SESSION)
 #define ANY_MODE 0xFFFF
 
@@ -154,7 +159,7 @@ enum parse_section {
 		| MODE_EAMT | MODE_LOGTIME | MODE_PARSE_FILE | MODE_INSTANCE)
 #define NAT64_MODES (MODE_GLOBAL | MODE_POOL6 | MODE_POOL4 | MODE_BIB \
 		| MODE_SESSION | MODE_LOGTIME | MODE_PARSE_FILE \
-		| MODE_INSTANCE | MODE_JOOLD)
+		| MODE_INSTANCE | MODE_JOOLD | MODE_CUSTOMER)
 /**
  * @}
  */
@@ -320,6 +325,39 @@ union request_eamt {
 	} flush;
 };
 
+struct customer_entry_usr {
+	struct ipv6_prefix prefix6;
+	__u8 groups6_size_len;
+	struct ipv4_prefix prefix4;
+	__u8 ports_division_len;
+	struct port_range ports;
+};
+
+/**
+ * Configuration for the "customer" module.
+ */
+union request_customer {
+	struct {
+		/* Nothing needed here. */
+	} display;
+	struct customer_entry_usr add;
+	struct {
+		/**
+		 * Whether the BIB and the sessions tables should also be
+		 * cleared (false) or not (true).
+		 */
+		config_bool quick;
+	} rm;
+	struct {
+		/**
+		 * Whether the BIB and the sessions tables should also be
+		 * cleared (false) or not (true).
+		 */
+		config_bool quick;
+	} flush;
+};
+
+
 /**
  * Configuration for the "Log time" module.
  */
diff --git a/include/nat64/mod/common/nl/customer.h b/include/nat64/mod/common/nl/customer.h
new file mode 100644
index 000000000..6a6413106
--- /dev/null
+++ b/include/nat64/mod/common/nl/customer.h
@@ -0,0 +1,9 @@
+#ifndef __NL_CUSTOMER_H__
+#define __NL_CUSTOMER_H__
+
+#include <net/genetlink.h>
+#include "nat64/mod/common/xlator.h"
+
+int handle_customer_config(struct xlator *jool, struct genl_info *info);
+
+#endif
diff --git a/include/nat64/mod/stateful/pool4/customer.h b/include/nat64/mod/stateful/pool4/customer.h
new file mode 100644
index 000000000..50c2fef9c
--- /dev/null
+++ b/include/nat64/mod/stateful/pool4/customer.h
@@ -0,0 +1,79 @@
+#ifndef __JOOL_MOD_POOL4_CUSTOMER_H_
+#define __JOOL_MOD_POOL4_CUSTOMER_H_
+
+#include <linux/types.h>
+#include "nat64/mod/common/types.h"
+#include "nat64/mod/common/config.h"
+
+struct customer_table {
+	/** IPv6 addresses that use this customer table. */
+	struct ipv6_prefix prefix6;
+	/** Number of bits of 'prefix6' which represent the subnetwork. */
+	__u8 groups6_size_len;
+
+	/** Pool4 for this table. */
+	struct ipv4_prefix prefix4;
+	/** Hop size that divides the ports range for every IPv6 subnetwork
+	 * in CIDR format. */
+	__u8 ports_division_len;
+
+	struct port_range ports;
+
+	/** Port range size "ports" in CIDR format, for bitwise operations. */
+	unsigned short ports_size_len;
+};
+
+
+bool customer_table_contains(struct customer_table *table, struct in6_addr *src6);
+
+/**
+ * Obtain the total count of ports from this customer.
+ * (i.e. IPv4 prefix count * port range count)
+ */
+__u32 customer_table_get_total_ports_size(struct customer_table *table);
+
+/**
+ * Indicates which IPv6 group the address belongs to.
+ */
+__u16 customer_table_get_group_by_addr(struct customer_table *table,
+		struct in6_addr *src6);
+
+/**
+ * Indicates the available port size for each IPv6 group.
+ */
+__u32 customer_table_get_group_ports_size(struct customer_table *table);
+
+/**
+ * Number of contiguous ports to be used as requested by the user
+ * for each IPv6 group.
+ */
+__u16 customer_table_get_port_range_hop(struct customer_table *table);
+
+/**
+ * Initial port number for the IPv6 group 'group',
+ * you can add an offset so that the initial port is different for each
+ * network request.
+ */
+__u32 customer_get_group_first_port(struct customer_table *table,
+		unsigned int offset, __u16 group, __u16 port_hop);
+
+/**
+ * Ports hop size for the following range of available ports for an IPv6 group.
+ */
+__u32 customer_table_get_group_ports_hop(struct customer_table *table);
+
+/**
+ * Number of IPv6 addresses for each IPv6 group.
+ */
+__u32 customer_table_get_group_size(struct customer_table *table);
+
+/**
+ * Same as the port_range_count(ports) but
+ * for bitwise operations (1 << port_mask).
+ *
+ * @return port_mask
+ */
+unsigned short customer_table_get_ports_mask(struct customer_table *table);
+
+void customer_table_put(struct customer_table *customer);
+#endif /* __JOOL_MOD_POOL4_CUSTOMER_H_ */
diff --git a/include/nat64/mod/stateful/pool4/db.h b/include/nat64/mod/stateful/pool4/db.h
index 8eb538804..971bf7377 100644
--- a/include/nat64/mod/stateful/pool4/db.h
+++ b/include/nat64/mod/stateful/pool4/db.h
@@ -11,6 +11,7 @@
 #include "nat64/mod/common/types.h"
 #include "nat64/mod/common/config.h"
 #include "nat64/mod/common/route.h"
+#include "nat64/mod/stateful/pool4/customer.h"
 
 struct pool4;
 
@@ -30,6 +31,13 @@ int pool4db_rm(struct pool4 *pool, const __u32 mark, enum l4_protocol proto,
 int pool4db_rm_usr(struct pool4 *pool, struct pool4_entry_usr *entry);
 
 void pool4db_flush(struct pool4 *pool);
+
+int customerdb_foreach(struct pool4 *pool,
+		int (*cb)(struct customer_table *, void *), void *arg);
+int customerdb_add(struct pool4 *pool, const struct customer_entry_usr *entry);
+void customerdb_flush(struct pool4 *pool, struct ipv4_range *range_removed,
+		int *error);
+int customerdb_rm(struct pool4 *pool, struct ipv4_range *range_removed);
 
 /*
  * Read functions (Legal to use anywhere)
@@ -50,6 +58,7 @@ int mask_domain_next(struct mask_domain *masks,
 		bool *consecutive);
 bool mask_domain_matches(struct mask_domain *masks,
 		struct ipv4_transport_addr *addr);
+bool mask_domain_is_customer(struct mask_domain *masks);
 bool mask_domain_is_dynamic(struct mask_domain *masks);
 __u32 mask_domain_get_mark(struct mask_domain *masks);
 
diff --git a/include/nat64/usr/argp/options.h b/include/nat64/usr/argp/options.h
index 2fc6ed122..9359d7ada 100644
--- a/include/nat64/usr/argp/options.h
+++ b/include/nat64/usr/argp/options.h
@@ -30,6 +30,7 @@ enum argp_flags {
 	ARGP_GLOBAL = 'g',
 	ARGP_PARSE_FILE = 'p',
 	ARGP_INSTANCE = 7001,
+	ARGP_CUSTOMER = 7003,
 
 	/* Operations */
 	ARGP_DISPLAY = 'd',
diff --git a/include/nat64/usr/customer.h b/include/nat64/usr/customer.h
new file mode 100644
index 000000000..f42a25fdc
--- /dev/null
+++ b/include/nat64/usr/customer.h
@@ -0,0 +1,12 @@
+#ifndef _JOOL_USR_CUSTOMER_H
+#define _JOOL_USR_CUSTOMER_H
+
+#include "nat64/common/config.h"
+#include "nat64/usr/types.h"
+
+int customer_display(display_flags flags);
+int customer_add(struct customer_entry_usr *entry);
+int customer_rm(bool quick);
+int customer_flush(bool quick);
+
+#endif /* _JOOL_USR_CUSTOMER_H */
diff --git a/include/nat64/usr/global.h b/include/nat64/usr/global.h
index 9860eeb05..8164c3270 100644
--- a/include/nat64/usr/global.h
+++ b/include/nat64/usr/global.h
@@ -8,6 +8,7 @@
 #define OPTNAME_GLOBAL "global"
 #define OPTNAME_POOL6 "pool6"
 #define OPTNAME_POOL4 "pool4"
+#define OPTNAME_CUSTOMER "customer"
 #define OPTNAME_BLACKLIST "blacklist"
 #define OPTNAME_RFC6791 "pool6791"
 #define OPTNAME_EAMT "eamt"
diff --git a/include/nat64/usr/str_utils.h b/include/nat64/usr/str_utils.h
index 4559df81b..139448edf 100644
--- a/include/nat64/usr/str_utils.h
+++ b/include/nat64/usr/str_utils.h
@@ -60,6 +60,9 @@ int str_to_addr6_port(const char *str, struct ipv6_transport_addr *out);
 int str_to_prefix6(const char *str, struct ipv6_prefix *out);
 int str_to_prefix4(const char *str, struct ipv4_prefix *out);
 
+int str_to_customer_prefix6(const char *str, struct ipv6_prefix *prefix_out, __u8 *group_size_len);
+int str_to_customer_prefix4(const char *str, struct ipv4_prefix *prefix_out, __u8 *port_len);
+
 /**
  * Prints the @millis amount of milliseconds as spreadsheet-friendly format in
  * the console.
diff --git a/mod/common/nl/customer.c b/mod/common/nl/customer.c new file mode 100644 index 000000000..941ed2074 --- /dev/null +++ b/mod/common/nl/customer.c @@ -0,0 +1,117 @@ +#include "nat64/mod/common/nl/customer.h" + +#include "nat64/mod/common/nl/nl_common.h" +#include "nat64/mod/common/nl/nl_core2.h" +#include "nat64/mod/stateful/pool4/customer.h" +#include "nat64/mod/stateful/pool4/db.h" +#include "nat64/mod/stateful/bib/db.h" + +static int customer_table_to_usr(struct customer_table *table, void *arg) +{ + return nlbuffer_write(arg, table, sizeof(*table)); +} + +static int handle_customer_display(struct pool4 *pool, struct genl_info *info, + union request_customer *request) +{ + struct nlcore_buffer buffer; + int error = 0; + + log_debug("Sending customer table to userspace."); + + error = nlbuffer_init_response(&buffer, info, nlbuffer_response_max_size()); + if (error) + return nlcore_respond(info, error); + + error = customerdb_foreach(pool, customer_table_to_usr, &buffer); + nlbuffer_set_pending_data(&buffer, error > 0); + error = (error >= 0) + ? 
nlbuffer_send(info, &buffer) + : nlcore_respond(info, error); + + nlbuffer_free(&buffer); + return error; +} + +static int handle_customer_add(struct pool4 *pool, struct genl_info *info, + union request_customer *request) +{ + if (verify_superpriv()) + return nlcore_respond(info, -EPERM); + + log_debug("Adding elements to customer table."); + return nlcore_respond(info, customerdb_add(pool, &request->add)); +} + +static int handle_customer_rm(struct xlator *jool, struct genl_info *info, + union request_customer *request) +{ + struct ipv4_range range; + int error; + + if (verify_superpriv()) + return nlcore_respond(info, -EPERM); + + log_debug("Removing elements from customer table."); + + error = customerdb_rm(jool->nat64.pool4, &range); + + if (!error && xlat_is_nat64() && !request->rm.quick) { + bib_rm_range(jool->nat64.bib, L4PROTO_TCP, &range); + bib_rm_range(jool->nat64.bib, L4PROTO_ICMP, &range); + bib_rm_range(jool->nat64.bib, L4PROTO_UDP, &range); + } + + return nlcore_respond(info, error); +} + +static int handle_customer_flush(struct xlator *jool, struct genl_info *info, + union request_customer *request) +{ + struct ipv4_range range; + int error; + + if (verify_superpriv()) + return nlcore_respond(info, -EPERM); + + log_debug("Flushing customer table."); + + customerdb_flush(jool->nat64.pool4, &range, &error); + if (!error && xlat_is_nat64() && !request->flush.quick) { + bib_rm_range(jool->nat64.bib, L4PROTO_TCP, &range); + bib_rm_range(jool->nat64.bib, L4PROTO_ICMP, &range); + bib_rm_range(jool->nat64.bib, L4PROTO_UDP, &range); + } + + return nlcore_respond(info, 0); +} + +int handle_customer_config(struct xlator *jool, struct genl_info *info) +{ + struct request_hdr *hdr = get_jool_hdr(info); + union request_customer *request = (union request_customer *)(hdr + 1); + int error; + + if (xlat_is_siit()) { + log_err("SIIT doesn't have customer."); + return nlcore_respond(info, -EINVAL); + } + + error = validate_request_size(info, sizeof(*request)); + if 
(error) + return nlcore_respond(info, error); + + switch (be16_to_cpu(hdr->operation)) { + case OP_DISPLAY: + return handle_customer_display(jool->nat64.pool4, info, request); + case OP_ADD: + return handle_customer_add(jool->nat64.pool4, info, request); + case OP_REMOVE: + return handle_customer_rm(jool, info, request); + case OP_FLUSH: + return handle_customer_flush(jool, info, request); + } + + log_err("Unknown operation: %u", be16_to_cpu(hdr->operation)); + return nlcore_respond(info, -EINVAL); +} diff --git a/mod/common/nl/nl_handler2.c b/mod/common/nl/nl_handler2.c index ebb32790a..7da036cbf 100644 --- a/mod/common/nl/nl_handler2.c +++ b/mod/common/nl/nl_handler2.c @@ -20,6 +20,7 @@ #include "nat64/mod/common/nl/pool4.h" #include "nat64/mod/common/nl/pool6.h" #include "nat64/mod/common/nl/session.h" +#include "nat64/mod/common/nl/customer.h" static struct genl_multicast_group mc_groups[1] = { { @@ -118,6 +119,8 @@ static int multiplex_request(struct xlator *jool, struct genl_info *info) return handle_joold_request(jool, info); case MODE_INSTANCE: return handle_instance_request(info); + case MODE_CUSTOMER: + return handle_customer_config(jool, info); } log_err("Unknown configuration mode: %d", be16_to_cpu(hdr->mode)); diff --git a/mod/stateful/Kbuild b/mod/stateful/Kbuild index 7c6e97853..e3fd61bc1 100644 --- a/mod/stateful/Kbuild +++ b/mod/stateful/Kbuild @@ -47,8 +47,10 @@ jool_common += ../common/nl/nl_common.o jool_common += ../common/nl/pool4.o jool_common += ../common/nl/pool6.o jool_common += ../common/nl/session.o +jool_common += ../common/nl/customer.o jool += pool4/empty.o +jool += pool4/customer.o jool += pool4/db.o jool += pool4/rfc6056.o diff --git a/mod/stateful/bib/db.c b/mod/stateful/bib/db.c index ea554d50f..05d018201 100644 --- a/mod/stateful/bib/db.c +++ b/mod/stateful/bib/db.c @@ -1089,7 +1089,8 @@ static void commit_add6(struct bib_table *table, struct bib_session_tuple *new, struct slot_group *slots, struct expire_timer *expirer, - struct 
bib_session *result) + struct bib_session *result, + bool is_customer) { new->session->bib = old->bib ? : new->bib; commit_session_add(table, &slots->session); @@ -1100,7 +1101,8 @@ static void commit_add6(struct bib_table *table, if (!old->bib) { commit_bib_add(table, slots); - log_new_bib(table, new->bib); + if (!is_customer) + log_new_bib(table, new->bib); new->bib = NULL; /* Do not free! */ } } @@ -1284,6 +1286,7 @@ static int find_available_mask(struct bib_table *table, bool consecutive; int error; + bib->src4.l3.s_addr = 0U; /* * We're going to assume the masks are generally consecutive. * I think it's a fair assumption until someone requests otherwise as a @@ -1412,7 +1415,8 @@ static int upgrade_pktqueue_session(struct bib_table *table, pktqueue_put_node(sos); - log_new_bib(table, bib); + if (!mask_domain_is_customer(masks)) + log_new_bib(table, bib); log_new_session(table, session); return 0; @@ -1527,6 +1531,12 @@ static int find_bib_session6(struct bib_table *table, if (error) { if (WARN(error != -ENOENT, "Unknown error: %d", error)) return error; + + if (mask_domain_is_customer(masks)) { + log_warn_once("I'm running out of pool4 addresses for customer."); + return error; + } + /* * TODO the rate limit might be a bit of a problem. * If both mark 0 and mark 1 are running out of @@ -1621,7 +1631,8 @@ int bib_add6(struct bib *db, } /* New connection; add the session. (And maybe the BIB entry as well) */ - commit_add6(table, &old, &new, &slots, &table->est_timer, result); + commit_add6(table, &old, &new, &slots, &table->est_timer, result, + mask_domain_is_customer(masks)); /* Fall through */ end: @@ -1759,7 +1770,8 @@ verdict bib_add_tcp6(struct bib *db, /* All exits up till now require @new.* to be deleted. 
*/ - commit_add6(table, &old, &new, &slots, &table->trans_timer, result); + commit_add6(table, &old, &new, &slots, &table->trans_timer, result, + mask_domain_is_customer(masks)); verdict = VERDICT_CONTINUE; /* Fall through */ diff --git a/mod/stateful/pool4/customer.c b/mod/stateful/pool4/customer.c new file mode 100644 index 000000000..f9cc09c03 --- /dev/null +++ b/mod/stateful/pool4/customer.c @@ -0,0 +1,105 @@ +#include "nat64/mod/stateful/pool4/customer.h" + +#include "nat64/common/types.h" +#include "nat64/mod/common/wkmalloc.h" +#include "nat64/mod/common/address.h" + +bool customer_table_contains(struct customer_table *table, struct in6_addr *src6) +{ + return prefix6_contains(&table->prefix6, src6); +} + +__u32 customer_table_get_total_ports_size(struct customer_table *table) +{ + return (__u32)((prefix4_get_addr_count(&table->prefix4)) << table->ports_size_len); +} + +__u16 customer_table_get_group_by_addr(struct customer_table *table, + struct in6_addr *addr) +{ + __u16 group = 0U; + __u16 bit_counter; + for (bit_counter = 0U; bit_counter < (table->groups6_size_len - table->prefix6.len); + bit_counter++) { + if (!addr6_get_bit(addr, table->groups6_size_len - 1U + bit_counter)) + continue; + + group |= (((__u16) 1U) << bit_counter); + } + + return group; +} + +__u32 customer_table_get_group_ports_size(struct customer_table *table) +{ + return customer_table_get_total_ports_size(table) + >> (table->groups6_size_len - table->prefix6.len); +} + +__u16 customer_table_get_port_range_hop(struct customer_table *table) +{ + return ((__u16) 1U) << (32 - table->ports_division_len); +} + +__u32 customer_get_group_first_port(struct customer_table *table, + unsigned int offset, __u16 group, __u16 port_hop) +{ + __u32 total_ports_size; + __u32 division_result; + __u32 offset_group_result; + __u32 port_hop_backward; + + total_ports_size = customer_table_get_total_ports_size(table); + + if (offset >= total_ports_size) { + offset = offset % total_ports_size; + } + + if 
(offset < port_hop) { + return group << (32 - table->ports_division_len); + } + + division_result = offset >> (32 - table->ports_division_len); + offset_group_result = division_result + & (customer_table_get_group_size(table) - 1); + + if (offset_group_result == group) + return division_result << (32 - table->ports_division_len); + + if (group < offset_group_result) + port_hop_backward = (offset_group_result - group) << (32 - table->ports_division_len); + else + port_hop_backward = (customer_table_get_group_size(table) + - (group - offset_group_result)) << (32 - table->ports_division_len); + + return (division_result << (32 - table->ports_division_len)) - port_hop_backward; +} + +__u32 customer_table_get_group_ports_hop(struct customer_table *table) +{ + return customer_table_get_group_size(table) << (32 - table->ports_division_len); +} + +__u32 customer_table_get_group_size(struct customer_table *table) +{ + return ((__u32) 1U) << (table->groups6_size_len - table->prefix6.len); +} + +unsigned short customer_table_get_ports_mask(struct customer_table *table) +{ + unsigned short result; + unsigned int range_count; + range_count = port_range_count(&table->ports); + for (result = 1; result < 16; result ++) { + if ((range_count >> result) == 1) + break; + } + + return result; + +} + +void customer_table_put(struct customer_table *customer) +{ + __wkfree("customer_table", customer); +} diff --git a/mod/stateful/pool4/db.c b/mod/stateful/pool4/db.c index 624088580..7a5cb3afd 100644 --- a/mod/stateful/pool4/db.c +++ b/mod/stateful/pool4/db.c @@ -9,6 +9,7 @@ #include "nat64/mod/common/wkmalloc.h" #include "nat64/mod/stateful/pool4/empty.h" #include "nat64/mod/stateful/pool4/rfc6056.h" +#include "nat64/mod/stateful/pool4/customer.h" /* * pool4 (struct pool4) is made out of two tree groups (struct pool4_trees). @@ -91,17 +92,17 @@ struct pool4 { /** Entries indexed via address. 
(Normally used in 4->6) */ struct pool4_trees tree_addr; + struct customer_table *customer; + spinlock_t lock; struct kref refcounter; }; -struct mask_domain { +struct mask_args { __u32 pool_mark; unsigned int taddr_count; unsigned int taddr_counter; - /* ITERATIONS_INFINITE is represented by this being zero. */ - unsigned int max_iterations; unsigned int range_count; struct pool4_range *current_range; @@ -125,6 +126,51 @@ struct mask_domain { */ }; +struct customer_args { + /** Pool4. */ + struct ipv4_prefix prefix4; + struct port_range ports; + + /** + * The sum of all the ports of each IPv4 in this customer. + */ + __u32 total_table_taddr; + /** + * Current port within the sum of all available ports. + */ + __u32 current_taddr; + + __u32 taddr_counter; + /** + * Number of ports available for this mask_domain. + */ + __u32 taddr_count; + + __u16 block_port_range_counter; + /** + * Number of ports of the current port range. + */ + __u16 block_port_range_count; + + /** + * Ports hop size if the current port range has reached its limit. + */ + __u32 port_hop; + + /** + * IPv6 group number that requested this mask domain. + */ + __u16 group; + + /** + * Port range size "ports" in CIDR format. + * + * mainly used as the port_range_count(ports) but + * for bitwise operations (1 << port_mask). + */ + unsigned short port_mask; +}; + /** * Assumes @domain has at least one entry. */ @@ -133,6 +179,31 @@ struct mask_domain { entry < first_domain_entry(domain) + domain->range_count; \ entry++) +struct mask_domain { + /* ITERATIONS_INFINITE is represented by this being zero. */ + unsigned int max_iterations; + + /** + * true if this mask is a copy of a customer table, otherwise this mask + * represents a copy of pool4 table. + */ + bool is_customer; + + /* + * An array of struct mask_args or customer_args hangs off here. 
+ */ +}; + +static struct customer_args *get_customer_args(struct mask_domain *mask) +{ + return (struct customer_args *)(mask + 1); +} + +static struct mask_args *get_mask_args(struct mask_domain *mask) +{ + return (struct mask_args *)(mask + 1); +} + static struct rb_root *get_tree(struct pool4_trees *trees, l4_protocol proto) { switch (proto) { @@ -206,7 +277,7 @@ static struct pool4_range *last_table_entry(struct pool4_table *table) return first_table_entry(table) + table->sample_count - 1; } -static struct pool4_range *first_domain_entry(struct mask_domain *domain) +static struct pool4_range *first_domain_entry(struct mask_args *domain) { return (struct pool4_range *)(domain + 1); } @@ -251,6 +322,7 @@ int pool4db_init(struct pool4 **pool) spin_lock_init(&result->lock); kref_init(&result->refcounter); + result->customer = NULL; *pool = result; return 0; } @@ -287,6 +359,10 @@ static void release(struct kref *refcounter) struct pool4 *pool; pool = container_of(refcounter, struct pool4, refcounter); clear_trees(pool); + if (pool->customer){ + customer_table_put(pool->customer); + pool->customer = NULL; + } wkfree(struct pool4, pool); } @@ -504,6 +580,12 @@ static int add_to_addr_tree(struct pool4 *pool, return 0; } +static bool port_range_intersects(const struct port_range *r1, + const struct port_range *r2) +{ + return r1->max >= (r2->min) && r1->min <= (r2->max); +} + int pool4db_add(struct pool4 *pool, const struct pool4_entry_usr *entry) { struct pool4_range addend = { .ports = entry->range.ports }; @@ -523,6 +605,21 @@ int pool4db_add(struct pool4 *pool, const struct pool4_entry_usr *entry) if (addend.ports.min == 0) addend.ports.min = 1; + spin_lock_bh(&pool->lock); + if (pool->customer + && prefix4_intersects(&entry->range.prefix, &pool->customer->prefix4) + && port_range_intersects(&addend.ports, &pool->customer->ports)) { + log_err("Pool4 '%pI4 %u-%u' intersects with Customer: '%pI4/%u %u-%u'", + &entry->range.prefix.address, addend.ports.min, 
addend.ports.max, + &pool->customer->prefix4.address, pool->customer->prefix4.len, + pool->customer->ports.min, pool->customer->ports.max); + error = -EEXIST; + } + + spin_unlock_bh(&pool->lock); + if (error) + return error; + /* log_debug("Adding range:%pI4/%u %u-%u", &range->prefix.address, range->prefix.len, range->ports.min, range->ports.max); */ @@ -843,11 +940,20 @@ static struct pool4_range *find_port_range(struct pool4_table *entry, __u16 port bool pool4db_contains(struct pool4 *pool, struct net *ns, l4_protocol proto, struct ipv4_transport_addr *addr) { + struct customer_table *customer; struct pool4_table *table; bool found = false; spin_lock_bh(&pool->lock); + customer = pool->customer; + if (customer) + found = prefix4_contains(&customer->prefix4, &addr->l3) + && port_range_contains(&customer->ports, addr->l4); + + if (found) + goto end; + if (is_empty(pool)) { spin_unlock_bh(&pool->lock); return pool4empty_contains(ns, addr); @@ -857,6 +963,7 @@ bool pool4db_contains(struct pool4 *pool, struct net *ns, l4_protocol proto, if (table) found = find_port_range(table, addr->l4) != NULL; +end: spin_unlock_bh(&pool->lock); return found; } @@ -1073,28 +1180,142 @@ static struct mask_domain *find_empty(struct route4_args *args, unsigned int offset) { struct mask_domain *masks; + struct mask_args *margs; struct pool4_range *range; masks = __wkmalloc("mask_domain", - sizeof(struct mask_domain) * sizeof(struct pool4_range), - GFP_ATOMIC); + sizeof(struct mask_domain) + sizeof(struct mask_args) + + sizeof(struct pool4_range), GFP_ATOMIC); if (!masks) return NULL; - range = (struct pool4_range *)(masks + 1); + margs = get_mask_args(masks); + + range = (struct pool4_range *)(margs + 1); if (pool4empty_find(args, range)) { __wkfree("mask_domain", masks); return NULL; } - masks->pool_mark = 0; - masks->taddr_count = port_range_count(&range->ports); - masks->taddr_counter = 0; masks->max_iterations = 0; - masks->range_count = 1; - masks->current_range = range; - 
masks->current_port = range->ports.min + offset % masks->taddr_count; - masks->dynamic = true; + masks->is_customer = false; + + margs->pool_mark = 0; + margs->taddr_count = port_range_count(&range->ports); + margs->taddr_counter = 0; + margs->range_count = 1; + margs->current_range = range; + margs->current_port = range->ports.min + offset % margs->taddr_count; + margs->dynamic = true; + return masks; +} + + + + +static unsigned int compute_max_iterations_for_customer(struct customer_args *customer) +{ + unsigned int result; + + /* + * The following heuristics are based on a few tests I ran. Keep in mind + * that none of them are imposed on the user; they only define the + * default value. + * + * The tests were as follows: + * + * For every one of the following pool4 sizes (in transport addresses), + * I exhausted the corresponding pool4 16 times and kept track of how + * many iterations I needed to allocate a connection in every step. + * + * The sizes were 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, + * 131072, 196608, 262144, 327680, 393216, 458752, 524288, 1048576 and + * 2097152. + * + * I did not test larger pool4s because I feel like more than 32 full + * addresses is rather pushing it and would all the likely require + * manual intervention anyway. + */ + + /* + * Right shift 7 is the same as division by 128. Why 128? + * Because I want roughly 1% of the total size, and also don't want any + * floating point arithmetic. + * Integer division by 100 would be acceptable, but this is faster. + * (Recall that we have a spinlock held.) + */ + result = customer->taddr_count >> 7; + + /* + * If the limit is too big, the NAT64 will iterate too much. + * If the limit is too small, the NAT64 will start losing connections + * early. + * + * So first of all, prevent the algorithm from iterating too little. + * + * (The values don't have to be powers or multiples of anything; I just + * really like base 8.) 
+ * + * A result lower than 1024 will be yielded when pool4 is 128k ports + * large or smaller, so the following paragraph (and conditional) only + * applies to this range: + * + * In all of the tests I ran, a max iterations of 1024 would have been a + * reasonable limit that would have completely prevented the NAT64 from + * dropping *any* connections, at least until pool4 was about 91% + * exhausted. (Connections can be technically dropped regardless, but + * it's very unlikely.) Also, on average, most connections would have + * kept succeeding until pool4 reached 98% utilization. + */ + if (result < 1024) + return 1024; + /* + * And finally: Prevent the algorithm from iterating too much. + * + * 8k will begin dropping connections at 95% utilization and most + * connections will remain on the success side until 99% exhaustion. + */ + if (result > 8192) + return 8192; + return result; + + +} + +static struct mask_domain *create_customer_mask(struct customer_table *customer, + unsigned int offset, struct in6_addr *src) +{ + struct mask_domain *masks; + struct customer_args *args; + + masks = __wkmalloc("mask_domain", sizeof(struct mask_domain) + + sizeof(struct customer_args), GFP_ATOMIC); + if (!masks) + return NULL; + + args = (struct customer_args *)(masks + 1); + + args->prefix4 = customer->prefix4; + args->ports = customer->ports; + + args->total_table_taddr = customer_table_get_total_ports_size(customer); + args->group = customer_table_get_group_by_addr(customer, src); + args->taddr_counter = 0U; + args->taddr_count = customer_table_get_group_ports_size(customer); + + args->block_port_range_counter = 0U; + args->block_port_range_count = customer_table_get_port_range_hop(customer); + + args->port_hop = customer_table_get_group_ports_hop(customer); + + args->port_mask = customer->ports_size_len; + args->current_taddr = customer_get_group_first_port(customer, offset, + args->group, args->port_hop); + + + masks->max_iterations = 
compute_max_iterations_for_customer(args); + masks->is_customer = true; + return masks; } @@ -1104,6 +1325,7 @@ struct mask_domain *mask_domain_find(struct pool4 *pool, struct tuple *tuple6, struct pool4_table *table; struct pool4_range *entry; struct mask_domain *masks; + struct mask_args *margs; unsigned int offset; if (rfc6056_f(tuple6, f_args, &offset)) @@ -1111,6 +1333,12 @@ struct mask_domain *mask_domain_find(struct pool4 *pool, struct tuple *tuple6, spin_lock_bh(&pool->lock); + if (pool->customer && customer_table_contains(pool->customer, &tuple6->src.addr6.l3)) { + masks = create_customer_mask(pool->customer, offset, &tuple6->src.addr6.l3); + spin_unlock_bh(&pool->lock); + return masks; + } + if (is_empty(pool)) { spin_unlock_bh(&pool->lock); return find_empty(route_args, offset); @@ -1122,28 +1350,32 @@ struct mask_domain *mask_domain_find(struct pool4 *pool, struct tuple *tuple6, goto fail; masks = __wkmalloc("mask_domain", sizeof(struct mask_domain) + + sizeof(struct mask_args) + table->sample_count * sizeof(struct pool4_range), GFP_ATOMIC); if (!masks) goto fail; - memcpy(masks + 1, table + 1, + margs = get_mask_args(masks); + + memcpy(margs + 1, table + 1, table->sample_count * sizeof(struct pool4_range)); - masks->taddr_count = table->taddr_count; masks->max_iterations = compute_max_iterations(table); - masks->range_count = table->sample_count; + margs->taddr_count = table->taddr_count; + margs->range_count = table->sample_count; spin_unlock_bh(&pool->lock); - masks->pool_mark = route_args->mark; - masks->taddr_counter = 0; - masks->dynamic = false; - offset %= masks->taddr_count; + margs->pool_mark = route_args->mark; + margs->taddr_counter = 0; + margs->dynamic = false; + masks->is_customer = false; + offset %= margs->taddr_count; - foreach_domain_range(entry, masks) { + foreach_domain_range(entry, margs) { if (offset <= port_range_count(&entry->ports)) { - masks->current_range = entry; - masks->current_port = entry->ports.min + offset - 1; + 
margs->current_range = entry; + margs->current_port = entry->ports.min + offset - 1; return masks; /* Happy path */ } offset -= port_range_count(&entry->ports); @@ -1163,39 +1395,102 @@ void mask_domain_put(struct mask_domain *masks) __wkfree("mask_domain", masks); } +static int customer_mask_next(struct mask_domain *masks, + struct ipv4_transport_addr *addr, bool *consecutive) +{ + struct customer_args *args = get_customer_args(masks); + __u32 ip_hop; + __u32 port; + __be32 old_addr; + + args->taddr_counter++; + args->block_port_range_counter++; + if (args->taddr_counter > args->taddr_count) + return -ENOENT; + if (masks->max_iterations) + if (args->taddr_counter > masks->max_iterations) + return -ENOENT; + + if (args->block_port_range_counter >= args->block_port_range_count) { + args->block_port_range_counter = 0U; + args->current_taddr += args->port_hop; + if (args->current_taddr >= args->total_table_taddr) + args->current_taddr = args->group * args->block_port_range_count; + } + + ip_hop = (args->current_taddr + args->block_port_range_counter) + >> args->port_mask; + port = ((args->current_taddr + args->block_port_range_counter) + % ((__u32)1U << args->port_mask)) + args->ports.min; + + old_addr = addr->l3.s_addr; + addr->l3 = args->prefix4.address; + addr->l4 = (__u16) port; + + if (ip_hop) + be32_add_cpu(&addr->l3.s_addr, ip_hop); + + *consecutive = addr->l3.s_addr == old_addr; + + return 0; +} + int mask_domain_next(struct mask_domain *masks, struct ipv4_transport_addr *addr, bool *consecutive) { - masks->taddr_counter++; - if (masks->taddr_counter > masks->taddr_count) + struct mask_args *margs; + + if (mask_domain_is_customer(masks)) + return customer_mask_next(masks, addr, consecutive); + + margs = get_mask_args(masks); + + margs->taddr_counter++; + if (margs->taddr_counter > margs->taddr_count) return -ENOENT; if (masks->max_iterations) - if (masks->taddr_counter > masks->max_iterations) + if (margs->taddr_counter > masks->max_iterations) return -ENOENT; 
- masks->current_port++; - if (masks->current_port > masks->current_range->ports.max) { + margs->current_port++; + if (margs->current_port > margs->current_range->ports.max) { *consecutive = false; - masks->current_range++; - if (masks->current_range >= first_domain_entry(masks) + masks->range_count) - masks->current_range = first_domain_entry(masks); - masks->current_port = masks->current_range->ports.min; + margs->current_range++; + if (margs->current_range >= first_domain_entry(margs) + margs->range_count) + margs->current_range = first_domain_entry(margs); + margs->current_port = margs->current_range->ports.min; } else { - *consecutive = (masks->taddr_counter != 1); + *consecutive = (margs->taddr_counter != 1); } - addr->l3 = masks->current_range->addr; - addr->l4 = masks->current_port; + addr->l3 = margs->current_range->addr; + addr->l4 = margs->current_port; return 0; } + +static bool customer_mask_matches(struct mask_domain *masks, + struct ipv4_transport_addr *addr) +{ + struct customer_args* customer = get_customer_args(masks); + + return prefix4_contains(&customer->prefix4, &addr->l3) + && port_range_contains(&customer->ports, addr->l4); +} + bool mask_domain_matches(struct mask_domain *masks, struct ipv4_transport_addr *addr) { + struct mask_args *margs; struct pool4_range *entry; - foreach_domain_range(entry, masks) { + if (mask_domain_is_customer(masks)) + return customer_mask_matches(masks, addr); + + margs = get_mask_args(masks); + + foreach_domain_range(entry, margs) { if (entry->addr.s_addr != addr->l3.s_addr) continue; if (port_range_contains(&entry->ports, addr->l4)) @@ -1205,12 +1500,222 @@ bool mask_domain_matches(struct mask_domain *masks, return false; } +bool mask_domain_is_customer(struct mask_domain *masks) +{ + return masks->is_customer; +} + bool mask_domain_is_dynamic(struct mask_domain *masks) { - return masks->dynamic; + return !mask_domain_is_customer(masks) && get_mask_args(masks)->dynamic; } __u32 mask_domain_get_mark(struct 
mask_domain *masks)
 {
-	return masks->pool_mark;
+	return get_mask_args(masks)->pool_mark;
+}
+
+int customerdb_foreach(struct pool4 *pool,
+		int (*cb)(struct customer_table *, void *), void *arg)
+{
+	struct customer_table table;
+	int error = 0;
+	bool is_table_set = false;
+
+	spin_lock_bh(&pool->lock);
+	if (pool->customer) {
+		table = *(pool->customer);
+		is_table_set = true;
+	}
+	spin_unlock_bh(&pool->lock);
+
+	if (is_table_set)
+		error = cb(&table, arg);
+
+	return error;
+}
+
+static int validate_customer_entry_usr(const struct customer_entry_usr *entry)
+{
+	int error;
+	__u32 port_count;
+	error = prefix4_validate(&entry->prefix4);
+	if (error)
+		return error;
+
+	error = prefix6_validate(&entry->prefix6);
+	if (error)
+		return error;
+
+	if (entry->groups6_size_len > 128) {
+		log_err("Second IPv6 prefix length %u is too high.", entry->groups6_size_len);
+		return -EINVAL;
+	}
+
+	if (entry->prefix6.len > entry->groups6_size_len) {
+		log_err("Second Prefix (/%u) of IPv6 Prefix can't be lower than first prefix (/%u)",
+				entry->groups6_size_len, entry->prefix6.len );
+		return -EINVAL;
+	}
+
+	if (entry->ports_division_len > 32) {
+		log_err("Second IPv4 prefix length %u is too high.", entry->ports_division_len);
+		return -EINVAL;
+	}
+
+	port_count = port_range_count(&entry->ports);
+	if (port_count > (1U << 15)) {
+		log_err("Port range size must be less or equals than %u ports",
+				(1U << 15));
+		return -EINVAL;
+	}
+
+	if ((((__u64)1) << (entry->groups6_size_len - entry->prefix6.len))
+			>= (prefix4_get_addr_count(&entry->prefix4) * port_count)) {
+		log_err("There are not enough ports for each ipv6 group.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static bool pool4_range_intersects_customer(struct pool4_range *range,
+		const struct customer_table *customer)
+{
+	return prefix4_contains(&customer->prefix4, &range->addr)
+			&& port_range_intersects(&customer->ports, &range->ports);
+}
+
+static bool pooltable_and_customer_intersects(struct rb_root *tree,
+		
const struct customer_table *customer) +{ + struct rb_node *node = rb_first(tree); + struct pool4_table *table; + struct pool4_range *entry; + + if (!node) { + return false; + } + + while (node) { + table = rb_entry(node, struct pool4_table, tree_hook); + + foreach_table_range(entry, table) { + + if (pool4_range_intersects_customer(entry, customer)) { + log_err("Pool4 '%pI4 %u-%u' intersects with Customer: '%pI4/%u %u-%u'", + &entry->addr, entry->ports.min, entry->ports.max, + &customer->prefix4.address, customer->prefix4.len, + customer->ports.min, customer->ports.max); + return true; + } + + } + + node = rb_next(node); + } + + return false; +} + +static int pool_and_customer_intersects(struct pool4 *pool, + struct customer_table *table) +{ + if (pooltable_and_customer_intersects(&pool->tree_addr.icmp, table)) + return -EEXIST; // error msg already printed. + + if (pooltable_and_customer_intersects(&pool->tree_addr.tcp, table)) + return -EEXIST; // error msg already printed. + + if (pooltable_and_customer_intersects(&pool->tree_addr.udp, table)) + return -EEXIST; // error msg already printed. 
+
+	return 0;
+}
+
+int customerdb_add(struct pool4 *pool, const struct customer_entry_usr *entry)
+{
+	struct customer_table *table;
+	int error = 0;
+
+	spin_lock_bh(&pool->lock);
+	if (pool->customer)
+		error = -EEXIST;
+	spin_unlock_bh(&pool->lock);
+	if (error) {
+		log_err("A customer table already exists, remove it before inserting a new one.");
+		return error;
+	}
+
+	error = validate_customer_entry_usr(entry);
+	if (error)
+		return error;
+	table = __wkmalloc("customer_table", sizeof(struct customer_table), GFP_ATOMIC);
+	if (!table)
+		return -ENOMEM;
+	table->prefix6 = entry->prefix6;
+	table->groups6_size_len = entry->groups6_size_len;
+	table->prefix4 = entry->prefix4;
+	table->ports_division_len = entry->ports_division_len;
+	table->ports = entry->ports;
+	if (table->ports.min > table->ports.max)
+		swap(table->ports.min, table->ports.max);
+	if (table->ports.min == 0)
+		table->ports.min = 1U;
+	table->ports_size_len = customer_table_get_ports_mask(table);
+
+	if (((unsigned int)(1 << table->ports_size_len))
+			!= port_range_count(&table->ports)) {
+		log_err("Ports range size must be a result of two to the nth.");
+		customer_table_put(table);
+		return -EINVAL;
+	}
+	if (customer_table_get_group_size(table) >
+			(customer_table_get_total_ports_size(table)
+			>> (32U - table->ports_division_len))) {
+		log_err("Invalid second IPv4 Prefix /%u, contiguous port range size is too big",
+				table->ports_division_len);
+		customer_table_put(table);
+		return -EINVAL;
+	}
+	spin_lock_bh(&pool->lock);
+	error = pool_and_customer_intersects(pool, table);
+	if (!error)
+		pool->customer = table;
+	spin_unlock_bh(&pool->lock);
+	if (error)
+		customer_table_put(table);
+	return error;
+}
+void customerdb_flush(struct pool4 *pool, struct ipv4_range *range_removed,
+		int *error)
+{
+	*error = customerdb_rm(pool, range_removed);
+}
+
+int customerdb_rm(struct pool4 *pool, struct ipv4_range *range_removed)
+{
+	struct customer_table *table;
+	int error = 0;
+
+	spin_lock_bh(&pool->lock);
+	if (!pool->customer)
+		error = -ENOENT;
+
+	if (error) {
+		spin_unlock_bh(&pool->lock);
+		log_err("There is no 
customer table to remove."); + return error; + } + + table = pool->customer; + pool->customer = NULL; + spin_unlock_bh(&pool->lock); + + range_removed->prefix = table->prefix4; + range_removed->ports = table->ports; + + customer_table_put(table); + + return error; } diff --git a/usr/common/argp/options.c b/usr/common/argp/options.c index af611d11b..8728b128f 100644 --- a/usr/common/argp/options.c +++ b/usr/common/argp/options.c @@ -27,6 +27,15 @@ static const struct argp_option pool4_opt = { .group = 0, }; +static const struct argp_option customer_opt = { + .name = OPTNAME_CUSTOMER, + .key = ARGP_CUSTOMER, + .arg = NULL, + .flags = 0, + .doc = "The command will operate on the customer table.", + .group = 0, +}; + static const struct argp_option bib_opt = { .name = OPTNAME_BIB, .key = ARGP_BIB, @@ -637,6 +646,7 @@ static const struct argp_option *opts_nat64[] = { &targets_hdr_opt, &pool6_opt, &pool4_opt, + &customer_opt, &bib_opt, &session_opt, &joold_opt, diff --git a/usr/common/jool.c b/usr/common/jool.c index b19f71690..0e32b247a 100644 --- a/usr/common/jool.c +++ b/usr/common/jool.c @@ -24,6 +24,7 @@ #include "nat64/usr/pool.h" #include "nat64/usr/pool6.h" #include "nat64/usr/pool4.h" +#include "nat64/usr/customer.h" #include "nat64/usr/bib.h" #include "nat64/usr/session.h" #include "nat64/usr/eam.h" @@ -54,6 +55,13 @@ struct arguments { struct ipv4_prefix prefix4; bool prefix4_set; + struct { + __u8 groups6_size_len; + __u8 ports_division_len; + struct port_range ports; + bool ports_set; + } customer; + struct { __u32 mark; __u32 max_iterations; @@ -248,9 +256,31 @@ static int set_global_u16_array(struct arguments *args, __u16 type, char *value) return error; } +static int set_ipv4_customer_prefix(struct arguments *args, char *str) +{ + int error; + error = update_state(args, MODE_CUSTOMER, OP_ADD); + if (error) + return error; + + if (args->db.prefix4_set) { + log_err("Only one IPv4 prefix can be added at a time."); + return -EINVAL; + } + + 
args->db.prefix4_set = true; + return str_to_customer_prefix4(str, &args->db.prefix4, + &args->db.customer.ports_division_len); +} + static int set_ipv4_prefix(struct arguments *args, char *str) { int error; + char *slash_pos; + slash_pos = strchr(str, '/'); + if (slash_pos && strchr(slash_pos + 1, '/')) { + return set_ipv4_customer_prefix(args, str); + } error = update_state(args, MODE_POOL4 | MODE_BLACKLIST | MODE_RFC6791 | MODE_EAMT, OP_ADD | OP_REMOVE); @@ -266,9 +296,31 @@ static int set_ipv4_prefix(struct arguments *args, char *str) return str_to_prefix4(str, &args->db.prefix4); } +static int set_ipv6_customer_prefix(struct arguments *args, char *str) +{ + int error; + error = update_state(args, MODE_CUSTOMER , OP_ADD); + if (error) + return error; + + if (args->db.prefix6_set) { + log_err("Only one IPv6 prefix can be added at a time."); + return -EINVAL; + } + + args->db.prefix6_set = true; + return str_to_customer_prefix6(str, &args->db.prefix6, + &args->db.customer.groups6_size_len); +} + static int set_ipv6_prefix(struct arguments *args, char *str) { int error; + char *slash_pos; + slash_pos = strchr(str, '/'); + if (slash_pos && strchr(slash_pos + 1, '/')) { + return set_ipv6_customer_prefix(args, str); + } error = update_state(args, MODE_POOL6 | MODE_EAMT, OP_ADD | OP_UPDATE | OP_REMOVE); @@ -339,11 +391,18 @@ static int set_port_range(struct arguments *args, char *str) return -EINVAL; } - error = update_state(args, MODE_POOL4, OP_ADD | OP_REMOVE); + error = update_state(args, MODE_POOL4 | MODE_CUSTOMER, OP_ADD | OP_REMOVE); + if (error) + return error; + + error = str_to_port_range(str, &args->db.pool4.ports); if (error) return error; - return str_to_port_range(str, &args->db.pool4.ports); + args->db.customer.ports = args->db.pool4.ports; + args->db.customer.ports_set = true; + + return error; } static int set_ip_args(struct arguments *args, char *str) @@ -395,6 +454,9 @@ static int parse_opt(int key, char *str, struct argp_state *state) case 
ARGP_POOL4: error = update_state(args, MODE_POOL4, POOL4_OPS); break; + case ARGP_CUSTOMER: + error = update_state(args, MODE_CUSTOMER, CUSTOMER_OPS); + break; case ARGP_BLACKLIST: error = update_state(args, MODE_BLACKLIST, BLACKLIST_OPS); break; @@ -814,6 +876,54 @@ static int handle_pool4(struct arguments *args) } } +static int __customer_add(struct arguments *args) +{ + struct customer_entry_usr entry; + + if (!args->db.prefix6_set) { + log_err("The address/prefix6 argument is mandatory."); + return -EINVAL; + } + + if (!args->db.prefix4_set) { + log_err("The address/prefix4 argument is mandatory."); + return -EINVAL; + } + + if (!args->db.customer.ports_set) { + log_err("Port range argument is mandatory."); + } + + entry.prefix6 = args->db.prefix6; + entry.prefix4 = args->db.prefix4; + entry.groups6_size_len = args->db.customer.groups6_size_len; + entry.ports_division_len = args->db.customer.ports_division_len; + entry.ports = args->db.customer.ports; + + return customer_add(&entry); +} + +static int handle_customer(struct arguments *args) +{ + if (xlat_is_siit()) { + log_err("SIIT doesn't have customer pool."); + return -EINVAL; + } + + switch (args->op) { + case OP_DISPLAY: + return customer_display(args->flags); + case OP_ADD: + return __customer_add(args); + case OP_REMOVE: + return customer_rm(args->db.quick); + case OP_FLUSH: + return customer_flush(args->db.quick); + default: + return unknown_op("Customer", args->op); + } +} + static int handle_bib(struct arguments *args) { struct ipv6_transport_addr *addr6; @@ -1020,6 +1130,8 @@ static int main_wrapped(struct arguments *args) return handle_joold(args); case MODE_INSTANCE: return handle_instance(args); + case MODE_CUSTOMER: + return handle_customer(args); } log_err("Unknown configuration mode: %u", args->mode); diff --git a/usr/common/str_utils.c b/usr/common/str_utils.c index 0a81f59c4..a34495f90 100644 --- a/usr/common/str_utils.c +++ b/usr/common/str_utils.c @@ -66,6 +66,8 @@ char 
*configmode_to_string(enum config_mode mode) return OPTNAME_POOL6; case MODE_POOL4: return OPTNAME_POOL4; + case MODE_CUSTOMER: + return OPTNAME_CUSTOMER; case MODE_BLACKLIST: return OPTNAME_BLACKLIST; case MODE_RFC6791: @@ -480,6 +482,94 @@ int str_to_prefix6(const char *str, struct ipv6_prefix *prefix_out) return str_to_u8(token, &prefix_out->len, 0, 128); /* Error msg already printed. */ } +#undef STR_MAX_LEN +#define STR_MAX_LEN (INET_ADDRSTRLEN + 1 + 2 + 1 + 2) /* [addr + null chara] + / + mask */ +int str_to_customer_prefix4(const char *str, struct ipv4_prefix *prefix_out, __u8 *port_len) +{ + const char *FORMAT = "// (eg. 192.0.2.0/24/30)"; + /* strtok corrupts the string, so we'll be using this copy instead. */ + char str_copy[STR_MAX_LEN]; + char *token; + int error; + + if (strlen(str) + 1 > STR_MAX_LEN) { + log_err("'%s' is too long for this poor, limited parser...", str); + return -EINVAL; + } + strcpy(str_copy, str); + + token = strtok(str_copy, "/"); + if (!token) { + log_err("Cannot parse '%s' as a %s.", str, FORMAT); + return -EINVAL; + } + + error = str_to_addr4(token, &prefix_out->address); + if (error) + return error; + + token = strtok(NULL, "/"); + if (!token) { + log_err("Cannot parse '%s' as a %s.", str, FORMAT); + return -EINVAL; + } + error = str_to_u8(token, &prefix_out->len, 0, 32); /* Error msg already printed. */ + if (error) + return error; + + token = strtok(NULL, "/"); + if (!token) { + log_err("Cannot parse '%s' as a %s.", str, FORMAT); + return -EINVAL; + } + return str_to_u8(token, port_len, 0, 32); /* Error msg already printed. */ +} + +#undef STR_MAX_LEN +#define STR_MAX_LEN (INET6_ADDRSTRLEN + 1 + 3 + 1 + 3) /* [addr + null chara] + / + pref len + / + group size len*/ +int str_to_customer_prefix6(const char *str, struct ipv6_prefix *prefix_out, __u8 *group_size_len) +{ + const char *FORMAT = "// (eg. 64:ff9b::/96/97)"; + /* strtok corrupts the string, so we'll be using this copy instead. 
*/ + char str_copy[STR_MAX_LEN]; + char *token; + int error; + + if (strlen(str) + 1 > STR_MAX_LEN) { + log_err("'%s' is too long for this poor, limited parser...", str); + return -EINVAL; + } + strcpy(str_copy, str); + + token = strtok(str_copy, "/"); + if (!token) { + log_err("Cannot parse '%s' as a %s.", str, FORMAT); + return -EINVAL; + } + + error = str_to_addr6(token, &prefix_out->address); + if (error) + return error; + + token = strtok(NULL, "/"); + if (!token) { + log_err("Cannot parse '%s' as a %s.", str, FORMAT); + return -EINVAL; + } + + error = str_to_u8(token, &prefix_out->len, 0, 128); /* Error msg already printed. */ + if (error) + return error; + + token = strtok(NULL, "/"); + if (!token) { + log_err("Cannot parse '%s' as a %s.", str, FORMAT); + return -EINVAL; + } + + return str_to_u8(token, group_size_len, 0, 128); /* Error msg already printed. */ +} + static void print_num_csv(__u64 num, char *separator) { if (num < 10) diff --git a/usr/common/target/customer.c b/usr/common/target/customer.c new file mode 100644 index 000000000..c8131fc90 --- /dev/null +++ b/usr/common/target/customer.c @@ -0,0 +1,115 @@ +#include "nat64/usr/customer.h" + +#include +#include "nat64/common/config.h" +#include "nat64/common/str_utils.h" +#include "nat64/common/types.h" +#include "nat64/usr/netlink.h" + + +#define HDR_LEN sizeof(struct request_hdr) +#define PAYLOAD_LEN sizeof(union request_customer) + + +struct display_args { + display_flags flags; + unsigned int row_count; + union request_customer *request; +}; + +static void print_customer_entry(struct customer_entry_usr *entry, char *separator) +{ + char ipv6_str[INET6_ADDRSTRLEN]; + char *ipv4_str; + + inet_ntop(AF_INET6, &entry->prefix6.address, ipv6_str, sizeof(ipv6_str)); + ipv4_str = inet_ntoa(entry->prefix4.address); + printf("%s/%u/%u", ipv6_str, entry->prefix6.len, entry->groups6_size_len); + printf("%s", separator); + printf("%s/%u/%u", ipv4_str, entry->prefix4.len, entry->ports_division_len); + 
printf("%s", separator); + printf("%u", entry->ports.min); + printf("%s", separator); + printf("%u", entry->ports.max); + printf("\n"); +} + +static int customer_display_response(struct jool_response *response, void *arg) +{ + struct customer_entry_usr *entry = response->payload; + struct display_args *args = arg; + + if (response->payload_len <= 0) { + log_info(" (empty)"); + return 0; + } + + if (args->flags & DF_CSV_FORMAT) { + print_customer_entry(entry, ","); + } else { + print_customer_entry(entry, " - "); + } + + return 0; +} + +int customer_display(display_flags flags) +{ + unsigned char request[HDR_LEN + PAYLOAD_LEN]; + struct request_hdr *hdr = (struct request_hdr *)request; + union request_customer *payload = (union request_customer *)(request + HDR_LEN); + struct display_args args; + int error; + + init_request_hdr(hdr, MODE_CUSTOMER, OP_DISPLAY); + args.flags = flags; + args.row_count = 0; + args.request = payload; + + if ((flags & DF_SHOW_HEADERS) && (flags & DF_CSV_FORMAT)) + printf("IPv6 Prefix,IPv4 Prefix,Ports\n"); + + error = netlink_request(request, sizeof(request), + customer_display_response, &args); + if (error) + return error; + + + return 0; +} + +int customer_add(struct customer_entry_usr *entry) +{ + unsigned char request[HDR_LEN + PAYLOAD_LEN]; + struct request_hdr *hdr = (struct request_hdr *)request; + union request_customer *payload = (union request_customer *)(request + HDR_LEN); + + init_request_hdr(hdr, MODE_CUSTOMER, OP_ADD); + payload->add = *entry; + + return netlink_request(request, sizeof(request), NULL, NULL); +} + +int customer_rm(bool quick) +{ + unsigned char request[HDR_LEN + PAYLOAD_LEN]; + struct request_hdr *hdr = (struct request_hdr *)request; + union request_customer *payload = (union request_customer *)(request + HDR_LEN); + + init_request_hdr(hdr, MODE_CUSTOMER, OP_REMOVE); + payload->rm.quick = quick; + + return netlink_request(request, sizeof(request), NULL, NULL); +} + +int customer_flush(bool quick) +{ + 
unsigned char request[HDR_LEN + PAYLOAD_LEN]; + struct request_hdr *hdr = (struct request_hdr *)request; + union request_customer *payload = (union request_customer *)(request + HDR_LEN); + + init_request_hdr(hdr, MODE_CUSTOMER, OP_FLUSH); + payload->flush.quick = quick; + + return netlink_request(&request, sizeof(request), NULL, NULL); +} diff --git a/usr/stateful/Makefile.am b/usr/stateful/Makefile.am index 9f3ce9c4d..d751c3114 100644 --- a/usr/stateful/Makefile.am +++ b/usr/stateful/Makefile.am @@ -23,7 +23,8 @@ jool_SOURCES = \ ../common/target/pool.c \ ../common/target/pool4.c \ ../common/target/pool6.c \ - ../common/target/session.c + ../common/target/session.c \ + ../common/target/customer.c jool_LDADD = ${LIBNLGENL3_LIBS} jool_CFLAGS = -Wall -O2