Using memcached DRDoS to Attack GitHub

Author: 钱子晨 | Published 2018-03-04 11:41

    Word is that on March 1 GitHub was hit by a DDoS attack, apparently a pretty serious one.

    Let's take a look at how memcached (mc) can be used for such an attack.

    memcached first takes the UDP port from the command line, then initializes its libevent instance and the worker threads:

    int main (int argc, char **argv) {
    ...
      settings_init();
    ...
      while (-1 != (c = getopt(argc, argv,
        ...
        "U:"  /* UDP port number to listen on */
        ...
      ))) {
          switch (c) {
          ...
          case 'U':
              settings.udpport = atoi(optarg);
              udp_specified = true;
              break;
          ...
          }
      }
    ...
      if (tcp_specified && !udp_specified) {
          settings.udpport = settings.port;
       } else if (udp_specified && !tcp_specified) {
          settings.port = settings.udpport;
       }
    ...
      main_base = event_init();
    ...
      thread_init(settings.num_threads, main_base);
    ...
    /* create unix mode sockets after dropping privileges */
        if (settings.socketpath != NULL) {
            errno = 0;
            if (server_socket_unix(settings.socketpath,settings.access)) {
                vperror("failed to listen on UNIX socket: %s", settings.socketpath);
                exit(EX_OSERR);
            }
        }
    
        /* create the listening socket, bind it, and init */
        if (settings.socketpath == NULL) {
            ...
            // TCP
            errno = 0;
            if (settings.port && server_sockets(settings.port, tcp_transport,
                                               portnumber_file)) {
                vperror("failed to listen on TCP port %d", settings.port);
                exit(EX_OSERR);
            }
    
            /*
             * initialization order: first create the listening sockets
             * (may need root on low ports), then drop root if needed,
             * then daemonise if needed, then init libevent (in some cases
             * descriptors created by libevent wouldn't survive forking).
             */
    
            /* create the UDP listening socket and bind it */
            errno = 0;
            if (settings.udpport && server_sockets(settings.udpport, udp_transport,
                                                  portnumber_file)) {
                vperror("failed to listen on UDP port %d", settings.udpport);
                exit(EX_OSERR);
            }
            ...
            /* enter the event loop */
        if (event_base_loop(main_base, 0) != 0) {
            retval = EXIT_FAILURE;
        }
    ...
        }
    

    Before this, settings_init() has already set up the defaults; you can see the default port is 11211 and there are 4 worker threads.

    static void settings_init(void) {
    ...
        settings.port = 11211;
        settings.udpport = 11211;
        /* By default this string should be NULL for getaddrinfo() */
        settings.inter = NULL;
        settings.maxbytes = 64 * 1024 * 1024; /* default is 64MB */
        ...
        settings.chunk_size = 48;         /* space for a modest key and value */
        settings.num_threads = 4;         /* N workers */
        ...
    }
    

    You can check this for yourself:

    $ echo "stats settings" | nc localhost 11211
    STAT maxbytes 67108864
    STAT maxconns 1024
    STAT tcpport 11211
    STAT udpport 11211
    STAT inter NULL
    ...
    STAT chunk_size 48
    STAT num_threads 4
    ...
    END
    

    Next the threads are initialized. main_base is the event base of the main (dispatcher) thread. For each worker a pipe is created for libevent notifications; setup_thread initializes the per-thread data structures, and finally the threads themselves are created, each running worker_libevent.

    void thread_init(int nthreads, struct event_base *main_base) {
    ...
        threads = calloc(nthreads, sizeof(LIBEVENT_THREAD));
        if (! threads) {
            perror("Can't allocate thread descriptors");
            exit(1);
        }
    
        dispatcher_thread.base = main_base;
        dispatcher_thread.thread_id = pthread_self();
    
        for (i = 0; i < nthreads; i++) {
            int fds[2];
            if (pipe(fds)) {
                perror("Can't create notify pipe");
                exit(1);
            }
    
            threads[i].notify_receive_fd = fds[0];
            threads[i].notify_send_fd = fds[1];
    
            setup_thread(&threads[i]);
            /* Reserve three fds for the libevent base, and two for the pipe */
            stats.reserved_fds += 5;
        }
    
        /* Create threads after we've done all the libevent setup. */
        for (i = 0; i < nthreads; i++) {
            create_worker(worker_libevent, &threads[i]);
        }
    ...
    }
    

    Here we meet the thread_libevent_process pointer: while the per-thread data is being set up, it is registered as the libevent read-event callback on the me->notify_receive_fd end of the pipe.

    static void setup_thread(LIBEVENT_THREAD *me) {
        me->base = event_init();
        if (! me->base) {
            fprintf(stderr, "Can't allocate event base\n");
            exit(1);
        }
    
        /* Listen for notifications from other threads */
        event_set(&me->notify_event, me->notify_receive_fd,
                  EV_READ | EV_PERSIST, thread_libevent_process, me);
        event_base_set(me->base, &me->notify_event);
    
        if (event_add(&me->notify_event, 0) == -1) {
            fprintf(stderr, "Can't monitor libevent notify pipe\n");
            exit(1);
        }
    
        me->new_conn_queue = malloc(sizeof(struct conn_queue));
        if (me->new_conn_queue == NULL) {
            perror("Failed to allocate memory for connection queue");
            exit(EXIT_FAILURE);
        }
        cq_init(me->new_conn_queue);
    ...
    }
    

    When the pipe becomes readable this callback fires. It pops one item off the queue and then calls conn_new.

    static void thread_libevent_process(int fd, short which, void *arg) {
        LIBEVENT_THREAD *me = arg;
        CQ_ITEM *item;
        char buf[1];
    
        if (read(fd, buf, 1) != 1)
            if (settings.verbose > 0)
                fprintf(stderr, "Can't read from libevent pipe\n");
    
        switch (buf[0]) {
        case 'c':
            item = cq_pop(me->new_conn_queue);

            if (NULL != item) {
                conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
                                   item->read_buffer_size, item->transport, me->base);
        ...
            }
        ...
        }
    }
    

    conn_new builds a connection structure for the new request. It only fills in the conn struct here; the key part is registering the event_handler function pointer with libevent.

    conn *conn_new(const int sfd, enum conn_states init_state,
                    const int event_flags,
                    const int read_buffer_size, enum network_transport transport,
                    struct event_base *base) {
    conn *c = conn_from_freelist();
    
        if (NULL == c) {
            if (!(c = (conn *)calloc(1, sizeof(conn)))) {
                fprintf(stderr, "calloc()\n");
                return NULL;
            }
    
            MEMCACHED_CONN_CREATE(c);
    
            c->rbuf = c->wbuf = 0;
            c->rbuf = (char *)malloc((size_t)c->rsize);
            c->wbuf = (char *)malloc((size_t)c->wsize);
    ...
            c->msglist = (struct msghdr *)malloc(sizeof(struct msghdr) * c->msgsize);
    ...
        }// if
    ....
        c->sfd = sfd;
    ...
        c->item = 0;
    ...
        event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
    
        event_base_set(base, &c->event);
    
        c->ev_flags = event_flags;
    
        if (event_add(&c->event, 0) == -1) {
    ...
        }
    ...
        return c;
    }
    

    This callback fires whenever libevent reports an event on the connection's fd (for a listening socket, that means a new connection); it just hands the conn over to drive_machine.

    void event_handler(const int fd, const short which, void *arg) {
        conn *c;
    
        c = (conn *)arg;
        assert(c != NULL);
    
        c->which = which;
    
        /* sanity */
    ...
        drive_machine(c);
        return;
    }
    

    After a client connects, the memcached server's main thread is woken up and goes event_handler() -> drive_machine(), entering this state machine. From the rest of the code you can see that only TCP or UNIX-domain sockets ever sit in conn_listening, i.e. go through the accept path. conn_waiting waits for a new command request, conn_read reads the data; once the request has been read, the conn's state switches and the command is parsed and executed. The reply is written out in the conn_mwrite state; transmit eventually calls sendmsg on the socket.

    static void drive_machine(conn *c) {
        bool stop = false;
        int sfd, flags = 1;
        socklen_t addrlen;
        struct sockaddr_storage addr;
        int nreqs = settings.reqs_per_event;
        int res;
        const char *str;
    
        assert(c != NULL);
    
        while (!stop) {
            switch(c->state) {
            case conn_listening:
                addrlen = sizeof(addr);
    
                if ((sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen)) == -1) {
    ...
                }
    ...
            case conn_waiting:
                if (!update_event(c, EV_READ | EV_PERSIST)) {
                    if (settings.verbose > 0)
                        fprintf(stderr, "Couldn't update event\n");
                    conn_set_state(c, conn_closing);
                    break;
                }
    
                conn_set_state(c, conn_read);
                stop = true;
                break;
    
            case conn_read:
                res = IS_UDP(c->transport) ? try_read_udp(c) : try_read_network(c);
                switch (res) {
                case READ_NO_DATA_RECEIVED:
                    conn_set_state(c, conn_waiting);
                    break;
                    ...
                }
                break;
    
            case conn_parse_cmd :
                if (try_read_command(c) == 0) {
                    /* we need more data! */
                    conn_set_state(c, conn_waiting);
                }
    
                break;
            ...
            case conn_nread:
                if (c->rlbytes == 0) {
                    complete_nread(c);
                    break;
                }
    
                /* first check if we have leftovers in the conn_read buffer */
                if (c->rbytes > 0) {
                    int tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
                    if (c->ritem != c->rcurr) {
                        memmove(c->ritem, c->rcurr, tocopy);
                    }
                    ...
                }
    
                /*  now try reading from the socket */
                res = read(c->sfd, c->ritem, c->rlbytes);
    ...
            case conn_write:
                ...
                /* fall through... */
    
            case conn_mwrite:
    
              if (IS_UDP(c->transport) && c->msgcurr == 0 && build_udp_headers(c) != 0) {
                if (settings.verbose > 0)
                  fprintf(stderr, "Failed to build UDP headers\n");
                conn_set_state(c, conn_closing);
                break;
              }
                switch (transmit(c)) {
                case TRANSMIT_COMPLETE:
                    if (c->state == conn_mwrite) {
                        ...
                        /* XXX:  I don't know why this wasn't the general case */
                        if (c->protocol == binary_prot) {
                            conn_set_state(c, c->write_and_go);
                        } else {
                            ...
                        }
                    }
                    ...
                    break;

                case TRANSMIT_INCOMPLETE:
                case TRANSMIT_HARD_ERROR:
                    break;                   /* Continue in state machine. */
    
                case TRANSMIT_SOFT_ERROR:
                    stop = true;
                    break;
                }
                break;
            ...
            case conn_closing:
                if (IS_UDP(c->transport))
                    conn_cleanup(c);
                else
                    conn_close(c);
                stop = true;
                break;
              ...
            }
        }

        return;
    }
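
    Pulling the excerpt together, a UDP request moves through the machine roughly as: conn_read (try_read_udp) -> conn_parse_cmd (try_read_command, which parses and dispatches the command) -> conn_mwrite, where build_udp_headers() prepends the UDP frame headers and transmit() pushes the reply out with sendmsg. A TCP connection additionally starts life in conn_listening, on the accept path.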
    

    The UDP read used above calls recvfrom directly; it receives data from the client and puts the command it read into rbuf.

    static enum try_read_result try_read_udp(conn *c) {
        int res;
    
        assert(c != NULL);
    
        c->request_addr_size = sizeof(c->request_addr);
    
        res = recvfrom(c->sfd, c->rbuf, c->rsize,
                       0, &c->request_addr, &c->request_addr_size);
    ...
            memmove(c->rbuf, c->rbuf + 8, res);
    
            c->rbytes = res;
            c->rcurr = c->rbuf;
            return READ_DATA_RECEIVED;
        }
        return READ_NO_DATA_RECEIVED;
    }
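
    The memmove above skips an 8-byte frame header that memcached places at the front of every UDP datagram (described in the project's protocol.txt): a 16-bit request ID, a 16-bit sequence number, the total number of datagrams in the message, and a reserved word, all in network byte order. A minimal parsing sketch (the struct and function names below are mine, not memcached's):

    /* Sketch of memcached's 8-byte UDP frame header (see protocol.txt).
     * All four fields are 16-bit integers in network byte order. */
    #include <stddef.h>
    #include <stdint.h>

    struct udp_frame_header {          /* hypothetical name, not from the source */
        uint16_t request_id;           /* echoed back in every response datagram */
        uint16_t seq_no;               /* 0..total_dgrams-1 within one message */
        uint16_t total_dgrams;         /* how many datagrams make up the message */
        uint16_t reserved;             /* must be 0 */
    };

    /* Returns 0 on success, -1 if the datagram is too short to hold a header. */
    static int parse_udp_frame_header(const unsigned char *buf, size_t len,
                                      struct udp_frame_header *h) {
        if (len < 8)
            return -1;
        h->request_id   = (uint16_t)((buf[0] << 8) | buf[1]);
        h->seq_no       = (uint16_t)((buf[2] << 8) | buf[3]);
        h->total_dgrams = (uint16_t)((buf[4] << 8) | buf[5]);
        h->reserved     = (uint16_t)((buf[6] << 8) | buf[7]);
        return 0;
    }

    Responses that do not fit in a single datagram are split, and every piece carries the same request_id with an increasing seq_no; this is exactly what lets one small get turn into many outgoing datagrams.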
    

    Depending on the mode configured in main(), clients can send requests to the memcached server in several ways. For UDP, once the socket is bound the server just reads from sfd; you can see here that a UDP conn starts in state conn_read, while a TCP conn starts in conn_listening.

    static int server_sockets(int port, enum network_transport transport,
                              FILE *portnumber_file) {
        if (settings.inter == NULL) {
            return server_socket(settings.inter, port, transport, portnumber_file);
        } else {
            // tokenize them and bind to each one of them..
            char *b;
            int ret = 0;
    
            char *list = strdup(settings.inter);
    
            if (list == NULL) {
                fprintf(stderr, "Failed to allocate memory for parsing server interface string\n");
                return 1;
            }
    
            for (char *p = strtok_r(list, ";,", &b);
                ...
                ret |= server_socket(p, the_port, transport, portnumber_file);
            }
            free(list);
            return ret;
        }
    }
    

    server_socket binds a socket for each interface.

    static int server_socket(const char *interface,
                             int port,
                             enum network_transport transport,
                             FILE *portnumber_file) {
    ...
        hints.ai_socktype = IS_UDP(transport) ? SOCK_DGRAM : SOCK_STREAM;
    
        if (port == -1) {
            port = 0;
        }
        snprintf(port_buf, sizeof(port_buf), "%d", port);
    
        error= getaddrinfo(interface, port_buf, &hints, &ai);
        ...
        for (next= ai; next; next= next->ai_next) {
            conn *listen_conn_add;
    
            if ((sfd = new_socket(next)) == -1) {
                ...
                continue;
            }
    
    #ifdef IPV6_V6ONLY
          ...
    #endif
    
            setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags));
    
            if (IS_UDP(transport)) {
                maximize_sndbuf(sfd);
            } else {
                ...
            }
    
            if (bind(sfd, next->ai_addr, next->ai_addrlen) == -1) {
                ...
            } else {
                success++;
                if (!IS_UDP(transport) && listen(sfd, settings.backlog) == -1) {
                  ...
            }
        }

            if (IS_UDP(transport)) {
                // UDP
                int c;
    
                for (c = 0; c < settings.num_threads_per_udp; c++) {
                    /* this is guaranteed to hit all threads because we round-robin */
                    dispatch_conn_new(sfd, conn_read, EV_READ | EV_PERSIST,
                                      UDP_READ_BUFFER_SIZE, transport);
                }
            } else {
                if (!(listen_conn_add = conn_new(sfd, conn_listening,
                                                 EV_READ | EV_PERSIST, 1,
                                                 transport, main_base))) {
                   ...
                }
    
                listen_conn_add->next = listen_conn;
                listen_conn = listen_conn_add;
            }
        }
    
        freeaddrinfo(ai);
    
        /* Return zero iff we detected no errors in starting up connections */
        return success == 0;
    }
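
    Note the UDP branch above: the same UDP socket fd is handed out settings.num_threads_per_udp times via dispatch_conn_new, each time creating a conn in state conn_read on a different worker, so several workers can pull datagrams off the one socket. TCP instead gets a single listening conn (conn_listening) on the main thread's event base.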
    

    maximize_sndbuf sets the socket's send buffer size: it reads the current (default) value, then binary-searches between that and MAX_SENDBUF_SIZE, remembering the largest size for which setsockopt succeeds.

    /*
     * Sets a socket's send buffer size to the maximum allowed by the system.
     */
    // defined somewhere else
    #define MAX_SENDBUF_SIZE (256 * 1024 * 1024)
    
    static void maximize_sndbuf(const int sfd) {
        ...
        if (getsockopt(sfd, SOL_SOCKET, SO_SNDBUF, &old_size, &intsize) != 0) {
          ...
        }
    
        min = old_size;
        max = MAX_SENDBUF_SIZE;
    
        while (min <= max) {
            avg = ((unsigned int)(min + max)) / 2;
            if (setsockopt(sfd, SOL_SOCKET, SO_SNDBUF, (void *)&avg, intsize) == 0) {
                last_good = avg;
                min = avg + 1;
            } else {
                max = avg - 1;
            }
        }
        ...
    }
    

    A new connection is then dispatched to one of the threads in the pool: an item is pushed onto that thread's connection queue, and one character is written into the thread's notify pipe. That write wakes the sleeping worker, whose registered read event fires and calls thread_libevent_process (the thread's descriptor was bound to the event in setup_thread above).

    void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags,
                           int read_buffer_size, enum network_transport transport) {
        // CQ_ITEM connection queue item
        CQ_ITEM *item = cqi_new();
        char buf[1];
    
        int tid = (last_thread + 1) % settings.num_threads;
    
        LIBEVENT_THREAD *thread = threads + tid;
        ...
    
        cq_push(thread->new_conn_queue, item);
    
        MEMCACHED_CONN_DISPATCH(sfd, thread->thread_id);
    
        buf[0] = 'c';
        if (write(thread->notify_send_fd, buf, 1) != 1) {
            ...
        }
    
    }
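
    The whole notification pipeline (cq_push -> write one byte -> worker's libevent read event -> thread_libevent_process -> conn_new) is the classic "wake a sleeping event loop through a pipe" pattern. Here is a standalone sketch of just that pattern, using poll() instead of libevent to keep it short; none of this is memcached code:

    #include <poll.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static int notify_fds[2];   /* [0] = worker read end, [1] = dispatcher write end */

    static void *worker(void *arg) {
        struct pollfd pfd = { .fd = notify_fds[0], .events = POLLIN };
        char buf;
        (void)arg;
        while (poll(&pfd, 1, -1) > 0) {          /* sleep until the dispatcher pokes us */
            if (read(notify_fds[0], &buf, 1) == 1 && buf == 'c') {
                /* memcached would cq_pop() a CQ_ITEM here and call conn_new() */
                printf("worker: new connection notification\n");
            }
        }
        return NULL;
    }

    int main(void) {
        pthread_t tid;
        if (pipe(notify_fds) != 0) { perror("pipe"); return 1; }
        pthread_create(&tid, NULL, worker, NULL);

        /* dispatcher side: push work onto the queue, then poke the worker */
        write(notify_fds[1], "c", 1);

        sleep(1);                                /* let the worker print before exiting */
        return 0;
    }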
    

    So over UDP memcached returns (much larger) data to whatever address the request claims to come from, and that property can be abused: memcached servers exposed on the internet become amplifiers for the attack.

    A quick look at the protocol, RFC 768:

                             User Datagram Protocol
                             ----------------------
    ...
    protocol  is transaction oriented, and delivery and duplicate protection
    are not guaranteed.  Applications requiring ordered reliable delivery of
    streams of data should use the Transmission Control Protocol (TCP) [2].
    Format
    ------
    
    
                      0      7 8     15 16    23 24    31
                     +--------+--------+--------+--------+
                     |     Source      |   Destination   |
                     |      Port       |      Port       |
                     +--------+--------+--------+--------+
                     |                 |                 |
                     |     Length      |    Checksum     |
                     +--------+--------+--------+--------+
                     |
                     |          data octets ...
                     +---------------- ...
    
                          User Datagram Header Format
    

    The Length field is 2 bytes, so a single UDP datagram carries at most 2^16 - 1 = 65535 bytes (about 64 KB), header included. UDP is connectionless: data can be sent straight to the target with no three-way handshake, and the target has no good way to verify the client's source IP.

    The attack, then: first set a batch of large values, with long expiration times, on open memcached servers out on the internet; then send UDP get requests for those values to the memcached servers with the source address spoofed to the victim's, concentrating the requests into a short window. The data is reflected by the memcached servers onto the target, completing the DRDoS.
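
    For a sense of the scale: memcached's default item size limit is 1 MB, while the spoofed "get <key>\r\n" request is only a few dozen bytes, so one tiny request can trigger roughly a megabyte of response traffic split across hundreds of datagrams. Public write-ups of the GitHub incident cited amplification factors from roughly 10,000x up to around 50,000x (e.g. a ~15-byte request answered with ~750 KB of data).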

    At the end of February, dormando released 1.5.6, which disables the UDP listener by default:
    https://groups.google.com/forum/#!topic/memcached/pu6LAIbL_Ks

    To protect yourself, upgrade to the new version or add restrictions at the network layer; you can also start memcached with the -U 0 flag, so that the settings.udpport && server_sockets(...) check short-circuits and the server never listens on UDP.


    Linkerist
    March 4, 2018, at a street-corner coffee shop in Beijing
