zephyr gptp net_pkt RX procedure

Author: SnC_ | Published 2021-02-05 17:32

Data structures involved in the low-level RX path

This analysis of the gptp application's RX path is based on the driver that NXP currently provides in zephyr.

The following is the first data structure encountered in the call stack; it describes a device.
eth_mcux_rx_isr handles the interrupt raised when a frame is received:
it casts device->data to a struct eth_context and then proceeds through the enet_handle inside that context. (see eth_mcux.c:1166)

/**
 * zephyr/include/device.h:292
 * @brief Runtime device structure (in memory) per driver instance
 */
struct device {
    /** Name of the device instance */
    const char *name;
    /** Address of device instance config information */
    const void *config;
    /** Address of the API structure exposed by the device instance */
    const void *api;
    /** Address of the device instance private data */
    void * const data;
};
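A minimal sketch of that ISR, assuming the zephyr 2.x driver layout (the exact signature varies between zephyr versions):

static void eth_mcux_rx_isr(const struct device *dev)
{
	/* device->data is the driver's private state */
	struct eth_context *context = dev->data;

	/* Hand off to the NXP SDK; it walks the RX descriptor ring and
	 * eventually invokes enet_handle.callback.
	 */
	ENET_ReceiveIRQHandler(context->base, &context->enet_handle);
}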

The data field of the structure below comes from device->data.
The enet_handle->callback function is then invoked to process the data carried in enet_handle->userData. (see .../kinetis/fsl_enet.c:3067)

// eth_mcux.c:112
struct eth_context {
    ENET_Type *base;
    void (*config_func)(void);
    struct net_if *iface;
    enet_handle_t enet_handle;
#if defined(CONFIG_PTP_CLOCK_MCUX)
    const struct device *ptp_clock;
    enet_ptp_config_t ptp_config;
    float clk_ratio;
#endif
    struct k_sem tx_buf_sem;
    enum eth_mcux_phy_state phy_state;
    bool enabled;
    bool link_up;
    uint32_t phy_addr;
    phy_duplex_t phy_duplex;
    phy_speed_t phy_speed;
    uint8_t mac_addr[6];
    void (*generate_mac)(uint8_t *);
    struct k_work phy_work;
    struct k_delayed_work delayed_phy_work;
    uint8_t frame_buf[NET_ETH_MAX_FRAME_SIZE]; /* Max MTU + ethernet header */
};

// kinetis/fsl_enet.h:649
/*! @brief Defines the ENET handler structure. */
struct _enet_handle
{
    enet_rx_bd_ring_t rxBdRing[FSL_FEATURE_ENET_QUEUE];       /*!< Receive buffer descriptor. */
    enet_tx_bd_ring_t txBdRing[FSL_FEATURE_ENET_QUEUE];       /*!< Transmit buffer descriptor. */
    uint16_t rxBuffSizeAlign[FSL_FEATURE_ENET_QUEUE];         /*!< Receive buffer size alignment. */
    uint16_t txBuffSizeAlign[FSL_FEATURE_ENET_QUEUE];         /*!< Transmit buffer size alignment. */
    bool rxMaintainEnable[FSL_FEATURE_ENET_QUEUE];            /*!< Receive buffer cache maintain. */
    bool txMaintainEnable[FSL_FEATURE_ENET_QUEUE];            /*!< Transmit buffer cache maintain. */
    uint8_t ringNum;                                          /*!< Number of used rings. */
    enet_callback_t callback;                                 /*!< Callback function. */
    void *userData;                                           /*!< Callback function parameter.*/
    enet_tx_dirty_ring_t txDirtyRing[FSL_FEATURE_ENET_QUEUE]; /*!< Ring to store tx frame information.*/
    bool TxReclaimEnable[FSL_FEATURE_ENET_QUEUE];             /*!< Tx reclaim enable flag.*/
};
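Inside the SDK's IRQ handling, the dispatch to this callback looks roughly as follows (a sketch; newer SDK versions pass additional ring and frame-info arguments):

/* fsl_enet.c (sketch): the registered callback receives userData back
 * exactly as it was stored in the handle.
 */
if (handle->callback != NULL)
{
    handle->callback(base, handle, kENET_RxEvent, handle->userData);
}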

In the eth_callback callback, enet_handle->userData is cast back to a struct eth_context,
and the RX handling path is entered. (see eth_mcux.c:872)
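A sketch of that cast, modeled on eth_callback in eth_mcux.c (only the RX branch is shown):

static void eth_callback(ENET_Type *base, enet_handle_t *handle,
			 enet_event_t event, void *param)
{
	/* userData was set to the eth_context when the handle was created */
	struct eth_context *context = param;

	switch (event) {
	case kENET_RxEvent:
		eth_rx(context);
		break;
	default:
		break;
	}
}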

A brief summary so far: the data path is device->data (the struct eth_context) → enet_handle → userData, with userData pointing back at the eth_context.


Next the flow enters the eth_rx receive function, with the userData above serving as the eth_context.

The first step is to read the frame length,
obtained through eth_context->enet_handle. (see eth_mcux.c:718)
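In the driver this goes through ENET_GetRxFrameSize (a sketch; some SDK versions take an extra ring-ID parameter):

uint32_t frame_length = 0U;
status_t status;

/* Ask the SDK for the length of the frame at the head of the RX ring */
status = ENET_GetRxFrameSize(&context->enet_handle, &frame_length);
if (status == kStatus_ENET_RxFrameEmpty) {
	return; /* nothing pending */
}

Internally, fsl_enet.c reads the current buffer descriptor: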

// kinetis/fsl_enet.c:1417
// Take the current buffer descriptor from the handle's RX ring,
// then read its length field.
enet_rx_bd_ring_t *rxBdRing = &handle->rxBdRing[ringId];
volatile enet_rx_bd_struct_t *curBuffDescrip = rxBdRing->rxBdBase + rxBdRing->rxGenIdx;
length = curBuffDescrip->length;

// kinetis/fsl_enet.h:641
/*! @brief Defines the ENET receive buffer descriptor ring/queue structure. */
typedef struct _enet_rx_bd_ring
{
    volatile enet_rx_bd_struct_t *rxBdBase; /*!< Buffer descriptor base address pointer. */
    uint16_t rxGenIdx;                      /*!< The current available receive buffer descriptor pointer. */
    uint16_t rxRingLen;                     /*!< Receive ring length. */
} enet_rx_bd_ring_t;


// kinetis/fsl_enet.h:381
/*! @brief Defines the receive buffer descriptor structure for the little endian system.*/
typedef struct _enet_rx_bd_struct
{
    uint16_t length;  /*!< Buffer descriptor data length. */
    uint16_t control; /*!< Buffer descriptor control and status. */
    uint8_t *buffer;  /*!< Data buffer pointer. */
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
    uint16_t controlExtend0;  /*!< Extend buffer descriptor control0. */
    uint16_t controlExtend1;  /*!< Extend buffer descriptor control1. */
    uint16_t payloadCheckSum; /*!< Internal payload checksum. */
    uint8_t headerLength;     /*!< Header length. */
    uint8_t protocolTyte;     /*!< Protocol type. */
    uint16_t reserved0;
    uint16_t controlExtend2; /*!< Extend buffer descriptor control2. */
    uint32_t timestamp;      /*!< Timestamp. */
    uint16_t reserved1;
    uint16_t reserved2;
    uint16_t reserved3;
    uint16_t reserved4;
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
} enet_rx_bd_struct_t;

Next, frame-length bytes from enet_rx_bd_struct_t->buffer are copied into context->frame_buf. (see eth_mcux.c:747)

If everything has gone well so far, the receive function allocates memory for a struct net_pkt. (see eth_mcux.c:736)
The allocation function sizes a buffer according to the frame length computed earlier and assigns it to net_pkt->buffer.
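A sketch of that allocation step, mirroring eth_mcux.c (error handling trimmed):

/* Allocate a net_pkt plus a data buffer large enough for the frame.
 * AF_UNSPEC: the address family is determined later by the L2 code.
 */
pkt = net_pkt_rx_alloc_with_buffer(context->iface, frame_length,
				   AF_UNSPEC, 0, K_NO_WAIT);
if (pkt == NULL) {
	/* out of buffers: the real driver flushes the descriptor
	 * and drops the frame
	 */
	return;
}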

/**
 * // net_pkt.h:62
 * @brief Network packet.
 *
 * Note that if you add new fields into net_pkt, remember to update
 * net_pkt_clone() function.
 */
struct net_pkt {
    union {
        /** Internal variable that is used when packet is sent
         * or received.
         */
        struct k_work work;
        /** Socket layer will queue received net_pkt into a k_fifo.
         * Since this happens after consuming net_pkt's k_work on
         * RX path, it is then fine to have both attributes sharing
         * the same memory area.
         */
        intptr_t sock_recv_fifo;
    };

    /** Slab pointer from where it belongs to */
    struct k_mem_slab *slab;

    /** buffer holding the packet */
    union {
        struct net_buf *frags;
        struct net_buf *buffer;
    };

    /** Internal buffer iterator used for reading/writing */
    struct net_pkt_cursor cursor;

    /** Network connection context */
    struct net_context *context;

    /** Network interface */
    struct net_if *iface;

    /** @cond ignore */

#if defined(CONFIG_NET_PKT_TIMESTAMP) || \
                defined(CONFIG_NET_PKT_RXTIME_STATS) || \
                defined(CONFIG_NET_PKT_TXTIME_STATS)
    struct {
        /** Timestamp if available. */
        struct net_ptp_time timestamp;
    };
#endif /* CONFIG_NET_PKT_TIMESTAMP */

#if defined(CONFIG_NET_PKT_TXTIME)
    /** Network packet TX time in the future (in nanoseconds) */
    uint64_t txtime;
#endif /* CONFIG_NET_PKT_TXTIME */

    /** Reference counter */
    atomic_t atomic_ref;

    /* Filled by layer 2 when network packet is received. */
    struct net_linkaddr lladdr_src;
    struct net_linkaddr lladdr_dst;

    uint8_t ip_hdr_len; /* pre-filled in order to avoid func call */

    uint8_t overwrite  : 1; /* Is packet content being overwritten? */

    uint8_t sent_or_eof: 1; /* For outgoing packet: is this sent or not
                 * For incoming packet of a socket: last
                 * packet before EOF
                 * Used only if defined(CONFIG_NET_TCP)
                 */
    union {
        uint8_t pkt_queued: 1; /* For outgoing packet: is this packet
                     * queued to be sent but has not reached
                     * the driver yet.
                     * Used only if defined(CONFIG_NET_TCP)
                     */
        uint8_t gptp_pkt: 1; /* For outgoing packet: is this packet
                   * a GPTP packet.
                   * Used only if defined (CONFIG_NET_GPTP)
                   */
    };

    uint8_t forwarding : 1; /* Are we forwarding this pkt
                 * Used only if defined(CONFIG_NET_ROUTE)
                 */
    uint8_t family     : 3; /* IPv4 vs IPv6 */

    union {
        uint8_t ipv4_auto_arp_msg : 1; /* Is this pkt IPv4 autoconf ARP
                         * message. Used only if
                         * defined(CONFIG_NET_IPV4_AUTO).
                         * Note: family needs to be
                         * AF_INET.
                         */
        uint8_t lldp_pkt          : 1; /* Is this pkt an LLDP message.
                         * Used only if
                         * defined(CONFIG_NET_LLDP).
                         * Note: family needs to be
                         * AF_UNSPEC.
                         */
        uint8_t ppp_msg           : 1; /* This is a PPP message */
    };

    union {
        /* IPv6 hop limit or IPv4 ttl for this network packet.
         * The value is shared between IPv6 and IPv4.
         */
        uint8_t ipv6_hop_limit;
        uint8_t ipv4_ttl;
    };

    /** Network packet priority, can be left out in which case packet
     * is not prioritised.
     */
    uint8_t priority;
};

After the net_pkt has been allocated, the data in context->frame_buf is written into it.

Whether the frame needs an RX timestamp is decided by checking ((struct net_eth_hdr *)pkt->frags->data)->type == NET_ETH_PTYPE_PTP.
If it is a PTP frame, the timestamp is read via context->base and context->enet_handle and assigned to pkt->timestamp.

With the net_pkt fully initialized, net_recv_data is called with the net_pkt as its argument.
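Put together, the tail of eth_rx looks roughly like this (a sketch assuming CONFIG_PTP_CLOCK_MCUX; VLAN handling and error paths omitted):

/* Copy the frame from the scratch buffer into the net_pkt chain */
net_pkt_write(pkt, context->frame_buf, frame_length);

#if defined(CONFIG_PTP_CLOCK_MCUX)
/* PTP frames get the current 1588 timer value as their RX timestamp */
struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);

if (ntohs(hdr->type) == NET_ETH_PTYPE_PTP) {
	enet_ptp_time_t ptpTimeData;

	ENET_Ptp1588GetTimer(context->base, &context->enet_handle,
			     &ptpTimeData);
	pkt->timestamp.second = ptpTimeData.second;
	pkt->timestamp.nanosecond = ptpTimeData.nanosecond;
}
#endif

/* Hand the packet over to the zephyr network stack */
if (net_recv_data(context->iface, pkt) < 0) {
	net_pkt_unref(pkt);
}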

This completes the device-driver stage; the flow now moves on to the zephyr L2 network stack.

A short summary: this entire stage takes place inside eth_rx. The data behind context->enet_handle is copied into context->frame_buf and then written into a net_pkt; depending on the config options, other needed data from the handle (such as the timestamp) is also placed into the net_pkt.
All subsequent processing operates on this net_pkt.


Next the flow enters the zephyr L2 stack code, and packet processing begins.
In net_recv_data, some initialization is performed on the net_pkt, for example:

// see net_core.c:394
pkt->cursor.buf = pkt->buffer;
pkt->cursor.pos = pkt->cursor.buf->data;
/* buffer cursor used in net_pkt */
struct net_pkt_cursor {
    /** Current net_buf pointer by the cursor */
    struct net_buf *buf;
    /** Current position in the data buffer of the net_buf */
    uint8_t *pos;
};

net_queue_rx is then called.
It uses k_work_init to set pkt->work.handler to process_rx_packet; processing continues in that handler.

The handler calls net_rx, which calls processing_data, which calls process_data, which in turn calls net_if_recv_data, as sketched below.
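A sketch of that hand-off, condensed from net_core.c (traffic-class selection simplified):

static void process_rx_packet(struct k_work *work)
{
	/* The k_work item is embedded in the net_pkt, so the pkt is
	 * recovered from the work pointer.
	 */
	struct net_pkt *pkt = CONTAINER_OF(work, struct net_pkt, work);

	net_rx(net_pkt_iface(pkt), pkt);
}

static void net_queue_rx(struct net_if *iface, struct net_pkt *pkt)
{
	uint8_t tc = net_rx_priority2tc(net_pkt_priority(pkt));

	k_work_init(net_pkt_work(pkt), process_rx_packet);
	net_tc_submit_to_rx_queue(tc, pkt);
}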

That completes the zephyr L2 core flow; we now enter the net interface abstraction layer.
As this shows, the intermediate L2 stage does relatively little to the pkt.


In net_if_recv_data, the iface->if_dev->l2->recv function pointer is invoked for the given iface.
This pointer is installed by the NET_L2_INIT macro; here it is the macro instance at ethernet.c:1000.
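In code form (a sketch based on net_if.c and ethernet.c):

/* net_if.c: after some checks, net_if_recv_data() ends up calling
 * the L2 recv hook of the interface:
 *
 *     return net_if_l2(iface)->recv(iface, pkt);
 */

/* ethernet.c:1000: NET_L2_INIT installs ethernet_recv as that hook */
NET_L2_INIT(ETHERNET_L2, ethernet_recv, ethernet_send,
	    ethernet_enable, ethernet_flags);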

Following that function pointer, we arrive at ethernet_recv.

net_pkt->frags->data is the start address of the frame data.
Pointing a struct net_eth_hdr variable at this address lets us read the ethernet header.

struct net_eth_hdr {
    struct net_eth_addr dst;
    struct net_eth_addr src;
    uint16_t type;
} __packed;

The net_pkt->lladdr fields are filled from the header's src and dst addresses,
and net_buf_pull is then called to advance the buffer pointer past the ethernet header.

Based on eth_hdr->type, the function of the relevant upper layer is called; here it is net_gptp_recv.
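Condensed, these steps of ethernet_recv look like this (a sketch; VLAN handling and sanity checks omitted):

struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);
struct net_linkaddr *lladdr;

/* Fill the link-layer addresses from the ethernet header */
lladdr = net_pkt_lladdr_src(pkt);
lladdr->addr = hdr->src.addr;
lladdr->len = sizeof(struct net_eth_addr);

lladdr = net_pkt_lladdr_dst(pkt);
lladdr->addr = hdr->dst.addr;
lladdr->len = sizeof(struct net_eth_addr);

/* Advance past the ethernet header */
net_buf_pull(pkt->frags, sizeof(struct net_eth_hdr));

#if defined(CONFIG_NET_GPTP)
if (ntohs(hdr->type) == NET_ETH_PTYPE_PTP) {
	return net_gptp_recv(iface, pkt);
}
#endif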

The network interface stage ends here, and the flow enters the gptp application stage.


Taking the net_pkt->frags->data pointer again and pointing a struct gptp_hdr pointer at it lets us read the PTP header.

struct gptp_hdr {
    /** Type of the message. */
    uint8_t message_type:4;

    /** Transport specific, always 1. */
    uint8_t transport_specific:4;

    /** Version of the PTP, always 2. */
    uint8_t ptp_version:4;

    /** Reserved field. */
    uint8_t reserved0:4;

    /** Total length of the message from the header to the last TLV. */
    uint16_t message_length;

    /** Domain number, always 0. */
    uint8_t domain_number;

    /** Reserved field. */
    uint8_t reserved1;

    /** Message flags. */
    struct gptp_flags flags;

    /** Correction Field. The content depends of the message type. */
    int64_t correction_field;

    /** Reserved field. */
    uint32_t reserved2;

    /** Port Identity of the sender. */
    struct gptp_port_identity port_id;

    /** Sequence Id. */
    uint16_t sequence_id;

    /** Control value. Sync: 0, Follow-up: 2, Others: 5. */
    uint8_t control;

    /** Message Interval in Log2 for Sync and Announce messages. */
    int8_t log_msg_interval;
} __packed;

net_gptp_recv calls gptp_handle_critical_msg,
which checks whether the PTP message is a GPTP_PATH_DELAY_REQ_MESSAGE.
If it is, and the message is valid, it is handled immediately.
All other messages are put into gptp_rx_queue with k_fifo_put for later processing.
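A sketch of this logic, condensed from the gptp subsystem:

enum net_verdict net_gptp_recv(struct net_if *iface, struct net_pkt *pkt)
{
	/* pkt->frags->data now points at the PTP header */
	struct gptp_hdr *hdr = (struct gptp_hdr *)pkt->frags->data;

	if (hdr->ptp_version != 2) {
		return NET_DROP;
	}

	/* PDELAY_REQ is latency-critical and is answered inline;
	 * everything else is deferred to the gptp thread via the FIFO.
	 */
	if (!gptp_handle_critical_msg(iface, pkt)) {
		k_fifo_put(&gptp_rx_queue, pkt);
	}

	return NET_OK;
}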

With that, the entire RX path is complete.
Packets placed in the FIFO are later dequeued from gptp_rx_queue by the gptp thread itself.
