ctrl->dcbaa->dev_context_ptrs[0] = 0;
- free((void *)(uintptr_t)le64_to_cpu(ctrl->scratchpad->sp_array[0]));
+ free(xhci_bus_to_virt(ctrl, le64_to_cpu(ctrl->scratchpad->sp_array[0])));
free(ctrl->scratchpad->sp_array);
free(ctrl->scratchpad);
ctrl->scratchpad = NULL;
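This teardown is the one spot that needs the reverse mapping: sp_array[0] holds the bus address the controller was handed, not a CPU pointer, so it must be translated back before free(). A minimal sketch of that round trip, assuming a hypothetical fixed bus offset (the real code also byte-swaps through cpu_to_le64()/le64_to_cpu(), and the real offset comes from the device's dma-ranges):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define BUS_OFFSET 0xc0000000ull	/* hypothetical dma-ranges offset */

static uint64_t to_bus(void *virt)  { return (uintptr_t)virt + BUS_OFFSET; }
static void *to_virt(uint64_t bus)  { return (void *)(uintptr_t)(bus - BUS_OFFSET); }

int main(void)
{
	void *page = malloc(4096);
	uint64_t sp_entry;

	if (!page)
		return 1;

	sp_entry = to_bus(page);		/* what sp_array[0] stores */
	assert(to_virt(sp_entry) == page);	/* recovered before free() */
	free(to_virt(sp_entry));
	return 0;
}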
* @param link_trbs flag to indicate whether to link the trbs or NOT
* @return none
*/
-static void xhci_link_segments(struct xhci_segment *prev,
- struct xhci_segment *next, bool link_trbs)
+static void xhci_link_segments(struct xhci_ctrl *ctrl, struct xhci_segment *prev,
+ struct xhci_segment *next, bool link_trbs)
{
u32 val;
u64 val_64 = 0;
	if (!prev || !next)
		return;
prev->next = next;
if (link_trbs) {
- val_64 = virt_to_phys(next->trbs);
+ val_64 = xhci_virt_to_bus(ctrl, next->trbs);
prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
cpu_to_le64(val_64);
* @param link_trbs flag to indicate whether to link the trbs or NOT
* @return pointer to the newly created RING
*/
-struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
+struct xhci_ring *xhci_ring_alloc(struct xhci_ctrl *ctrl, unsigned int num_segs,
+ bool link_trbs)
{
struct xhci_ring *ring;
struct xhci_segment *prev;
next = xhci_segment_alloc();
BUG_ON(!next);
- xhci_link_segments(prev, next, link_trbs);
+ xhci_link_segments(ctrl, prev, next, link_trbs);
prev = next;
num_segs--;
}
- xhci_link_segments(prev, ring->first_seg, link_trbs);
+ xhci_link_segments(ctrl, prev, ring->first_seg, link_trbs);
if (link_trbs) {
/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
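Per the spec sections referenced in the comment, a Link TRB with LINK_TOGGLE set makes the ring's consumer invert its cycle-state sense when following it, so TRBs written on the previous lap no longer match and read as not yet valid. A toy model of that ownership test (illustrative only, not the real TRB layout):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TRB_CYCLE	1u	/* bit 0 of the TRB control word */

/* A TRB belongs to the consumer only while its cycle bit matches the
 * consumer's cycle state; a toggling link flips that state once per lap. */
static bool trb_ready(uint32_t control, bool cycle_state)
{
	return (control & TRB_CYCLE) == (uint32_t)cycle_state;
}

int main(void)
{
	bool cycle_state = true;

	assert(trb_ready(0x1, cycle_state));	/* fresh TRB, current lap */
	cycle_state = !cycle_state;		/* followed a LINK_TOGGLE link */
	assert(!trb_ready(0x1, cycle_state));	/* same TRB now reads stale */
	return 0;
}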
struct xhci_hccr *hccr = ctrl->hccr;
struct xhci_hcor *hcor = ctrl->hcor;
struct xhci_scratchpad *scratchpad;
+ uint64_t val_64;
int num_sp;
uint32_t page_size;
void *buf;
scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
if (!scratchpad->sp_array)
goto fail_sp2;
- ctrl->dcbaa->dev_context_ptrs[0] =
- cpu_to_le64((uintptr_t)scratchpad->sp_array);
+
+ val_64 = xhci_virt_to_bus(ctrl, scratchpad->sp_array);
+ ctrl->dcbaa->dev_context_ptrs[0] = cpu_to_le64(val_64);
xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[0],
sizeof(ctrl->dcbaa->dev_context_ptrs[0]));
xhci_flush_cache((uintptr_t)buf, num_sp * page_size);
for (i = 0; i < num_sp; i++) {
- uintptr_t ptr = (uintptr_t)buf + i * page_size;
- scratchpad->sp_array[i] = cpu_to_le64(ptr);
+ val_64 = xhci_virt_to_bus(ctrl, buf + i * page_size);
+ scratchpad->sp_array[i] = cpu_to_le64(val_64);
}
	xhci_flush_cache((uintptr_t)scratchpad->sp_array,
			 sizeof(u64) * num_sp);
}
/* Allocate endpoint 0 ring */
- virt_dev->eps[0].ring = xhci_ring_alloc(1, true);
+ virt_dev->eps[0].ring = xhci_ring_alloc(ctrl, 1, true);
- byte_64 = virt_to_phys(virt_dev->out_ctx->bytes);
+ byte_64 = xhci_virt_to_bus(ctrl, virt_dev->out_ctx->bytes);
/* Point to output device context in dcbaa. */
ctrl->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(byte_64);
return -ENOMEM;
}
- val_64 = virt_to_phys(ctrl->dcbaa);
+ val_64 = xhci_virt_to_bus(ctrl, ctrl->dcbaa);
/* Set the pointer in DCBAA register */
xhci_writeq(&hcor->or_dcbaap, val_64);
/* Command ring control pointer register initialization */
- ctrl->cmd_ring = xhci_ring_alloc(1, true);
+ ctrl->cmd_ring = xhci_ring_alloc(ctrl, 1, true);
/* Set the address in the Command Ring Control register */
- trb_64 = virt_to_phys(ctrl->cmd_ring->first_seg->trbs);
+ trb_64 = xhci_virt_to_bus(ctrl, ctrl->cmd_ring->first_seg->trbs);
val_64 = xhci_readq(&hcor->or_crcr);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
ctrl->ir_set = &ctrl->run_regs->ir_set[0];
/* Event ring does not maintain link TRB */
- ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
+ ctrl->event_ring = xhci_ring_alloc(ctrl, ERST_NUM_SEGS, false);
ctrl->erst.entries = xhci_malloc(sizeof(struct xhci_erst_entry) *
ERST_NUM_SEGS);
for (val = 0, seg = ctrl->event_ring->first_seg;
val < ERST_NUM_SEGS;
val++) {
- trb_64 = virt_to_phys(seg->trbs);
struct xhci_erst_entry *entry = &ctrl->erst.entries[val];
+ trb_64 = xhci_virt_to_bus(ctrl, seg->trbs);
entry->seg_addr = cpu_to_le64(trb_64);
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
entry->rsvd = 0;
xhci_flush_cache((uintptr_t)ctrl->erst.entries,
ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));
- deq = virt_to_phys(ctrl->event_ring->dequeue);
+ deq = xhci_virt_to_bus(ctrl, ctrl->event_ring->dequeue);
/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
		    (u64)deq & (u64)~ERST_PTR_MASK);
/* this is the event ring segment table pointer */
val_64 = xhci_readq(&ctrl->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
- val_64 |= virt_to_phys(ctrl->erst.entries) & ~ERST_PTR_MASK;
+ val_64 |= xhci_virt_to_bus(ctrl, ctrl->erst.entries) & ~ERST_PTR_MASK;
xhci_writeq(&ctrl->ir_set->erst_base, val_64);
/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
- trb_64 = virt_to_phys(virt_dev->eps[0].ring->first_seg->trbs);
+ trb_64 = xhci_virt_to_bus(ctrl, virt_dev->eps[0].ring->first_seg->trbs);
ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);
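The OR with cycle_state works because the Endpoint Context's TR Dequeue Pointer field only uses bits 63:4 for the address, leaving bit 0 free to carry the Dequeue Cycle State. That in turn assumes the bus translation preserves 16-byte alignment, which any offset-style dma-ranges mapping does. A quick check of the packing:

#include <assert.h>
#include <stdint.h>

#define DCS	0x1ull		/* dequeue cycle state lives in bit 0 */

int main(void)
{
	uint64_t trb_bus = 0x80001000ull;	/* 16-byte aligned TRB address */
	uint64_t deq = trb_bus | DCS;		/* what ep0_ctx->deq receives */

	assert((deq & ~0xfull) == trb_bus);	/* address intact in bits 63:4 */
	assert((deq & DCS) == 1);		/* cycle state still recoverable */
	return 0;
}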
u32 ep_index, trb_type cmd)
{
u32 fields[4];
- u64 val_64 = virt_to_phys(ptr);
+ u64 val_64 = 0;
BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));
+ if (ptr)
+ val_64 = xhci_virt_to_bus(ctrl, ptr);
+
fields[0] = lower_32_bits(val_64);
fields[1] = upper_32_bits(val_64);
fields[2] = 0;
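The split initialization is more than style: under an identity mapping, virt_to_phys(NULL) happened to yield the 0 that pointer-less commands (Enable Slot, for instance) require, but with a bus offset in play a blind translation of NULL would hand the controller the offset itself. Hence the explicit `if (ptr)` guard. A compact demonstration of the hazard, using a hypothetical fixed offset:

#include <assert.h>
#include <stdint.h>

#define BUS_OFFSET 0xc0000000ull	/* hypothetical dma-ranges offset */

static uint64_t bad_virt_to_bus(void *p)
{
	return (uintptr_t)p + BUS_OFFSET;	/* no NULL check */
}

int main(void)
{
	/* A pointer-less command would be issued with a bogus address: */
	assert(bad_virt_to_bus(NULL) == BUS_OFFSET);	/* not 0! */
	return 0;
}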
/* Inform the hardware */
xhci_writeq(&ctrl->ir_set->erst_dequeue,
- virt_to_phys(ctrl->event_ring->dequeue) | ERST_EHB);
+ xhci_virt_to_bus(ctrl, ctrl->event_ring->dequeue) | ERST_EHB);
}
u64 addr;
int ret;
u32 trb_fields[4];
- u64 val_64 = virt_to_phys(buffer);
+ u64 val_64 = xhci_virt_to_bus(ctrl, buffer);
void *last_transfer_trb_addr;
int available_length;
}
if ((uintptr_t)(le64_to_cpu(event->trans_event.buffer)) !=
- (uintptr_t)virt_to_phys(last_transfer_trb_addr)) {
+ (uintptr_t)xhci_virt_to_bus(ctrl, last_transfer_trb_addr)) {
		available_length -=
			(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len));
	}
xhci_acknowledge_event(ctrl);
if (length > 0) {
if (req->requesttype & USB_DIR_IN)
field |= TRB_DIR_IN;
- buf_64 = virt_to_phys(buffer);
+ buf_64 = xhci_virt_to_bus(ctrl, buffer);
trb_fields[0] = lower_32_bits(buf_64);
trb_fields[1] = upper_32_bits(buf_64);
#ifndef HOST_XHCI_H_
#define HOST_XHCI_H_
+#include <phys2bus.h>
#include <reset.h>
#include <asm/types.h>
#include <asm/cache.h>
#define XHCI_MTK_HOST BIT(0)
};
+#if CONFIG_IS_ENABLED(DM_USB)
+#define xhci_to_dev(_ctrl) _ctrl->dev
+#else
+#define xhci_to_dev(_ctrl) NULL
+#endif
+
unsigned long trb_addr(struct xhci_segment *seg, union xhci_trb *trb);
struct xhci_input_control_ctx
*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx);
void xhci_flush_cache(uintptr_t addr, u32 type_len);
void xhci_inval_cache(uintptr_t addr, u32 type_len);
void xhci_cleanup(struct xhci_ctrl *ctrl);
-struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs);
+struct xhci_ring *xhci_ring_alloc(struct xhci_ctrl *ctrl, unsigned int num_segs,
+ bool link_trbs);
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id);
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
struct xhci_hcor *hcor);
struct xhci_ctrl *xhci_get_ctrl(struct usb_device *udev);
+static inline dma_addr_t xhci_virt_to_bus(struct xhci_ctrl *ctrl, void *addr)
+{
+ return dev_phys_to_bus(xhci_to_dev(ctrl), virt_to_phys(addr));
+}
+
+static inline void *xhci_bus_to_virt(struct xhci_ctrl *ctrl, dma_addr_t addr)
+{
+ return phys_to_virt(dev_bus_to_phys(xhci_to_dev(ctrl), addr));
+}
+
#endif /* HOST_XHCI_H_ */
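Taken together, the two helpers compose virt_to_phys()/phys_to_virt() with the dev_phys_to_bus()/dev_bus_to_phys() pair pulled in via <phys2bus.h>, so a controller whose view of memory is offset from the CPU's (the Raspberry Pi 4 being the motivating case) is programmed with addresses it can actually reach, while identity-mapped platforms see no change. As a rough sketch of what a single dma-ranges window <bus_base cpu_base size> implies for the translation, with illustrative names rather than U-Boot API:

#include <assert.h>
#include <stdint.h>

struct dma_window {
	uint64_t bus_base;	/* device-visible base of the window */
	uint64_t cpu_base;	/* CPU physical base of the same window */
	uint64_t size;
};

static uint64_t sketch_phys_to_bus(const struct dma_window *w, uint64_t phys)
{
	if (w && phys - w->cpu_base < w->size)
		return phys - w->cpu_base + w->bus_base;
	return phys;		/* no window: identity mapping */
}

static uint64_t sketch_bus_to_phys(const struct dma_window *w, uint64_t bus)
{
	if (w && bus - w->bus_base < w->size)
		return bus - w->bus_base + w->cpu_base;
	return bus;
}

int main(void)
{
	struct dma_window w = { 0xc0000000ull, 0x0ull, 0x40000000ull };

	assert(sketch_phys_to_bus(&w, 0x1000) == 0xc0001000ull);
	assert(sketch_bus_to_phys(&w, 0xc0001000ull) == 0x1000);
	assert(sketch_phys_to_bus(NULL, 0x1000) == 0x1000);	/* identity fallback */
	return 0;
}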