Index: sys/arch/amd64/amd64/bus_dma.c
===================================================================
RCS file: /mount/openbsd/cvs/src/sys/arch/amd64/amd64/bus_dma.c,v
diff -u -p -u -p -r1.51 bus_dma.c
--- sys/arch/amd64/amd64/bus_dma.c	9 Jun 2019 12:52:04 -0000	1.51
+++ sys/arch/amd64/amd64/bus_dma.c	14 Aug 2024 16:35:29 -0000
@@ -108,8 +108,13 @@ _bus_dmamap_create(bus_dma_tag_t t, bus_
 	bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
 {
 	struct bus_dmamap *map;
+	struct pglist mlist;
+	struct vm_page **pg, *pgnext;
+	size_t mapsize, sz, ssize;
+	vaddr_t va, sva;
 	void *mapstore;
-	size_t mapsize;
+	int npages, error;
+	const struct kmem_dyn_mode *kd;
 
 	/*
 	 * Allocate and initialize the DMA map.  The end of the map
@@ -125,6 +130,16 @@ _bus_dmamap_create(bus_dma_tag_t t, bus_
 	 */
 	mapsize = sizeof(struct bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
+
+	/* allocate and use bounce buffers when running as SEV guest */
+	if (cpu_sev_guestmode) {
+		/* this many pages plus one in case we get split */
+		npages = round_page(size) / PAGE_SIZE + 1;
+		if (npages < nsegments)
+			npages = nsegments;
+		mapsize += sizeof(struct vm_page *) * npages;
+	}
+
 	if ((mapstore = malloc(mapsize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO))) == NULL)
@@ -135,8 +150,59 @@ _bus_dmamap_create(bus_dma_tag_t t, bus_
 	map->_dm_segcnt = nsegments;
 	map->_dm_maxsegsz = maxsegsz;
 	map->_dm_boundary = boundary;
+	if (cpu_sev_guestmode) {
+		map->_dm_pages = (void *)&map->dm_segs[nsegments];
+		map->_dm_npages = npages;
+	}
 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
 
+	if (!cpu_sev_guestmode) {
+		*dmamp = map;
+		return (0);
+	}
+
+	sz = npages << PGSHIFT;
+	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
+	va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, kd);
+	if (va == 0) {
+		map->_dm_npages = 0;
+		free(map, M_DEVBUF, mapsize);
+		return (ENOMEM);
+	}
+
+	TAILQ_INIT(&mlist);
+	error = uvm_pglistalloc(sz, 0, -1, PAGE_SIZE, 0, &mlist, nsegments,
+	    (flags & BUS_DMA_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK);
+	if (error) {
+		map->_dm_npages = 0;
+		km_free((void *)va, sz, &kv_any, &kp_none);
+		free(map, M_DEVBUF, mapsize);
+		return (ENOMEM);
+	}
+
+	sva = va;
+	ssize = sz;
+	pgnext = TAILQ_FIRST(&mlist);
+	for (pg = map->_dm_pages; npages--; va += PAGE_SIZE, pg++) {
+		*pg = pgnext;
+		error = pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(*pg),
+		    PROT_READ | PROT_WRITE,
+		    PROT_READ | PROT_WRITE | PMAP_WIRED |
+		    PMAP_CANFAIL | PMAP_NOCRYPT);
+		if (error) {
+			pmap_update(pmap_kernel());
+			map->_dm_npages = 0;
+			km_free((void *)sva, ssize, &kv_any, &kp_none);
+			free(map, M_DEVBUF, mapsize);
+			uvm_pglistfree(&mlist);
+			return (ENOMEM);
+		}
+		pgnext = TAILQ_NEXT(*pg, pageq);
+		bzero((void *)va, PAGE_SIZE);
+	}
+	pmap_update(pmap_kernel());
+	map->_dm_pgva = sva;
+
 	*dmamp = map;
 	return (0);
 }
@@ -149,7 +215,22 @@ void
 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
 {
 	size_t mapsize;
-
+	struct vm_page **pg;
+	struct pglist mlist;
+
+	if (map->_dm_pgva) {
+		km_free((void *)map->_dm_pgva, map->_dm_npages << PGSHIFT,
+		    &kv_any, &kp_none);
+	}
+
+	if (map->_dm_pages) {
+		TAILQ_INIT(&mlist);
+		for (pg = map->_dm_pages; map->_dm_npages--; pg++) {
+			TAILQ_INSERT_TAIL(&mlist, *pg, pageq);
+		}
+		uvm_pglistfree(&mlist);
+	}
+
 	mapsize = sizeof(struct bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
 	free(map, M_DEVBUF, mapsize);
@@ -383,6 +464,7 @@ _bus_dmamap_unload(bus_dma_tag_t t, bus_
 	 */
 	map->dm_mapsize = 0;
 	map->dm_nsegs = 0;
+	map->_dm_nused = 0;
 }
 
 /*
@@ -393,7 +475,40 @@ void
 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
     bus_size_t size, int op)
 {
-	/* Nothing to do here. */
+	bus_dma_segment_t *sg;
+	int i, off = addr;
+	bus_size_t l;
+
+	if (!cpu_sev_guestmode)
+		return;
+
+	for (i = map->_dm_segcnt, sg = map->dm_segs; size && i--; sg++) {
+		if (off >= sg->ds_len) {
+			off -= sg->ds_len;
+			continue;
+		}
+
+		l = sg->ds_len - off;
+		if (l > size)
+			l = size;
+		size -= l;
+
+		/* PREREAD and POSTWRITE are no-ops. */
+
+		/* READ: device -> memory */
+		if (op & BUS_DMASYNC_POSTREAD) {
+			bcopy((void *)(sg->_ds_bounce_va + off),
+			    (void *)(sg->_ds_va + off), l);
+		}
+
+		/* WRITE: memory -> device */
+		if (op & BUS_DMASYNC_PREWRITE) {
+			bcopy((void *)(sg->_ds_va + off),
+			    (void *)(sg->_ds_bounce_va + off), l);
+		}
+
+		off = 0;
+	}
 }
 
 /*
@@ -566,9 +681,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t,
 {
 	bus_size_t sgsize;
 	bus_addr_t curaddr, lastaddr, baddr, bmask;
-	vaddr_t vaddr = (vaddr_t)buf;
-	int seg;
+	vaddr_t pgva = -1, vaddr = (vaddr_t)buf;
+	int seg, page, off;
 	pmap_t pmap;
+	struct vm_page *pg;
 
 	if (p != NULL)
 		pmap = p->p_vmspace->vm_map.pmap;
@@ -589,6 +705,18 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t,
 			panic("Non dma-reachable buffer at curaddr %#lx(raw)",
 			    curaddr);
 
+		if (cpu_sev_guestmode) {
+			/* use bounce buffer */
+			if (map->_dm_nused + 1 >= map->_dm_npages)
+				return (ENOMEM);
+
+			off = vaddr & PAGE_MASK;
+			pg = map->_dm_pages[page = map->_dm_nused++];
+			curaddr = VM_PAGE_TO_PHYS(pg) + off;
+
+			pgva = map->_dm_pgva + (page << PGSHIFT) + off;
+		}
+
 		/*
 		 * Compute the segment size, and adjust counts.
 		 */
@@ -612,6 +740,8 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t,
 		if (first) {
 			map->dm_segs[seg].ds_addr = curaddr;
 			map->dm_segs[seg].ds_len = sgsize;
+			map->dm_segs[seg]._ds_va = vaddr;
+			map->dm_segs[seg]._ds_bounce_va = pgva;
 			first = 0;
 		} else {
 			if (curaddr == lastaddr &&
@@ -626,6 +756,8 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t,
 				break;
 			map->dm_segs[seg].ds_addr = curaddr;
 			map->dm_segs[seg].ds_len = sgsize;
+			map->dm_segs[seg]._ds_va = vaddr;
+			map->dm_segs[seg]._ds_bounce_va = pgva;
 		}
 	}
 
Index: sys/arch/amd64/include/bus.h
===================================================================
RCS file: /mount/openbsd/cvs/src/sys/arch/amd64/include/bus.h,v
diff -u -p -u -p -r1.35 bus.h
--- sys/arch/amd64/include/bus.h	28 Oct 2020 09:58:57 -0000	1.35
+++ sys/arch/amd64/include/bus.h	14 Aug 2024 16:35:29 -0000
@@ -552,6 +552,9 @@ typedef struct bus_dmamap *bus_dmamap_t
 struct bus_dma_segment {
 	bus_addr_t	ds_addr;	/* DMA address */
 	bus_size_t	ds_len;		/* length of transfer */
+	vaddr_t		_ds_va;		/* mapped loaded data */
+	vaddr_t		_ds_bounce_va;	/* mapped bounced data */
+
 	/*
 	 * Ugh. need this so can pass alignment down from bus_dmamem_alloc
 	 * to scatter gather maps. only the first one is used so the rest is
@@ -654,6 +657,11 @@ struct bus_dmamap {
 	bus_size_t	_dm_boundary;	/* don't cross this */
 	void		*_dm_cookie;	/* cookie for bus-specific functions */
+
+	struct vm_page **_dm_pages;	/* replacement pages */
+	vaddr_t		_dm_pgva;	/* those above -- mapped */
+	int		_dm_npages;	/* number of pages allocated */
+	int		_dm_nused;	/* number of pages replaced */
 
 	/*
 	 * PUBLIC MEMBERS: these are used by machine-independent code.
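
For illustration, a minimal sketch of how a driver would exercise the bounce
path above through the regular bus_dma(9) interface.  Only the bus_dmamap_*
calls are the real API; example_softc, its members and example_rx() are
made-up names.  With cpu_sev_guestmode set, the load step substitutes bounce
pages for the buffer's real pages, BUS_DMASYNC_PREREAD stays a no-op, and
BUS_DMASYNC_POSTREAD copies the device's data from the bounce pages back into
the buffer:

#include <sys/param.h>
#include <machine/bus.h>

/* hypothetical softc; only sc_dmat and sc_map matter here */
struct example_softc {
	bus_dma_tag_t	sc_dmat;	/* bus DMA tag from attach */
	bus_dmamap_t	sc_map;		/* map from bus_dmamap_create() */
};

int
example_rx(struct example_softc *sc, void *buf, bus_size_t len)
{
	int error;

	/* in SEV guest mode this points the segments at bounce pages */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
	    NULL, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* PREREAD is a no-op in the bounce implementation */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0, len,
	    BUS_DMASYNC_PREREAD);

	/* ... start the device on sc->sc_map->dm_segs[0].ds_addr ... */

	/* POSTREAD copies device data out of the bounce pages into buf */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0, len,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
	return (0);
}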