# q35 - VirtIO guest (graphical console)
# =========================================================
#
# Usage:
#
#   $ qemu-system-x86_64 \
#       -nodefaults \
#       -readconfig q35-virtio-graphical.cfg
#
# You will probably need to tweak the lines marked as
# CHANGE ME before being able to use this configuration!
#
# The guest will have a selection of VirtIO devices
# tailored towards optimal performance with modern guests,
# and will be accessed through a graphical console.
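#
# If you'd rather not edit this file, the CHANGE ME values
# can also be overridden from the command line with -set;
# a minimal sketch (the image path below is just an
# example) would be:
#
#   $ qemu-system-x86_64 \
#       -nodefaults \
#       -readconfig q35-virtio-graphical.cfg \
#       -set drive.disk.file=/path/to/guest.qcow2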
#
# ---------------------------------------------------------
#
# Using -nodefaults is required to have full control over
# the virtual hardware: when it's specified, QEMU will
# populate the board with only the builtin peripherals
# plus a small selection of core PCI devices and
# controllers; the user will then have to explicitly add
# further devices.
#
# The core PCI devices show up in the guest as:
#
# 00:00.0 Host bridge
# 00:1f.0 ISA bridge / LPC
# 00:1f.2 SATA (AHCI) controller
# 00:1f.3 SMBus controller
#
# This configuration file adds a number of other useful
# devices, more specifically:
#
# 00:01.0 VGA compatible controller
# 00:1b.0 Audio device
# 00:1c.* PCI bridge (PCI Express Root Ports)
# 01:00.0 SCSI storage controller
# 02:00.0 Ethernet controller
# 03:00.0 USB controller
#
# More information about these devices is available below.
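#
# Once the guest is running, the resulting PCI topology can
# be double-checked from the QEMU monitor with
#
#   (qemu) info pci
#
# or, inside a Linux guest, with a tool such as lspci.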

# Machine options
# =========================================================
#
# We use the q35 machine type and enable KVM acceleration
# for better performance.
#
# Using less than 1 GiB of memory is probably not going to
# yield good performance in the guest, and might even lead
# to obscure boot issues in some cases.
[machine]
type = "q35"
[accel]
accel = "kvm"
[memory]
size = "1024"
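# For example, to give the guest 4 GiB instead (assuming the
# host can spare it), the section above would become:
#
#   [memory]
#     size = "4096"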

# PCI bridge (PCI Express Root Ports)
# =========================================================
#
# We create eight PCI Express Root Ports, and we plug them
# all into separate functions of the same slot. Some of
# them will be used by devices, the rest will remain
# available for hotplug.
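#
# As a sketch of how a spare port could be used later, a
# virtio NIC might be hotplugged from the QEMU monitor
# (the "hotnet" netdev name below is made up):
#
#   (qemu) netdev_add user,id=hotnet
#   (qemu) device_add virtio-net-pci,netdev=hotnet,bus=pcie.4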
[device "pcie.1"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.0"
port = "1"
chassis = "1"
multifunction = "on"
[device "pcie.2"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.1"
port = "2"
chassis = "2"
[device "pcie.3"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.2"
port = "3"
chassis = "3"
[device "pcie.4"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.3"
port = "4"
chassis = "4"
[device "pcie.5"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.4"
port = "5"
chassis = "5"
[device "pcie.6"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.5"
port = "6"
chassis = "6"
[device "pcie.7"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.6"
port = "7"
chassis = "7"
[device "pcie.8"]
driver = "pcie-root-port"
bus = "pcie.0"
addr = "1c.7"
port = "8"
chassis = "8"

# SCSI storage controller (and storage)
# =========================================================
#
# We use virtio-scsi here so that we can (hot)plug a large
# number of disks without running into issues; a SCSI disk,
# backed by a qcow2 disk image on the host's filesystem, is
# attached to it.
#
# We also create an optical disk, mostly for installation
# purposes: once the guest OS has been successfully
# installed, the guest will no longer boot from optical
# media. If you don't want, or no longer want, to have an
# optical disk in the guest you can safely comment out
# all relevant sections below.
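#
# If you don't have a disk image yet, an empty one can be
# created on the host with qemu-img, for example (adjust
# the path and size as needed):
#
#   $ qemu-img create -f qcow2 guest.qcow2 32G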
[device "scsi"]
driver = "virtio-scsi-pci"
bus = "pcie.1"
addr = "00.0"
[device "scsi-disk"]
driver = "scsi-hd"
bus = "scsi.0"
drive = "disk"
bootindex = "1"
[drive "disk"]
file = "guest.qcow2" # CHANGE ME
format = "qcow2"
if = "none"
[device "scsi-optical-disk"]
driver = "scsi-cd"
bus = "scsi.0"
drive = "optical-disk"
bootindex = "2"
[drive "optical-disk"]
file = "install.iso" # CHANGE ME
format = "raw"
if = "none"

# Ethernet controller
# =========================================================
#
# We use virtio-net for improved performance over emulated
# hardware; on the host side, we take advantage of user
# networking so that the QEMU process doesn't require any
# additional privileges.
[netdev "hostnet"]
type = "user"
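# With user networking the guest cannot be reached from the
# outside by default; as a sketch (the 2222 -> 22 mapping is
# just an example), a host-to-guest port forward for SSH can
# be set up by uncommenting the following line, which still
# belongs to the "hostnet" section above:
#
# hostfwd = "tcp:127.0.0.1:2222-:22"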
[device "net"]
driver = "virtio-net-pci"
netdev = "hostnet"
bus = "pcie.2"
addr = "00.0"

# USB controller (and input devices)
# =========================================================
#
# We add a virtualization-friendly USB 3.0 controller and
# a USB tablet so that graphical guests can be controlled
# appropriately. A USB keyboard is not needed, as q35
# guests get a PS/2 one added automatically.
[device "usb"]
driver = "nec-usb-xhci"
bus = "pcie.3"
addr = "00.0"
[device "tablet"]
driver = "usb-tablet"
bus = "usb.0"
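# If you want to pass a physical USB device from the host
# through to the guest, a sketch along the following lines
# could be added (the vendor/product IDs are placeholders;
# use lsusb on the host to find the real ones):
#
# [device "usb-passthrough"]
#   driver = "usb-host"
#   bus = "usb.0"
#   vendorid = "0x1234"
#   productid = "0x5678"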

# VGA compatible controller
# =========================================================
#
# We plug the QXL video card directly into the PCI Express
# Root Bus as it is a legacy PCI device; this way, we can
# reduce the number of PCI Express controllers in the
# guest.
#
# If you're running the guest on a remote, potentially
# headless host, you will probably want to append something
# like
#
# -display vnc=127.0.0.1:0
#
# to the command line in order to prevent QEMU from
# creating a graphical display window on the host and
# enable remote access instead.
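#
# Since QXL is primarily targeted at SPICE, another option
# (a sketch; requires a SPICE-enabled QEMU build, and the
# address and port are just examples) is to expose a SPICE
# display instead by appending
#
#   -spice addr=127.0.0.1,port=5930,disable-ticketing=on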
[device "video"]
driver = "qxl-vga"
bus = "pcie.0"
addr = "01.0"

# Audio device
# =========================================================
#
# Like the video card, the sound card is a legacy PCI
# device and as such can be plugged directly into the PCI
# Express Root Bus.
[device "sound"]
driver = "ich9-intel-hda"
bus = "pcie.0"
addr = "1b.0"
[device "duplex"]
driver = "hda-duplex"
bus = "sound.0"
cad = "0"
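# Depending on your QEMU version and host audio stack, you
# may need (or want) to tie the codec to an explicit host
# backend; a sketch (assuming PulseAudio on the host and an
# audiodev called "snd0") would be to append
#
#   -audiodev pa,id=snd0
#
# to the command line and uncomment the following line,
# which still belongs to the "duplex" section above:
#
# audiodev = "snd0"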