Fix some typos found by codespell

Signed-off-by: Stefan Weil <sw@weilnetz.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
commit cb8d4c8f54
parent d506dc87b9

@@ -270,7 +270,7 @@ f_sample *mixeng_clip[2][2][2][3] = {
  * August 21, 1998
  * Copyright 1998 Fabrice Bellard.
  *
- * [Rewrote completly the code of Lance Norskog And Sundry
+ * [Rewrote completely the code of Lance Norskog And Sundry
  * Contributors with a more efficient algorithm.]
  *
  * This source code is freely redistributable and may be used for
@@ -898,7 +898,7 @@ static struct audio_option oss_options[] = {
  .name = "EXCLUSIVE",
  .tag = AUD_OPT_BOOL,
  .valp = &glob_conf.exclusive,
- .descr = "Open device in exclusive mode (vmix wont work)"
+ .descr = "Open device in exclusive mode (vmix won't work)"
  },
  #ifdef USE_DSP_POLICY
  {
@@ -15,7 +15,7 @@
  * unix socket. For each client, the server will create some eventfd
  * (see EVENTFD(2)), one per vector. These fd are transmitted to all
  * clients using the SCM_RIGHTS cmsg message. Therefore, each client is
- * able to send a notification to another client without beeing
+ * able to send a notification to another client without being
  * "profixied" by the server.
  *
  * We use this mechanism to send interruptions between guests.
@@ -303,7 +303,7 @@ Endianness
  ----------

  Device registers are hard-coded to little-endian (LE). The driver should
- convert to/from host endianess to LE for device register accesses.
+ convert to/from host endianness to LE for device register accesses.

  Descriptors are LE. Descriptor buffer TLVs will have LE type and length
  fields, but the value field can either be LE or network-byte-order, depending
@@ -10,7 +10,7 @@ Introduction
  ------------
  QEMU includes a throttling module that can be used to set limits to
  I/O operations. The code itself is generic and independent of the I/O
- units, but it is currenly used to limit the number of bytes per second
+ units, but it is currently used to limit the number of bytes per second
  and operations per second (IOPS) when performing disk I/O.

  This document explains how to use the throttling code in QEMU, and how
@@ -247,7 +247,7 @@ static void imx_i2c_write(void *opaque, hwaddr offset,
  if (s->address == ADDR_RESET) {
  if (i2c_start_transfer(s->bus, extract32(s->i2dr_write, 1, 7),
  extract32(s->i2dr_write, 0, 1))) {
- /* if non zero is returned, the adress is not valid */
+ /* if non zero is returned, the address is not valid */
  s->i2sr |= I2SR_RXAK;
  } else {
  s->address = s->i2dr_write;
@@ -37,7 +37,7 @@
  #define VMXNET3_MSIX_BAR_SIZE 0x2000
  #define MIN_BUF_SIZE 60

- /* Compatability flags for migration */
+ /* Compatibility flags for migration */
  #define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT 0
  #define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS \
  (1 << VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT)
@@ -341,7 +341,7 @@ typedef struct {
  uint32_t mcast_list_len;
  uint32_t mcast_list_buff_size; /* needed for live migration. */

- /* Compatability flags for migration */
+ /* Compatibility flags for migration */
  uint32_t compat_flags;
  } VMXNET3State;

@@ -40,7 +40,7 @@
  *
  * Setting this flag to false will remove MSI/MSI-X capability from all devices.
  *
- * It is preferrable for controllers to set this to true (non-broken) even if
+ * It is preferable for controllers to set this to true (non-broken) even if
  * they do not actually support MSI/MSI-X: guests normally probe the controller
  * type and do not attempt to enable MSI/MSI-X with interrupt controllers not
  * supporting such, so removing the capability is not required, and
@@ -116,7 +116,7 @@ pcibus_t pci_bridge_get_base(const PCIDevice *bridge, uint8_t type)
  return base;
  }

- /* accessor funciton to get bridge filtering limit */
+ /* accessor function to get bridge filtering limit */
  pcibus_t pci_bridge_get_limit(const PCIDevice *bridge, uint8_t type)
  {
  pcibus_t limit;
@@ -698,7 +698,7 @@ static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req)
  uint8_t resp_data[36];
  int rc, len, alen;

- /* We dont do EVPD. Also check that page_code is 0 */
+ /* We don't do EVPD. Also check that page_code is 0 */
  if ((cdb[1] & 0x01) || cdb[2] != 0) {
  /* Send INVALID FIELD IN CDB */
  vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0);
@@ -63,7 +63,7 @@ typedef struct PVSCSIClass {
  #define PVSCSI_DEVICE_GET_CLASS(obj) \
  OBJECT_GET_CLASS(PVSCSIClass, (obj), TYPE_PVSCSI)

- /* Compatability flags for migration */
+ /* Compatibility flags for migration */
  #define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0
  #define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \
  (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT)
@@ -184,7 +184,7 @@ static void a9_gtimer_write(void *opaque, hwaddr addr, uint64_t value,
  case R_COUNTER_LO:
  /*
  * Keep it simple - ARM docco explicitly says to disable timer before
- * modding it, so dont bother trying to do all the difficult on the fly
+ * modding it, so don't bother trying to do all the difficult on the fly
  * timer modifications - (if they even work in real hardware??).
  */
  if (s->control & R_CONTROL_TIMER_ENABLE) {
@@ -187,7 +187,7 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg,
  }

  /* Control register operations are broken out into helpers that can be
- * explictly called on aspeed_timer_reset(), but also from
+ * explicitly called on aspeed_timer_reset(), but also from
  * aspeed_timer_ctrl_op().
  */

@@ -380,7 +380,7 @@ static void aspeed_timer_reset(DeviceState *dev)

  for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
  AspeedTimer *t = &s->timers[i];
- /* Explictly call helpers to avoid any conditional behaviour through
+ /* Explicitly call helpers to avoid any conditional behaviour through
  * aspeed_timer_set_ctrl().
  */
  aspeed_timer_ctrl_enable(t, false);
@@ -34,7 +34,7 @@
  * Fill @buf with @buflen bytes of cryptographically strong
  * random data
  *
- * Returns 0 on sucess, -1 on error
+ * Returns 0 on success, -1 on error
  */
  int qcrypto_random_bytes(uint8_t *buf,
  size_t buflen,
@@ -26,7 +26,7 @@
  * We don't support Xen prior to 4.2.0.
  */

- /* Xen 4.2 thru 4.6 */
+ /* Xen 4.2 through 4.6 */
  #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471

  typedef xc_interface xenforeignmemory_handle;
@@ -219,7 +219,7 @@ void qio_task_run_in_thread(QIOTask *task,
  * qio_task_complete:
  * @task: the task struct
  *
- * Mark the operation as succesfully completed
+ * Mark the operation as successfully completed
  * and free the memory for @task.
  */
  void qio_task_complete(QIOTask *task);
@@ -313,7 +313,7 @@ static inline void qemu_timersub(const struct timeval *val1,
  void qemu_set_cloexec(int fd);

  /* QEMU "hardware version" setting. Used to replace code that exposed
- * QEMU_VERSION to guests in the past and need to keep compatibilty.
+ * QEMU_VERSION to guests in the past and need to keep compatibility.
  * Do not use qemu_hw_version() in new code.
  */
  void qemu_set_hw_version(const char *);
@@ -413,7 +413,7 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
  * userspace memory corruption (which is not detectable by valgrind
  * too, in most cases).
  * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
- * a hope that sizeof(long) wont become >8 any time soon.
+ * a hope that sizeof(long) won't become >8 any time soon.
  */
  size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
  /*HOST_LONG_BITS*/ 64) / 8;
@@ -278,7 +278,7 @@ static void deferred_incoming_migration(Error **errp)
  void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
  ram_addr_t start, size_t len)
  {
- uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname upto 256 */
+ uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
  size_t msglen = 12; /* start + len */

  *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
@@ -1272,7 +1272,7 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
  }

  /**
- * ram_save_host_page: Starting at *offset send pages upto the end
+ * ram_save_host_page: Starting at *offset send pages up to the end
  * of the current host page. It's valid for the initial
  * offset to point into the middle of a host page
  * in which case the remainder of the hostpage is sent.
@@ -373,7 +373,7 @@ static QIOChannel *nbd_receive_starttls(QIOChannel *ioc,
  }
  length = be32_to_cpu(length);
  if (length != 0) {
- error_setg(errp, "Start TLS reponse was not zero %x",
+ error_setg(errp, "Start TLS response was not zero %x",
  length);
  return NULL;
  }
@@ -77,7 +77,7 @@ static gboolean ga_channel_prepare(GSource *source, gint *timeout_ms)
  }

  out:
- /* dont block forever, iterate the main loop every once and a while */
+ /* don't block forever, iterate the main loop every once in a while */
  *timeout_ms = 500;
  /* if there's data in the read buffer, or another event is pending,
  * skip polling and issue user cb.
@@ -182,8 +182,8 @@ GuestExecStatus *qmp_guest_exec_status(int64_t pid, Error **err)
  */
  #ifdef G_OS_WIN32
  /* Additionally WIN32 does not provide any additional information
- * on whetherthe child exited or terminated via signal.
- * We use this simple range check to distingish application exit code
+ * on whether the child exited or terminated via signal.
+ * We use this simple range check to distinguish application exit code
  * (usually value less then 256) and unhandled exception code with
  * ntstatus (always value greater then 0xC0000005). */
  if ((uint32_t)gei->status < 0xC0000000U) {
@@ -363,7 +363,7 @@ sub sanitise_line {
  for ($off = 1; $off < length($line); $off++) {
  $c = substr($line, $off, 1);

- # Comments we are wacking completly including the begin
+ # Comments we are wacking completely including the begin
  # and end, all to $;.
  if ($sanitise_quote eq '' && substr($line, $off, 2) eq '/*') {
  $sanitise_quote = '*/';
@@ -206,7 +206,7 @@ soread(struct socket *so)
  * We don't test for <= 0 this time, because there legitimately
  * might not be any more data (since the socket is non-blocking),
  * a close will be detected on next iteration.
- * A return of -1 wont (shouldn't) happen, since it didn't happen above
+ * A return of -1 won't (shouldn't) happen, since it didn't happen above
  */
  if (n == 2 && nn == iov[0].iov_len) {
  int ret;
@@ -3057,7 +3057,7 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
  *
  * When the software returns from an exception, the branch will re-execute.
  * On QEMU care needs to be taken when a branch+delayslot sequence is broken
- * and the branch and delayslot dont share pages.
+ * and the branch and delayslot don't share pages.
  *
  * The TB contaning the branch insn will set up env->btarget and evaluate
  * env->btaken. When the translation loop exits we will note that the branch
@@ -3246,7 +3246,7 @@ void gen_intermediate_code(CPUCRISState *env, struct TranslationBlock *tb)
  }

  /* If we are rexecuting a branch due to exceptions on
- delay slots dont break. */
+ delay slots don't break. */
  if (!(tb->pc & 1) && cs->singlestep_enabled) {
  break;
  }
@@ -130,7 +130,7 @@ static void cris_set_prefix(DisasContext *dc)
  dc->tb_flags |= PFIX_FLAG;
  tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], PFIX_FLAG);

- /* prefix insns dont clear the x flag. */
+ /* prefix insns don't clear the x flag. */
  dc->clear_x = 0;
  cris_lock_irq(dc);
  }
@@ -2523,7 +2523,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,

  /* The Linux kernel checks for the CMPLegacy bit and
  * discards multiple thread information if it is set.
- * So dont set it here for Intel to make Linux guests happy.
+ * So don't set it here for Intel to make Linux guests happy.
  */
  if (cs->nr_cores * cs->nr_threads > 1) {
  if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
@@ -1366,7 +1366,7 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
  * If value is NULL, no default will be set and the original
  * value from the CPU model table will be kept.
  *
- * It is valid to call this funciton only for properties that
+ * It is valid to call this function only for properties that
  * are already present in the kvm_default_props table.
  */
  void x86_cpu_change_kvm_default(const char *prop, const char *value);
@@ -581,7 +581,7 @@ static bool mips_vp_is_wfi(MIPSCPU *c)

  static inline void mips_vpe_wake(MIPSCPU *c)
  {
- /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
+ /* Don't set ->halted = 0 directly, let it be done via cpu_has_work
  because there might be other conditions that state that c should
  be sleeping. */
  cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
@@ -2858,7 +2858,7 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
  } else if (shift_count == -32) {
  /* set PSW.C */
  tcg_gen_mov_tl(cpu_PSW_C, r1);
- /* fill ret completly with sign bit */
+ /* fill ret completely with sign bit */
  tcg_gen_sari_tl(ret, r1, 31);
  /* clear PSW.V */
  tcg_gen_movi_tl(cpu_PSW_V, 0);
@@ -473,7 +473,7 @@ On a 32 bit target, all 64 bit operations are converted to 32 bits. A
  few specific operations must be implemented to allow it (see add2_i32,
  sub2_i32, brcond2_i32).

- On a 64 bit target, the values are transfered between 32 and 64-bit
+ On a 64 bit target, the values are transferred between 32 and 64-bit
  registers using the following ops:
  - trunc_shr_i64_i32
  - ext_i32_i64
@@ -51,7 +51,7 @@ int main(void)
  t = (unsigned char *)x;
  t -= 32768;
  p = (unsigned char *) &y.v1;
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
  cris_tst_cc_init();
  asm volatile ("setf\tzvnc\n");
  cris_addo_pi_d(p, t);
@@ -62,7 +62,7 @@ int main(void)


  t += 32770;
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
  cris_tst_cc_init();
  asm volatile ("setf\tzvnc\n");
  cris_addo_pi_w(p, t);
@@ -71,7 +71,7 @@ int main(void)
  if (*r != 0x4455aa77)
  err();

- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
  cris_tst_cc_init();
  asm volatile ("setf\tzvnc\n");
  cris_addo_d(p, r);
@@ -81,7 +81,7 @@ int main(void)
  if (*r != 0xee19ccff)
  err();

- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
  cris_tst_cc_init();
  asm volatile ("setf\tzvnc\n");
  cris_addo_pi_b(p, t);
@@ -90,7 +90,7 @@ int main(void)
  if (*(uint16_t*)r != 0xff22)
  err();

- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
  cris_tst_cc_init();
  asm volatile ("setf\tzvnc\n");
  cris_addo_b(p, r);
@@ -100,7 +100,7 @@ int main(void)
  if (*r != 0x4455aa77)
  err();

- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
  cris_tst_cc_init();
  asm volatile ("setf\tzvnc\n");
  cris_addo_w(p, r);
@@ -110,7 +110,7 @@ int main(void)
  if (*r != 0xff224455)
  err();

- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
  cris_tst_cc_init();
  asm volatile ("setf\tzvnc\n");
  cris_addo_pi_d(p, t);
@@ -108,7 +108,7 @@ static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
  smp_rmb(); /* read memory barrier before accessing record */
  /* read the record header to know record length */
  read_from_buffer(idx, &record, sizeof(TraceRecord));
- *recordptr = malloc(record.length); /* dont use g_malloc, can deadlock when traced */
+ *recordptr = malloc(record.length); /* don't use g_malloc, can deadlock when traced */
  /* make a copy of record to avoid being overwritten */
  read_from_buffer(idx, *recordptr, record.length);
  smp_rmb(); /* memory barrier before clearing valid flag */
@@ -180,7 +180,7 @@ static gpointer writeout_thread(gpointer opaque)
  while (get_trace_record(idx, &recordptr)) {
  unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
  writeout_idx += recordptr->length;
- free(recordptr); /* dont use g_free, can deadlock when traced */
+ free(recordptr); /* don't use g_free, can deadlock when traced */
  idx = writeout_idx % TRACE_BUF_LEN;
  }

@@ -1394,7 +1394,7 @@ static void addRemovableDevicesMenuItems(void)
  [menuItem setEnabled: NO];
  [menu addItem: menuItem];

- /* Loop thru all the block devices in the emulator */
+ /* Loop through all the block devices in the emulator */
  while (currentDevice) {
  deviceName = [[NSString stringWithFormat: @"%s", currentDevice->value->device] retain];

@@ -8,9 +8,9 @@
  * Benoît Canet <benoit.canet@nodalink.com>
  * Alberto Garcia <berto@igalia.com>
  *
- * This program is free sofware: you can redistribute it and/or modify
+ * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
- * the Free Sofware Foundation, either version 2 of the License, or
+ * the Free Software Foundation, either version 2 of the License, or
  * (at your option) version 3 or any later version.
  *
  * This program is distributed in the hope that it will be useful,