Commit d6e05edc authored by Andreas Mohr, committed by Adrian Bunk

spelling fixes

acquired (aquired)
contiguous (contigious)
successful (succesful, succesfull)
surprise (suprise)
whether (weather)
some other misspellings
Signed-off-by: Andreas Mohr <andi@lisas.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
parent f18190bd
@@ -354,7 +354,7 @@ static void __init init_nsc(struct cpuinfo_x86 *c)
  * This function only handles the GX processor, and kicks every
  * thing else to the Cyrix init function above - that should
  * cover any processors that might have been branded differently
- * after NSC aquired Cyrix.
+ * after NSC acquired Cyrix.
  *
  * If this breaks your GX1 horribly, please e-mail
  * info-linux@ldcmail.amd.com to tell us.
......
@@ -175,7 +175,7 @@ static void mask_and_ack_8259A(unsigned int irq)
  * Lightweight spurious IRQ detection. We do not want
  * to overdo spurious IRQ handling - it's usually a sign
  * of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecesserily.
+ * do without slowing down good hardware unnecessarily.
  *
  * Note that IRQ7 and IRQ15 (the two spurious IRQs
  * usually resulting from the 8259A-1|2 PICs) occur
......
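The check this comment alludes to is worth spelling out: on the 8259A, a spurious IRQ7 (or IRQ15 on the slave) is one whose in-service bit was never latched, so reading the PIC's ISR register distinguishes a real interrupt from a spurious one. A minimal sketch of such a probe, assuming the standard master/slave command ports at 0x20/0xA0 (the helper name is hypothetical, but the OCW3 dance mirrors what the kernel's i8259 code does):

	static int i8259a_irq_is_real(unsigned int irq)	/* hypothetical helper */
	{
		int mask = 1 << irq;
		int value;

		if (irq < 8) {
			outb(0x0B, 0x20);	/* OCW3: next read returns the ISR */
			value = inb(0x20) & mask;
			outb(0x0A, 0x20);	/* switch the master back to the IRR */
			return value;
		}
		outb(0x0B, 0xA0);		/* same dance on the slave PIC */
		value = inb(0xA0) & (mask >> 8);
		outb(0x0A, 0xA0);
		return value;
	}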
@@ -59,7 +59,7 @@ void hook_irq_handler(int int_cause, int bit_num, void *isr_ptr)
  * bit_num - Indicates which bit number in the cause register
  *
  * Outputs :
- * 1 if succesful, 0 if failure
+ * 1 if successful, 0 if failure
  */
 int enable_galileo_irq(int int_cause, int bit_num)
 {
@@ -83,7 +83,7 @@ int enable_galileo_irq(int int_cause, int bit_num)
  * bit_num - Indicates which bit number in the cause register
  *
  * Outputs :
- * 1 if succesful, 0 if failure
+ * 1 if successful, 0 if failure
  */
 int disable_galileo_irq(int int_cause, int bit_num)
 {
......
@@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(spu_save);
  * @spu: pointer to SPU iomem structure.
  *
  * Perform harvest + restore, as we may not be coming
- * from a previous succesful save operation, and the
+ * from a previous successful save operation, and the
  * hardware state is unknown.
  */
 int spu_restore(struct spu_state *new, struct spu *spu)
......
@@ -287,7 +287,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
  * find the pci device that corresponds to a given address.
  * This routine scans all pci busses to build the cache.
  * Must be run late in boot process, after the pci controllers
- * have been scaned for devices (after all device resources are known).
+ * have been scanned for devices (after all device resources are known).
  */
 void __init pci_addr_cache_build(void)
 {
......
@@ -356,7 +356,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
 	set_vtimer(event->expires);
 	spin_unlock_irqrestore(&vt_list->lock, flags);
-	/* release CPU aquired in prepare_vtimer or mod_virt_timer() */
+	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
 	put_cpu();
 }
......
@@ -1222,7 +1222,7 @@ int open_ubd_file(char *file, struct openflags *openflags, int shared,
 		}
 	}
-	/* Succesful return case! */
+	/* Successful return case! */
 	if(backing_file_out == NULL)
 		return(fd);
......
@@ -278,7 +278,7 @@ static void mask_and_ack_8259A(unsigned int irq)
  * Lightweight spurious IRQ detection. We do not want
  * to overdo spurious IRQ handling - it's usually a sign
  * of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecesserily.
+ * do without slowing down good hardware unnecessarily.
  *
  * Note that IRQ7 and IRQ15 (the two spurious IRQs
  * usually resulting from the 8259A-1|2 PICs) occur
......
@@ -892,7 +892,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 }
 /*
- * as_can_anticipate indicates weather we should either run arq
+ * as_can_anticipate indicates whether we should either run arq
  * or keep anticipating a better request.
  */
 static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
......
@@ -2745,7 +2745,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 		return 0;
 	/*
-	 * not contigious
+	 * not contiguous
 	 */
 	if (req->sector + req->nr_sectors != next->sector)
 		return 0;
@@ -3415,7 +3415,7 @@ static struct notifier_block blk_cpu_notifier = {
  *
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completionc callback
+ *     unless the driver actually implements this in its completion callback
  *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
......
@@ -951,7 +951,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
    it most likely that the chip will notice it. It also prevents us
    from having to wait for completion. On the other hand, we may
    need to wait for completion anyway, to see if it completed
-   succesfully. */
+   successfully. */
 	switch (atm_vcc->qos.aal) {
 	case ATM_AAL2:
......
@@ -118,7 +118,7 @@ static int amd_create_gatt_pages(int nr_tables)
 	return retval;
 }
-/* Since we don't need contigious memory we just try
+/* Since we don't need contiguous memory we just try
  * to get the gatt table once
  */
......
@@ -261,7 +261,7 @@ static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
 #endif
 /*
- *Since we don't need contigious memory we just try
+ *Since we don't need contiguous memory we just try
  * to get the gatt table once
  */
......
@@ -177,7 +177,7 @@ static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
 /*
- * Since we don't need contigious memory we just try
+ * Since we don't need contiguous memory we just try
  * to get the gatt table once
  */
......
@@ -546,7 +546,7 @@ static void RIOReceive(struct rio_info *p, struct Port *PortP)
 	** run out of space it will be set to the offset of the
 	** next byte to copy from the packet data area. The packet
 	** length field is decremented by the number of bytes that
-	** we succesfully removed from the packet. When this reaches
+	** we successfully removed from the packet. When this reaches
 	** zero, we reset the offset pointer to be zero, and free
 	** the packet from the front of the queue.
 	*/
......
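The bookkeeping that comment describes is essentially a resumable copy: drain as much of the packet as the receiver will take, remember where you stopped, and only free the packet once its length field hits zero. A sketch of the shape (every identifier below except PortP is a stand-in, not the driver's actual name):

	copied = copy_to_tty(tty, pkt->data + PortP->RxStart, pkt->len);
	pkt->len -= copied;			/* bytes still pending in this packet */
	if (pkt->len == 0) {
		PortP->RxStart = 0;		/* packet fully drained */
		free_front_packet(PortP);	/* pop it off the front of the queue */
	} else {
		PortP->RxStart += copied;	/* resume from this offset next time */
	}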
@@ -341,7 +341,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan divisor. This follows the principle of
-   least suprise; the user doesn't expect the fan minimum to change just
+   least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
 static ssize_t set_fan_div(struct device *dev, const char *buf,
 	size_t count, int nr)
......
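This note, above and repeated nearly verbatim in the hwmon hunks that follow, reflects the same idiom everywhere: the fan-minimum register holds a tick count whose RPM meaning depends on the divisor (for this family of chips, RPM = 1350000 / (count * divisor)), so the driver converts the stored minimum back to RPM before switching divisors and re-encodes it afterwards. A sketch, with FAN_FROM_REG/FAN_TO_REG/DIV_FROM_REG standing in for each driver's own conversion macros:

	/* keep the user-visible minimum RPM constant across a divisor change */
	long min_rpm = FAN_FROM_REG(data->fan_min[nr],
				    DIV_FROM_REG(data->fan_div[nr]));

	data->fan_div[nr] = new_div;
	data->fan_min[nr] = FAN_TO_REG(min_rpm, DIV_FROM_REG(new_div));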
@@ -358,7 +358,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan divisor. This follows the principle of
-   least suprise; the user doesn't expect the fan minimum to change just
+   least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
 static ssize_t set_fan_div(struct device *dev, const char *buf,
 	size_t count, int nr)
......
@@ -253,7 +253,7 @@ set_fan(min2, fan_min[1], LM80_REG_FAN_MIN(2), fan_div[1]);
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan divisor. This follows the principle of
-   least suprise; the user doesn't expect the fan minimum to change just
+   least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
 static ssize_t set_fan_div(struct device *dev, const char *buf,
 	size_t count, int nr)
......
@@ -421,7 +421,7 @@ static void set_fan_min(struct device *dev, const char *buf, int nr)
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan clock divider. This follows the principle
-   of least suprise; the user doesn't expect the fan minimum to change just
+   of least surprise; the user doesn't expect the fan minimum to change just
    because the divider changed. */
 static ssize_t set_fan_div(struct device *dev, const char *buf,
 	size_t count, int nr)
......
@@ -380,7 +380,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan divisor. This follows the principle of
-   least suprise; the user doesn't expect the fan minimum to change just
+   least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
 static ssize_t set_fan_div(struct device *dev, const char *buf,
 	size_t count, int nr)
......
@@ -207,7 +207,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan clock divider. This follows the principle
-   of least suprise; the user doesn't expect the fan minimum to change just
+   of least surprise; the user doesn't expect the fan minimum to change just
    because the divider changed. */
 static ssize_t set_fan_div(struct device *dev, const char *buf,
 	size_t count, int nr)
......
@@ -781,7 +781,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan divisor. This follows the principle of
-   least suprise; the user doesn't expect the fan minimum to change just
+   least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
 static ssize_t
 store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)
......
@@ -630,7 +630,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan divisor. This follows the principle of
-   least suprise; the user doesn't expect the fan minimum to change just
+   least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
 static ssize_t
 store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)
......
@@ -463,7 +463,7 @@ show_fan_div(struct device *dev, struct device_attribute *attr,
 /* Note: we save and restore the fan minimum here, because its value is
    determined in part by the fan divisor. This follows the principle of
-   least suprise; the user doesn't expect the fan minimum to change just
+   least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
 static ssize_t
 store_fan_div(struct device *dev, struct device_attribute *attr,
......
@@ -37,7 +37,7 @@
  * Version 1.15	convert all calls to ide_raw_taskfile
  *		since args will return register content.
  * Version 1.16	added suspend-resume-checkpower
- * Version 1.17	do flush on standy, do flush on ATA < ATA6
+ * Version 1.17	do flush on standby, do flush on ATA < ATA6
  *		fix wcache setup.
  */
......
@@ -1665,7 +1665,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
  * Initialize a request before we fill it in and send it down to
  * ide_do_drive_cmd. Commands must be set up by this function. Right
  * now it doesn't do a lot, but if that changes abusers will have a
- * nasty suprise.
+ * nasty surprise.
  */
 void ide_init_drive_cmd (struct request *rq)
......
@@ -103,7 +103,7 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
  * driver specific parts, enable the controller and make it available
  * to the general subsystem using hpsb_add_host().
  *
- * Return Value: a pointer to the &hpsb_host if succesful, %NULL if
+ * Return Value: a pointer to the &hpsb_host if successful, %NULL if
  * no memory was available.
  */
 static DEFINE_MUTEX(host_num_alloc);
......
@@ -139,7 +139,7 @@ int hpsb_bus_reset(struct hpsb_host *host);
 /*
  * Hand over received selfid packet to the core. Complement check (second
- * quadlet is complement of first) is expected to be done and succesful.
+ * quadlet is complement of first) is expected to be done and successful.
  */
 void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
......
@@ -592,7 +592,7 @@ static int put_address(char *st, u_char *p, int len)
 } /* put_address */
 /*************************************/
-/* report a succesfull interrogation */
+/* report a successful interrogation */
 /*************************************/
 static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
 { char *src = ic->parm.dss1_io.data;
......
@@ -4848,7 +4848,7 @@ static void picolo_tetra_muxsel (struct bttv* btv, unsigned int input)
  *
  * The IVC120G security card has 4 i2c controlled TDA8540 matrix
  * swichers to provide 16 channels to MUX0. The TDA8540's have
- * 4 indepedant outputs and as such the IVC120G also has the
+ * 4 independent outputs and as such the IVC120G also has the
  * optional "Monitor Out" bus. This allows the card to be looking
  * at one input while the monitor is looking at another.
  *
......
@@ -508,11 +508,11 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
  * speak of. We simply pull the packet out of its PIO buffer (which is slow)
  * and queue it for the kernel. Then we reset the card for the next packet.
  *
- * We sometimes get suprise interrupts late both because the SMP IRQ delivery
+ * We sometimes get surprise interrupts late both because the SMP IRQ delivery
  * is message passing and because the card sometimes seems to deliver late. I
  * think if it is part way through a receive and the mode is changed it carries
  * on receiving and sends us an interrupt. We have to band aid all these cases
- * to get a sensible 150kbytes/second performance. Even then you want a small
+ * to get a sensible 150kBytes/second performance. Even then you want a small
  * TCP window.
  */
......
@@ -386,7 +386,7 @@ static int __irport_change_speed(struct irda_task *task)
 	/* Locking notes : this function may be called from irq context with
 	 * spinlock, via irport_write_wakeup(), or from non-interrupt without
 	 * spinlock (from the task timer). Yuck !
-	 * This is ugly, and unsafe is the spinlock is not already aquired.
+	 * This is ugly, and unsafe is the spinlock is not already acquired.
 	 * This will be fixed when irda-task get rewritten.
 	 * Jean II */
 	if (!spin_is_locked(&self->lock)) {
......
@@ -1883,7 +1883,7 @@ static void smc_reset(struct net_device *dev)
 	/* Set the Window 1 control, configuration and station addr registers.
 	   No point in writing the I/O base register ;-> */
 	SMC_SELECT_BANK(1);
-	/* Automatically release succesfully transmitted packets,
+	/* Automatically release successfully transmitted packets,
 	   Accept link errors, counter and Tx error interrupts. */
 	outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
 	     ioaddr + CONTROL);
......
@@ -1485,7 +1485,7 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
  *
  * Sending the PREPARE_FOR_POWER_DOWN will restrict the
  * hardware from going into standby mode and will transition
- * out of D0-standy if it is already in that state.
+ * out of D0-standby if it is already in that state.
  *
  * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
  * driver upon completion. Once received, the driver can
......
@@ -369,7 +369,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/*
 	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
-	 * Firmware method after natice method ?
+	 * Firmware method after native method ?
 	 */
 	if (platform_pci_set_power_state)
 		platform_pci_set_power_state(dev, state);
......
@@ -167,7 +167,7 @@ zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
  *		initiates adapter recovery which is done
  *		asynchronously
  *
- * returns:	0	- initiated action succesfully
+ * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
 int
@@ -203,7 +203,7 @@ zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
  * purpose:	Wrappper for zfcp_erp_adapter_reopen_internal
  *		used to ensure the correct locking
  *
- * returns:	0	- initiated action succesfully
+ * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
 int
@@ -469,7 +469,7 @@ zfcp_test_link(struct zfcp_port *port)
  *		initiates Forced Reopen recovery which is done
  *		asynchronously
  *
- * returns:	0	- initiated action succesfully
+ * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
 static int
@@ -509,7 +509,7 @@ zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask)
  * purpose:	Wrappper for zfcp_erp_port_forced_reopen_internal
  *		used to ensure the correct locking
  *
- * returns:	0	- initiated action succesfully
+ * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
 int
@@ -536,7 +536,7 @@ zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask)
  *		initiates Reopen recovery which is done
  *		asynchronously
  *
- * returns:	0	- initiated action succesfully
+ * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
 static int
@@ -605,7 +605,7 @@ zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask)
  *		initiates Reopen recovery which is done
  *		asynchronously
  *
- * returns:	0	- initiated action succesfully
+ * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
 static int
@@ -1805,7 +1805,7 @@ zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear)
  * purpose:	Wrappper for zfcp_erp_port_reopen_all_internal
  *		used to ensure the correct locking
  *
- * returns:	0	- initiated action succesfully
+ * returns:	0	- initiated action successfully
  *		<0	- failed to initiate action
  */
 int
......
@@ -500,7 +500,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 /*
  * Function : int should_disconnect (unsigned char cmd)
  *
- * Purpose : decide weather a command would normally disconnect or
+ * Purpose : decide whether a command would normally disconnect or
  *	not, since if it won't disconnect we should go to sleep.
  *
  * Input : cmd - opcode of SCSI command
......
@@ -12374,7 +12374,7 @@ AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
 			ASC_PRINT1(
 "AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i);
 		} else {
-			ASC_PRINT("AscInitFromEEP: Succesfully re-wrote EEPROM.");
+			ASC_PRINT("AscInitFromEEP: Successfully re-wrote EEPROM.\n");
 		}
 	}
 	return (warn_code);
......
@@ -3771,7 +3771,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  * @target: The target for the new device.
  * @lun: The lun for the new device.
  *
- * Return the new device if succesfull or NULL on failure.
+ * Return the new device if successful or NULL on failure.
  **/
 static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
 		u8 target, u8 lun)
......
@@ -760,7 +760,7 @@ static int device_inquiry(int host_index, int ldn)
 		while (!got_interrupt(host_index))
 			barrier();
-		/*if command succesful, break */
+		/*if command successful, break */
 		if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
 			return 1;
 	}
@@ -885,7 +885,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
 		while (!got_interrupt(host_index))
 			barrier();
-		/*if command succesful, break */
+		/*if command successful, break */
 		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
@@ -921,7 +921,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
 				return 2;
 		} else
 			global_command_error_excuse = 0;
-		/*if command succesful, break */
+		/*if command successful, break */
 		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
@@ -959,7 +959,7 @@ static int immediate_reset(int host_index, unsigned int ldn)
 			/* did not work, finish */
 			return 1;
 		}
-		/*if command succesful, break */
+		/*if command successful, break */
 		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
......
@@ -6438,7 +6438,7 @@ ips_erase_bios(ips_ha_t * ha)
 		/* VPP failure */
 		return (1);
-	/* check for succesful flash */
+	/* check for successful flash */
 	if (status & 0x30)
 		/* sequence error */
 		return (1);
@@ -6550,7 +6550,7 @@ ips_erase_bios_memio(ips_ha_t * ha)
 		/* VPP failure */
 		return (1);
-	/* check for succesful flash */
+	/* check for successful flash */
 	if (status & 0x30)
 		/* sequence error */
 		return (1);
......
@@ -2818,7 +2818,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
 		    (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
 		     cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
 		    undone == 0) {
-			ioctl_result = 0;	/* EOF written succesfully at EOM */
+			ioctl_result = 0;	/* EOF written successfully at EOM */
 			if (fileno >= 0)
 				fileno++;
 			STps->drv_file = fileno;
......
@@ -712,7 +712,7 @@ static void v9fs_read_work(void *a)
  * v9fs_send_request - send 9P request
  * The function can sleep until the request is scheduled for sending.
  * The function can be interrupted. Return from the function is not
- * a guarantee that the request is sent succesfully. Can return errors
+ * a guarantee that the request is sent successfully. Can return errors
  * that can be retrieved by PTR_ERR macros.
  *
  * @m: mux data
......
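The PTR_ERR remark means the returned pointer either is a valid request or encodes a negative errno, so callers use the usual IS_ERR/PTR_ERR idiom. A hypothetical caller (the argument names here are assumptions, not the mux API's exact signature):

	struct v9fs_req *req;

	req = v9fs_send_request(m, tcall, cb, cba);
	if (IS_ERR(req))
		return PTR_ERR(req);	/* e.g. -ERESTARTSYS if interrupted */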
@@ -641,7 +641,7 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb)
  * invoked both for initial i/o submission and
  * subsequent retries via the aio_kick_handler.
  * Expects to be invoked with iocb->ki_ctx->lock
- * already held. The lock is released and reaquired
+ * already held. The lock is released and reacquired
  * as needed during processing.
  *
  * Calls the iocb retry method (already setup for the
......
@@ -43,7 +43,7 @@ int jffs2_sum_init(struct jffs2_sb_info *c)
 		return -ENOMEM;
 	}
-	dbg_summary("returned succesfully\n");
+	dbg_summary("returned successfully\n");
 	return 0;
 }
......
@@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
 	/* allocate the disk blocks for the extent. initially, extBalloc()
 	 * will try to allocate disk blocks for the requested size (xlen).
-	 * if this fails (xlen contigious free blocks not avaliable), it'll
+	 * if this fails (xlen contiguous free blocks not avaliable), it'll
 	 * try to allocate a smaller number of blocks (producing a smaller
 	 * extent), with this smaller number of blocks consisting of the
 	 * requested number of blocks rounded down to the next smaller
@@ -493,7 +493,7 @@ int extFill(struct inode *ip, xad_t * xp)
 	 *
 	 * initially, we will try to allocate disk blocks for the
 	 * requested size (nblocks). if this fails (nblocks
-	 * contigious free blocks not avaliable), we'll try to allocate
+	 * contiguous free blocks not avaliable), we'll try to allocate
 	 * a smaller number of blocks (producing a smaller extent), with
 	 * this smaller number of blocks consisting of the requested
 	 * number of blocks rounded down to the next smaller power of 2
@@ -529,7 +529,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
 	/* get the number of blocks to initially attempt to allocate.
 	 * we'll first try the number of blocks requested unless this
-	 * number is greater than the maximum number of contigious free
+	 * number is greater than the maximum number of contiguous free
 	 * blocks in the map. in that case, we'll start off with the
 	 * maximum free.
 	 */
@@ -586,7 +586,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
 	 * in place. if this fails, we'll try to move the extent
 	 * to a new set of blocks. if moving the extent, we initially
 	 * will try to allocate disk blocks for the requested size
-	 * (nnew). if this fails (nnew contigious free blocks not
+	 * (nnew). if this fails (new contiguous free blocks not
 	 * avaliable), we'll try to allocate a smaller number of
 	 * blocks (producing a smaller extent), with this smaller
 	 * number of blocks consisting of the requested number of
......
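The retry policy these JFS comments describe, try the full request and then back off through successively smaller powers of two, has roughly the following shape. This is a sketch only: dbAlloc() is JFS's real block allocator, but the loop and the round_down_pow2() helper are illustrative assumptions, not the file's actual code.

	static s64 round_down_pow2(s64 v)	/* next smaller power of 2 */
	{
		s64 p = 1;

		while ((p << 1) < v)
			p <<= 1;
		return p;
	}

	/* inside the allocator: */
	s64 want = nblocks;

	while ((rc = dbAlloc(ip, hint, want, &blkno)) == -ENOSPC) {
		if (want == 1)
			return rc;		/* nothing smaller left to try */
		want = round_down_pow2(want);	/* e.g. 24 -> 16 -> 8 -> ... */
	}
	*nblocks = want;	/* the extent may come back smaller than requested */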
@@ -427,7 +427,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b". Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
  **/
 #define read_barrier_depends() do { } while(0)
......
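For reference, the elided two-CPU example that this a/b/x/y discussion refers to has the following shape; this is a hedged reconstruction from the surrounding text (initial values a == 0 and b == 1), not a verbatim quote of the header:

	CPU 0				CPU 1

	a = 2;
	memory_barrier();
	b = 3;				y = b;
					read_barrier_depends();
					x = a;

Because the read of a carries no data dependency on the read of b, read_barrier_depends() imposes no ordering between them; on Alpha, CPU 1 can observe y == 3 while x is still 0. An rmb() between the two reads is what forces the order.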
@@ -318,7 +318,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b". Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
  **/
 #define read_barrier_depends() do { } while (0)
......
@@ -126,7 +126,7 @@ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
 /* Just increments the mechanism's reference count and returns its input: */
 struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
-/* For every succesful gss_mech_get or gss_mech_get_by_* call there must be a
+/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a